from __future__ import absolute_import, division, print_function
import numpy as np
import h5py
from multipledispatch import MDNotImplementedError
from datashape import DataShape, to_numpy
from ..partition import partitions, partition_get, partition_set, flatten
from ..expr import Reduction, Field, Projection, Broadcast, Selection, Symbol
from ..expr import Distinct, Sort, Head, Label, ReLabel, Union, Expr, Slice
from ..expr import std, var, count, nunique
from ..expr import BinOp, UnaryOp, USub, Not
from ..expr import path
from ..expr.split import split
from .core import base, compute
from ..dispatch import dispatch
from ..api.into import into
from ..partition import partitions, partition_get, partition_set
__all__ = []
@dispatch(Slice, h5py.Dataset)
def compute_up(expr, data, **kwargs):
return data[expr.index]
@dispatch(Expr, h5py.Dataset)
def compute_down(expr, data, **kwargs):
""" Compute expressions on H5Py datasets by operating on chunks
This uses blaze.expr.split to break a full-array-computation into a
per-chunk computation and a on-aggregate computation.
This uses blaze.partition to pick out chunks from the h5py dataset, uses
compute(numpy) to compute on each chunk and then uses blaze.partition to
aggregate these (hopefully smaller) intermediate results into a local
numpy array. It then performs a second operation (again given by
blaze.expr.split) on this intermediate aggregate
The expression must contain some sort of Reduction. Both the intermediate
result and the final result are assumed to fit into memory
"""
leaf = expr._leaves()[0]
if not any(isinstance(node, Reduction) for node in path(expr, leaf)):
raise MDNotImplementedError()
# Compute chunksize (this should be improved)
chunksize = kwargs.get('chunksize', data.chunks)
# Split expression into per-chunk and on-aggregate pieces
chunk = Symbol('chunk', DataShape(*(chunksize + (leaf.dshape.measure,))))
(chunk, chunk_expr), (agg, agg_expr) = \
split(leaf, expr, chunk=chunk)
# Create numpy array to hold intermediate aggregate
shape, dtype = to_numpy(agg.dshape)
intermediate = np.empty(shape=shape, dtype=dtype)
# Compute partitions
data_partitions = partitions(data, chunksize=chunksize)
int_partitions = partitions(intermediate, chunksize=chunk_expr.shape)
# For each partition, compute chunk->chunk_expr
# Insert into intermediate
# This could be parallelized
for d, i in zip(data_partitions, int_partitions):
chunk_data = partition_get(data, d, chunksize=chunksize)
result = compute(chunk_expr, {chunk: chunk_data})
partition_set(intermediate, i, result, chunksize=chunk_expr.shape)
# Compute on the aggregate
return compute(agg_expr, {agg: intermediate})
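# Illustrative usage sketch (not part of the original module), mirroring the
# accompanying tests; the file name 'example.h5' and the dataset name '/x' are
# arbitrary choices for this example.
#
#     >>> import numpy as np, h5py
#     >>> from blaze import compute
#     >>> from blaze.expr import Symbol
#     >>> from datashape import discover
#     >>> x = np.arange(20 * 24, dtype='f4').reshape((20, 24))
#     >>> f = h5py.File('example.h5')
#     >>> d = f.create_dataset('/x', shape=x.shape, dtype=x.dtype, chunks=(4, 6))
#     >>> d[:] = x
#     >>> s = Symbol('s', discover(x))
#     >>> compute(s.sum(axis=1), d)                      # per-chunk sums, then a final aggregation
#     >>> compute(s.sum(axis=1), d, chunksize=(7, 7))    # chunksize need not match the HDF5 chunks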
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/h5py.py",
"copies": "1",
"size": "2844",
"license": "bsd-3-clause",
"hash": -7568301141289312000,
"line_mean": 37.4324324324,
"line_max": 78,
"alpha_frac": 0.7281997187,
"autogenerated": false,
"ratio": 3.99438202247191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031426775612822125,
"num_lines": 74
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import h5py
import pytest
from blaze import compute
from blaze.expr import Symbol
from datashape import discover
from blaze.utils import tmpfile
from blaze.compute.h5py import *
def eq(a, b):
return (a == b).all()
x = np.arange(20*24, dtype='f4').reshape((20, 24))
@pytest.yield_fixture
def data():
with tmpfile('.h5') as filename:
f = h5py.File(filename)
d = f.create_dataset('/x', shape=x.shape, dtype=x.dtype,
fillvalue=0.0, chunks=(4, 6))
d[:] = x
yield d
f.close()
rec = np.empty(shape=(20, 24), dtype=[('x', 'i4'), ('y', 'i4')])
rec['x'] = 1
rec['y'] = 2
@pytest.yield_fixture
def recdata():
with tmpfile('.h5') as filename:
f = h5py.File(filename)
d = f.create_dataset('/x', shape=rec.shape,
dtype=rec.dtype,
chunks=(4, 6))
d['x'] = rec['x']
d['y'] = rec['y']
yield d
f.close()
s = Symbol('s', discover(x))
def test_slicing(data):
for idx in [0, 1, (0, 1), slice(1, 3), (0, slice(1, 5, 2))]:
assert eq(compute(s[idx], data), x[idx])
def test_reductions(data):
assert eq(compute(s.sum(), data),
x.sum())
assert eq(compute(s.sum(axis=1), data),
x.sum(axis=1))
assert eq(compute(s.sum(axis=0), data),
x.sum(axis=0))
def test_mixed(recdata):
s = Symbol('s', discover(recdata))
expr = (s.x + 1).sum(axis=1)
assert eq(compute(expr, recdata), compute(expr, rec))
def test_uneven_chunk_size(data):
assert eq(compute(s.sum(axis=1), data, chunksize=(7, 7)),
x.sum(axis=1))
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/tests/test_h5py.py",
"copies": "1",
"size": "1776",
"license": "bsd-3-clause",
"hash": 2020718357220294400,
"line_mean": 23,
"line_max": 64,
"alpha_frac": 0.5433558559,
"autogenerated": false,
"ratio": 3.041095890410959,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4084451746310959,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import json
from collections import OrderedDict
import copy
import math
from atom.api import Atom, Str, observe, Typed, Int, Dict, List, Float, Bool
from skbeam.fluorescence import XrfElement as Element
from skbeam.core.fitting.xrf_model import (
ParamController,
compute_escape_peak,
trim,
construct_linear_model,
linear_spectrum_fitting,
)
from skbeam.core.fitting.xrf_model import K_LINE, L_LINE, M_LINE
from ..core.map_processing import snip_method_numba
from ..core.xrf_utils import check_if_eline_supported, get_eline_parameters, get_element_atomic_number
from ..core.utils import gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma
import logging
logger = logging.getLogger(__name__)
bound_options = ["none", "lohi", "fixed", "lo", "hi"]
fit_strategy_list = [
"fit_with_tail",
"free_more",
"e_calibration",
"linear",
"adjust_element1",
"adjust_element2",
"adjust_element3",
]
autofit_param = ["e_offset", "e_linear", "fwhm_offset", "fwhm_fanoprime"]
class PreFitStatus(Atom):
"""
Data structure for pre fit analysis.
Attributes
----------
z : str
z number of element
spectrum : array
spectrum of given element
status : bool
True if the plot is visible
stat_copy : bool
copy of status
maxv : float
max value of a spectrum
norm : float
norm value with respect to the strongest peak
lbd_stat : bool
defines plotting status relative to a threshold value
"""
z = Str()
energy = Str()
area = Float()
spectrum = Typed(np.ndarray)
status = Bool(False)
stat_copy = Bool(False)
maxv = Float()
norm = Float()
lbd_stat = Bool(False)
class ElementController(object):
"""
This class provides basic operations to rank elements, show elements,
calculate normalized intensity, etc.
"""
def __init__(self):
self.element_dict = OrderedDict()
def delete_item(self, k):
try:
del self.element_dict[k]
self.update_norm()
logger.debug("Item {} is deleted.".format(k))
except KeyError:
pass
def order(self, option="z"):
"""
Order dict in different ways.
"""
if option == "z":
self.element_dict = OrderedDict(sorted(self.element_dict.items(), key=lambda t: t[1].z))
elif option == "energy":
self.element_dict = OrderedDict(sorted(self.element_dict.items(), key=lambda t: t[1].energy))
elif option == "name":
self.element_dict = OrderedDict(sorted(self.element_dict.items(), key=lambda t: t[0]))
elif option == "maxv":
self.element_dict = OrderedDict(
sorted(self.element_dict.items(), key=lambda t: t[1].maxv, reverse=True)
)
def add_to_dict(self, dictv):
"""
Add new items to the dictionary of elements, updating the existing items.
"""
self.element_dict.update(dictv)
logger.debug("Item {} is added.".format(list(dictv.keys())))
self.update_norm()
def update_norm(self, threshv=0.0):
"""
Calculate the normalized intensity for each element peak.
Parameters
----------
threshv : float
Threshold value: elements with normalized intensity below it are not shown
"""
# Do nothing if no elements are selected
if not self.element_dict:
return
max_dict = np.max([v.maxv for v in self.element_dict.values()])
for v in self.element_dict.values():
v.norm = v.maxv / max_dict * 100
v.lbd_stat = bool(v.norm > threshv)
# Also delete elements with smaller values:
# there are some bugs in plotting when values < 0.0
self.delete_peaks_below_threshold(threshv=threshv)
def delete_all(self):
self.element_dict.clear()
def is_element_in_list(self, element_line_name):
"""
Check if the element line 'element_line_name' is in the list of selected elements
"""
return element_line_name in self.element_dict
def get_element_list(self):
current_elements = [v for v in self.element_dict.keys() if (v.lower() != v)]
# logger.info('Current Elements for '
# 'fitting are {}'.format(current_elements))
return current_elements
def update_peak_ratio(self):
"""
If 'maxv' is modified, then the values of 'area' and 'spectrum' are adjusted accordingly:
(1) maximum of spectrum is set equal to 'maxv'; (2) 'area' is scaled proportionally.
It is important that only 'maxv' is changed before this function is called.
"""
for v in self.element_dict.values():
max_spectrum = np.max(v.spectrum)
if not math.isclose(max_spectrum, 0.0, abs_tol=1e-20):
factor = v.maxv / max_spectrum
else:
factor = 0.0
v.spectrum *= factor
v.area *= factor
self.update_norm()
def turn_on_all(self, option=True):
"""
Set plotting status on for all lines.
"""
if option is True:
_plot = option
else:
_plot = False
for v in self.element_dict.values():
v.status = _plot
def delete_peaks_below_threshold(self, threshv=0.1):
"""
Delete elements with normalized intensity smaller than the threshold value.
Non-element peaks (compton, elastic, background) are not deleted.
"""
remove_list = []
non_element = ["compton", "elastic", "background"]
for k, v in self.element_dict.items():
if math.isnan(v.norm) or (v.norm >= threshv) or (k in non_element):
continue
# We don't want to delete userpeaks or pileup peaks (they are always added manually).
if ("-" in k) or (k.lower().startswith("userpeak")):
continue
remove_list.append(k)
for name in remove_list:
del self.element_dict[name]
return remove_list
def delete_unselected_items(self):
remove_list = []
non_element = ["compton", "elastic", "background"]
for k, v in self.element_dict.items():
if k in non_element:
continue
if v.status is False:
remove_list.append(k)
for name in remove_list:
del self.element_dict[name]
return remove_list
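# Illustrative usage sketch (not part of the original module): ElementController
# keeps PreFitStatus entries keyed by line name. The line name, spectrum and
# maximum value below are arbitrary examples.
#
#     >>> import numpy as np
#     >>> ec = ElementController()
#     >>> ps = PreFitStatus(z="20", energy="3.6917", spectrum=np.ones(10),
#     ...                   maxv=100.0, norm=-1, status=True, lbd_stat=False)
#     >>> ec.add_to_dict({"Ca_K": ps})              # also recomputes 'norm' for all entries
#     >>> ec.order(option="maxv")                   # strongest peak first
#     >>> ec.delete_peaks_below_threshold(threshv=0.1)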
class ParamModel(Atom):
"""
The model class used to maintain the set of fitting parameters.
Attributes
----------
parameters : `atom.Dict`
A dictionary of `Parameter` objects, subclassed from the `Atom` base class.
These `Parameter` objects hold all relevant xrf information.
data : array
1D array of spectrum
prefit_x : array
X axis with range defined by low and high limits.
param_d : dict
Parameters can be transferred into this dictionary.
param_new : dict
More information is saved, such as element position and width.
total_y : dict
Results from K lines
total_y_l : dict
Results from L lines
total_y_m : dict
Results from M lines
e_list : str
All elements used for fitting.
file_path : str
The path where file is saved.
element_list : list
The list of element lines selected for fitting
n_selected_elines_for_fitting : Int
The number of element lines selected for fitting
n_selected_pure_elines_for_fitting : Int
The number of element lines selected for fitting
excluding pileup peaks and user defined peaks.
Only 'pure' lines like Ca_K, K_K etc.
"""
# Reference to FileIOModel object
io_model = Typed(object)
default_parameters = Dict()
# data = Typed(np.ndarray)
prefit_x = Typed(object)
result_dict_names = List()
param_new = Dict()
total_y = Typed(object)
# total_l = Dict()
# total_m = Dict()
# total_pileup = Dict()
e_name = Str() # Element line name selected in combo box
add_element_intensity = Float(1000.0)
element_list = List()
# data_sets = Typed(OrderedDict)
EC = Typed(object)
x0 = Typed(np.ndarray)
y0 = Typed(np.ndarray)
max_area_dig = Int(2)
auto_fit_all = Dict()
bound_val = Float(1.0)
energy_bound_high_buf = Float(0.0)
energy_bound_low_buf = Float(0.0)
n_selected_elines_for_fitting = Int(0)
n_selected_pure_elines_for_fitting = Int(0)
parameters_changed_cb = List()
def __init__(self, *, default_parameters, io_model):
try:
self.io_model = io_model
self.default_parameters = default_parameters
self.param_new = copy.deepcopy(default_parameters)
# TODO: do we set 'element_list' as a list of keys of 'EC.element_dict'?
self.element_list = get_element_list(self.param_new)
except ValueError:
logger.info("No default parameter files are chosen.")
self.EC = ElementController()
# The following line is part of the fix for automated updating of the energy bound
# in 'Automatic Element Finding' dialog box
self.energy_bound_high_buf = self.param_new["non_fitting_values"]["energy_bound_high"]["value"]
self.energy_bound_low_buf = self.param_new["non_fitting_values"]["energy_bound_low"]["value"]
def add_parameters_changed_cb(self, cb):
"""
Add a callback to the list of callback functions that are called after the parameters are updated.
"""
self.parameters_changed_cb.append(cb)
def remove_parameters_changed_cb(self, cb):
"""
Remove reference from the list of callback functions.
"""
self.parameters_changed_cb = [_ for _ in self.parameters_changed_cb if _ != cb]
def parameters_changed(self):
"""
Run callback functions in the list. This method is expected to be called after the parameters
are updated to initiate the necessary updates in the GUI.
"""
for cb in self.parameters_changed_cb:
cb()
def default_param_update(self, default_parameters):
"""
Replace the reference to the dictionary of default parameters.
Parameters
----------
default_parameters : dict
Reference to complete and valid dictionary of default parameters.
"""
self.default_parameters = default_parameters
# The following function is part of the fix for automated updating of the energy bound
# in 'Automatic Element Finding' dialog box
@observe("energy_bound_high_buf")
def _update_energy_bound_high_buf(self, change):
self.param_new["non_fitting_values"]["energy_bound_high"]["value"] = change["value"]
self.define_range()
@observe("energy_bound_low_buf")
def _update_energy_bound_high_low(self, change):
self.param_new["non_fitting_values"]["energy_bound_low"]["value"] = change["value"]
self.define_range()
def get_new_param_from_file(self, param_path):
"""
Update parameters if new param_path is given.
Parameters
----------
param_path : str
path to the file with saved parameters
"""
with open(param_path, "r") as json_data:
self.param_new = json.load(json_data)
self.create_spectrum_from_param_dict(reset=True)
logger.info("Elements read from file are: {}".format(self.element_list))
def update_new_param(self, param, reset=True):
"""
Update the parameters based on the dictionary of parameters. Set ``reset=False``
if selection status of elemental lines should be kept.
Parameters
----------
param : dict
new dictionary of parameters
reset : boolean
reset (``True``) or keep (``False``) the selection status of the element lines.
"""
self.param_new = param
self.create_spectrum_from_param_dict(reset=reset)
@observe("bound_val")
def _update_bound(self, change):
if change["type"] != "create":
logger.info(f"Peaks with values than the threshold {self.bound_val} will be removed from the list.")
def define_range(self):
"""
Cut the x range according to the values defined in param_dict.
"""
if self.io_model.data is None:
return
lowv = self.param_new["non_fitting_values"]["energy_bound_low"]["value"]
highv = self.param_new["non_fitting_values"]["energy_bound_high"]["value"]
self.x0, self.y0 = define_range(
self.io_model.data,
lowv,
highv,
self.param_new["e_offset"]["value"],
self.param_new["e_linear"]["value"],
)
def create_spectrum_from_param_dict(self, reset=True):
"""
Create spectrum profile based on the current set of parameters.
(``self.param_new`` -> ``self.EC`` and ``self.element_list``).
Typical use: update self.param_new, then call this function.
Set ``reset=False`` to keep selection status of the elemental lines.
Parameters
----------
reset : boolean
clear or keep status of the elemental lines (in ``self.EC``).
"""
param_dict = self.param_new
self.element_list = get_element_list(param_dict)
self.define_range()
self.prefit_x, pre_dict, area_dict = calculate_profile(self.x0, self.y0, param_dict, self.element_list)
# add escape peak
if param_dict["non_fitting_values"]["escape_ratio"] > 0:
pre_dict["escape"] = trim_escape_peak(self.io_model.data, param_dict, len(self.y0))
temp_dict = OrderedDict()
for e in pre_dict.keys():
if e in ["background", "escape"]:
spectrum = pre_dict[e]
# The summed spectrum here is not correct, since the interval is assumed
# to be 1 instead of the energy interval; however, the area of background
# and escape peaks is not used elsewhere, so this is not important
area = np.sum(spectrum)
ps = PreFitStatus(
z=get_Z(e),
energy=get_energy(e),
area=float(area),
spectrum=spectrum,
maxv=float(np.around(np.max(spectrum), self.max_area_dig)),
norm=-1,
status=True,
lbd_stat=False,
)
temp_dict[e] = ps
elif "-" in e: # pileup peaks
energy = self.get_pileup_peak_energy(e)
energy = f"{energy:.4f}"
spectrum = pre_dict[e]
area = area_dict[e]
ps = PreFitStatus(
z=get_Z(e),
energy=str(energy),
area=area,
spectrum=spectrum,
maxv=np.around(np.max(spectrum), self.max_area_dig),
norm=-1,
status=True,
lbd_stat=False,
)
temp_dict[e] = ps
else:
ename = e.split("_")[0]
for k, v in param_dict.items():
energy = get_energy(e) # For all peaks except Userpeaks
if ename in k and "area" in k:
spectrum = pre_dict[e]
area = area_dict[e]
elif ename == "compton" and k == "compton_amplitude":
spectrum = pre_dict[e]
area = area_dict[e]
elif ename == "elastic" and k == "coherent_sct_amplitude":
spectrum = pre_dict[e]
area = area_dict[e]
elif self.get_eline_name_category(ename) == "userpeak":
key = ename + "_delta_center"
energy = param_dict[key]["value"] + 5.0
energy = f"{energy:.4f}"
else:
continue
ps = PreFitStatus(
z=get_Z(ename),
energy=energy,
area=area,
spectrum=spectrum,
maxv=np.around(np.max(spectrum), self.max_area_dig),
norm=-1,
status=True,
lbd_stat=False,
)
temp_dict[e] = ps
# Copy element status
if not reset:
element_status = {_: self.EC.element_dict[_].status for _ in self.EC.element_dict}
self.EC.delete_all()
self.EC.add_to_dict(temp_dict)
if not reset:
for key in self.EC.element_dict.keys():
if key in element_status:
self.EC.element_dict[key].status = element_status[key]
self.result_dict_names = list(self.EC.element_dict.keys())
def get_selected_eline_energy_fwhm(self, eline):
"""
Returns values of energy and fwhm for the peak 'eline' from the dictionary `self.param_new`.
The emission line must exist in the dictionary. Primarily intended for use
with user-defined peaks.
Parameters
----------
eline: str
emission line (e.g. Ca_K) or peak name (e.g. Userpeak2, V_Ka1-Co_Ka1)
"""
if eline not in self.EC.element_dict:
raise ValueError(f"Emission line '{eline}' is not in the list of selected lines.")
keys = self._generate_param_keys(eline)
if not keys["key_dcenter"] or not keys["key_dsigma"]:
raise ValueError(f"Failed to generate keys for the emission line '{eline}'.")
energy = self.param_new[keys["key_dcenter"]]["value"] + 5.0
dsigma = self.param_new[keys["key_dsigma"]]["value"]
fwhm = gaussian_sigma_to_fwhm(dsigma) + self._compute_fwhm_base(energy)
return energy, fwhm
def get_pileup_peak_energy(self, eline):
"""
Returns the energy (center) of pileup peak. Returns None if there is an error.
Parameters
----------
eline: str
Name of the pileup peak, e.g. V_Ka1-Co_Ka1
Returns
-------
float or None
Energy in keV or None
"""
incident_energy = self.param_new["coherent_sct_energy"]["value"]
try:
element_line1, element_line2 = eline.split("-")
e1_cen = get_eline_parameters(element_line1, incident_energy)["energy"]
e2_cen = get_eline_parameters(element_line2, incident_energy)["energy"]
en = e1_cen + e2_cen
except Exception:
en = None
return en
def add_peak_manual(self, userpeak_center=2.5):
"""
Manually add an emission line (or peak).
Parameters
----------
userpeak_center: float
Center of the user defined peak. Ignored if emission line other
than 'userpeak' is added
"""
self._manual_input(userpeak_center=userpeak_center)
self.update_name_list()
self.data_for_plot()
def remove_peak_manual(self):
"""
Manually remove an emission line (or peak). The name of the emission line (peak) to be
deleted must be written to `self.e_name` before calling the function.
"""
if self.e_name not in self.EC.element_dict:
msg = (
f"Line '{self.e_name}' is not in the list of selected lines,\n"
f"therefore it can not be deleted from the list."
)
raise RuntimeError(msg)
# Update parameter list
self._remove_parameters_for_eline(self.e_name)
# Update EC
self.EC.delete_item(self.e_name)
self.EC.update_peak_ratio()
self.update_name_list()
self.data_for_plot()
def remove_elements_below_threshold(self, threshv=None):
if threshv is None:
threshv = self.bound_val
deleted_elements = self.EC.delete_peaks_below_threshold(threshv=threshv)
for eline in deleted_elements:
self._remove_parameters_for_eline(eline)
self.EC.update_peak_ratio()
self.update_name_list()
self.data_for_plot()
def remove_elements_unselected(self):
deleted_elements = self.EC.delete_unselected_items()
for eline in deleted_elements:
self._remove_parameters_for_eline(eline)
self.EC.update_peak_ratio()
self.update_name_list()
self.data_for_plot()
def _remove_parameters_for_eline(self, eline):
"""Remove entries for `eline` from the dictionary `self.param_new`"""
if self.get_eline_name_category(eline) == "pileup":
key_prefix = "pileup_" + self.e_name.replace("-", "_")
else:
key_prefix = eline
# It is sufficient to compare using lowercase; this is also more reliable.
key_prefix = key_prefix.lower()
keys_to_delete = [_ for _ in self.param_new.keys() if _.lower().startswith(key_prefix)]
for key in keys_to_delete:
del self.param_new[key]
# Add name to the name list
_remove_element_from_list(eline, self.param_new)
def _manual_input(self, userpeak_center=2.5):
"""
Manually add an emission line (or peak).
Parameters
----------
userpeak_center: float
Center of the user defined peak. Ignored if emission line other
than 'userpeak' is added
"""
if self.e_name in self.EC.element_dict:
msg = f"Line '{self.e_name}' is in the list of selected lines. \nDuplicate entries are not allowed."
raise RuntimeError(msg)
default_area = 1e2
# Add the new data entry to the parameter dictionary. This operation is necessary for 'userpeak'
# lines, because they need to be placed at a specific position (by setting the 'delta_center'
# parameter), while regular element lines are placed at their default positions.
d_energy = userpeak_center - 5.0
# PC.params will contain a deepcopy of 'self.param_new' with the new line added
PC = ParamController(self.param_new, [self.e_name])
if self.get_eline_name_category(self.e_name) == "userpeak":
energy = userpeak_center
# Default values for 'delta_center'
dc = copy.deepcopy(PC.params[f"{self.e_name}_delta_center"])
# Modify the default values in the dictionary of parameters
PC.params[f"{self.e_name}_delta_center"]["value"] = d_energy
PC.params[f"{self.e_name}_delta_center"]["min"] = d_energy - (dc["value"] - dc["min"])
PC.params[f"{self.e_name}_delta_center"]["max"] = d_energy + (dc["max"] - dc["value"])
elif self.get_eline_name_category(self.e_name) == "pileup":
energy = self.get_pileup_peak_energy(self.e_name)
else:
energy = get_energy(self.e_name)
param_tmp = PC.params
param_tmp = create_full_dict(param_tmp, fit_strategy_list)
# Add name to the name list
_add_element_to_list(self.e_name, param_tmp)
# 'self.param_new' is used to provide 'hint' values for the model, but all active
# emission lines in 'elemental_lines' will be included in the model.
# The model will contain lines in 'elemental_lines', Compton and elastic
x, data_out, area_dict = calculate_profile(
self.x0, self.y0, param_tmp, elemental_lines=[self.e_name], default_area=default_area
)
# Check if element profile was calculated successfully.
# Calculation may fail if the selected line is not activated.
# The calculation is performed using the ``xraylib`` library, so there is no
# control over it.
if self.e_name not in data_out:
raise Exception(f"Failed to add the emission line '{self.e_name}': line is not activated.")
# If model was generated successfully (the emission line was successfully added), then
# make temporary parameters permanent
self.param_new = param_tmp
ratio_v = self.add_element_intensity / np.max(data_out[self.e_name])
ps = PreFitStatus(
z=get_Z(self.e_name),
energy=energy if isinstance(energy, str) else f"{energy:.4f}",
area=area_dict[self.e_name] * ratio_v,
spectrum=data_out[self.e_name] * ratio_v,
maxv=self.add_element_intensity,
norm=-1,
status=True, # for plotting
lbd_stat=False,
)
self.EC.add_to_dict({self.e_name: ps})
self.EC.update_peak_ratio()
def _generate_param_keys(self, eline):
"""
Returns prefix of the key from `param_new` dictionary based on emission line name
If eline is actual emission line (like Ca_K), then the `key_dcenter` and `key_dsigma`
point to 'a1' line (Ca_ka1). Function has to be extended if access to specific lines is
required. Function is primarily intended for use with user-defined peaks.
"""
category = self.get_eline_name_category(eline)
if category == "pileup":
eline = eline.replace("-", "_")
key_area = "pileup_" + eline + "_area"
key_dcenter = "pileup_" + eline + "delta_center"
key_dsigma = "pileup_" + eline + "delta_sigma"
elif category == "eline":
eline = eline[:-1] + eline[-1].lower()
key_area = eline + "a1_area"
key_dcenter = eline + "a1_delta_center"
key_dsigma = eline + "a1_delta_sigma"
elif category == "userpeak":
key_area = eline + "_area"
key_dcenter = eline + "_delta_center"
key_dsigma = eline + "_delta_sigma"
elif eline == "compton":
key_area = eline + "_amplitude"
key_dcenter, key_dsigma = "", ""
else:
# No key exists (for "background", "escape", "elastic")
key_area, key_dcenter, key_dsigma = "", "", ""
return {"key_area": key_area, "key_dcenter": key_dcenter, "key_dsigma": key_dsigma}
def modify_peak_height(self, maxv_new):
"""
Modify the height of the emission line.
Parameters
----------
maxv_new: float
New maximum value for the emission line `self.e_name`
"""
ignored_peaks = {"escape"}
if self.e_name in ignored_peaks:
msg = f"Height of the '{self.e_name}' peak can not be changed."
raise RuntimeError(msg)
if self.e_name not in self.EC.element_dict:
msg = (
f"Attempt to modify maximum value for the emission line '{self.e_name},'\n"
f"which is not currently selected."
)
raise RuntimeError(msg)
key = self._generate_param_keys(self.e_name)["key_area"]
maxv_current = self.EC.element_dict[self.e_name].maxv
coef = maxv_new / maxv_current if maxv_current > 0 else 0
# Only 'maxv' needs to be updated.
self.EC.element_dict[self.e_name].maxv = maxv_new
# The following function updates 'spectrum', 'area' and 'norm'.
self.EC.update_peak_ratio()
# Some of the parameters are represented only in EC, not in 'self.param_new'.
# (particularly "background" and "elastic")
if key:
self.param_new[key]["value"] *= coef
def _compute_fwhm_base(self, energy):
# Computes the base FWHM value from the default parameters and the peak energy
# (for userpeaks); it does not include user corrections to FWHM.
# If both the peak center (energy) and FWHM are updated, the energy needs to be set first,
# since it is used in the computation of ``fwhm_base``
sigma = gaussian_fwhm_to_sigma(self.param_new["fwhm_offset"]["value"])
sigma_sqr = energy + 5.0 # center
sigma_sqr *= self.param_new["non_fitting_values"]["epsilon"] # epsilon
sigma_sqr *= self.param_new["fwhm_fanoprime"]["value"] # fanoprime
sigma_sqr += sigma * sigma # We have computed the expression under sqrt
sigma_total = np.sqrt(sigma_sqr)
return gaussian_sigma_to_fwhm(sigma_total)
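# In symbols, the computation above evaluates (following the code literally)
#
#     sigma_0     = gaussian_fwhm_to_sigma(fwhm_offset)
#     sigma_total = sqrt(sigma_0**2 + (energy + 5.0) * epsilon * fwhm_fanoprime)
#     fwhm_base   = gaussian_sigma_to_fwhm(sigma_total)
#
# where 'epsilon' comes from 'non_fitting_values' and 5.0 keV is the reference
# energy used throughout this module for storing userpeak 'delta_center' values
# (cf. 'userpeak_center - 5.0' in '_manual_input').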
def _update_userpeak_energy(self, eline, energy_new, fwhm_new):
"""
Set new center for the user-defined peak at 'new_energy'
"""
# According to the accepted peak model, as energy of the peak center grows,
# the peak becomes wider. The most user friendly solution is to automatically
# increase FWHM as the peak moves along the energy axis to the right and
# decrease otherwise. So generally, the user should first place the peak
# center at the desired energy, and then adjust FWHM.
# We change energy, so we will have to change FWHM as well
# so before updating energy we will save the difference between
# the default (base) FWHM and the displayed FWHM
name_userpeak_dcenter = eline + "_delta_center"
old_energy = self.param_new[name_userpeak_dcenter]["value"]
# This difference represents the required change in fwhm
fwhm_difference = fwhm_new - self._compute_fwhm_base(old_energy)
# Now we change energy.
denergy = energy_new - 5.0
v_center = self.param_new[name_userpeak_dcenter]["value"]
v_max = self.param_new[name_userpeak_dcenter]["max"]
v_min = self.param_new[name_userpeak_dcenter]["min"]
# Keep the possible range for value change the same
self.param_new[name_userpeak_dcenter]["value"] = denergy
self.param_new[name_userpeak_dcenter]["max"] = denergy + v_max - v_center
self.param_new[name_userpeak_dcenter]["min"] = denergy - (v_center - v_min)
# The base value is updated now (since the energy has changed)
fwhm_base = self._compute_fwhm_base(energy_new)
fwhm = fwhm_difference + fwhm_base
return fwhm
def _update_userpeak_fwhm(self, eline, energy_new, fwhm_new):
name_userpeak_dsigma = eline + "_delta_sigma"
fwhm_base = self._compute_fwhm_base(energy_new)
dfwhm = fwhm_new - fwhm_base
dsigma = gaussian_fwhm_to_sigma(dfwhm)
v_center = self.param_new[name_userpeak_dsigma]["value"]
v_max = self.param_new[name_userpeak_dsigma]["max"]
v_min = self.param_new[name_userpeak_dsigma]["min"]
# Keep the possible range for value change the same
self.param_new[name_userpeak_dsigma]["value"] = dsigma
self.param_new[name_userpeak_dsigma]["max"] = dsigma + v_max - v_center
self.param_new[name_userpeak_dsigma]["min"] = dsigma - (v_center - v_min)
def _update_userpeak_energy_fwhm(self, eline, fwhm_new, energy_new):
"""
Update energy and fwhm of the user-defined peak 'eline'. The 'delta_center'
and 'delta_sigma' parameters in the `self.param_new` dictionary are updated.
`area` should be updated after call to this function. This function also
doesn't change entries in the `EC` dictionary.
"""
# Ensure, that the values are greater than some small value to ensure that
# there is no computational problems.
# Energy resolution for the existing beamlines is 0.01 keV, so 0.001 is small
# enough both for center energy and FWHM.
energy_small_value = 0.001
energy_new = max(energy_new, energy_small_value)
fwhm_new = max(fwhm_new, energy_small_value)
fwhm_new = self._update_userpeak_energy(eline, energy_new, fwhm_new)
self._update_userpeak_fwhm(eline, energy_new, fwhm_new)
def modify_userpeak_params(self, maxv_new, fwhm_new, energy_new):
if self.get_eline_name_category(self.e_name) != "userpeak":
msg = (
f"Hight and width can be modified only for a user defined peak.\n"
f"The function was called for '{self.e_name}' peak"
)
raise RuntimeError(msg)
if self.e_name not in self.EC.element_dict:
msg = (
f"Attempt to modify maximum value for the emission line '{self.e_name},'\n"
f"which is not currently selected."
)
raise RuntimeError(msg)
# Some checks of the input values
if maxv_new <= 0.0:
raise ValueError("Peak height must be a positive number greater than 0.001.")
if energy_new <= 0.0:
raise ValueError("User peak energy must be a positive number greater than 0.001.")
if fwhm_new <= 0:
raise ValueError("User peak FWHM must be a positive number.")
# Make sure that the energy of the user peak is within the selected fitting range
energy_bound_high = self.param_new["non_fitting_values"]["energy_bound_high"]["value"]
energy_bound_low = self.param_new["non_fitting_values"]["energy_bound_low"]["value"]
if energy_new > energy_bound_high or energy_new < energy_bound_low:
raise ValueError("User peak energy is outside the selected range.")
# This updates 'delta_center' and 'delta_sigma' entries of the 'self.param_new' dictionary
self._update_userpeak_energy_fwhm(self.e_name, fwhm_new, energy_new)
default_area = 1e2
key = self._generate_param_keys(self.e_name)["key_area"]
# Set area to default area, change it later once the area is computed
self.param_new[key]["value"] = default_area
# 'self.param_new' is used to provide 'hint' values for the model, but all active
# emission lines in 'elemental_lines' will be included in the model.
# The model will contain lines in 'elemental_lines', Compton and elastic
x, data_out, area_dict = calculate_profile(
self.x0, self.y0, self.param_new, elemental_lines=[self.e_name], default_area=default_area
)
ratio_v = maxv_new / np.max(data_out[self.e_name])
area = area_dict[self.e_name] * ratio_v
self.param_new[key]["value"] = area
ps = PreFitStatus(
z=get_Z(self.e_name),
energy=f"{energy_new:.4f}",
area=area,
spectrum=data_out[self.e_name] * ratio_v,
maxv=maxv_new,
norm=-1,
status=True, # for plotting
lbd_stat=False,
)
self.EC.element_dict[self.e_name] = ps
logger.debug(
f"The parameters of the user defined peak. The new values:\n"
f" Energy: {energy_new} keV, FWHM: {fwhm_new}, Maximum: {maxv_new}\n"
)
def generate_pileup_peak_name(self, name1, name2):
"""
Returns name for the pileup peak. The element line with the lowest
energy is placed first in the name.
"""
incident_energy = self.param_new["coherent_sct_energy"]["value"]
e1 = get_eline_parameters(name1, incident_energy)["energy"]
e2 = get_eline_parameters(name2, incident_energy)["energy"]
if e1 > e2:
name1, name2 = name2, name1
return name1 + "-" + name2
def update_name_list(self):
"""
When result_dict_names change, the looper in enaml will update.
"""
# need to clean list first, in order to refresh the list in GUI
self.result_dict_names = []
self.result_dict_names = list(self.EC.element_dict.keys())
self.element_list = get_element_list(self.param_new)
peak_list = self.get_user_peak_list()
# Create the list of selected emission lines such as Ca_K, K_K, etc.
# No pileup or user peaks
pure_peak_list = [n for n in self.result_dict_names if n in peak_list]
self.n_selected_elines_for_fitting = len(self.result_dict_names)
self.n_selected_pure_elines_for_fitting = len(pure_peak_list)
logger.info(f"The update list of emission lines: {self.result_dict_names}")
def get_eline_name_category(self, eline_name):
"""
Returns the category to which `eline_name` belongs: `eline`, `userpeak`,
`pileup` or `other`.
Parameters
----------
eline_name: str
Name to be analyzed
Returns
-------
str
category: one of `("eline", "userpeak", "pileup" or "other")`
"""
if check_if_eline_supported(eline_name):
return "eline"
elif eline_name.lower().startswith("userpeak"):
return "userpeak"
elif "-" in eline_name: # This is specific to currently accepted naming convention
return "pileup"
else:
return "other"
def _sort_eline_list(self, element_list):
"""
Sort the list of elements
"""
names_elines, names_userpeaks, names_pileup_peaks, names_other = [], [], [], []
for name in element_list:
if self.get_eline_name_category(name) == "eline":
try:
z = get_element_atomic_number(name.split("_")[0])
except Exception:
z = 0
names_elines.append([name, z])
elif self.get_eline_name_category(name) == "userpeak":
names_userpeaks.append(name)
elif self.get_eline_name_category(name) == "pileup":
names_pileup_peaks.append(name)
else:
names_other.append(name)
names_elines.sort(key=lambda v: int(v[1])) # Sort by Z (atomic number)
names_elines = [_[0] for _ in names_elines] # Get rid of Z
names_userpeaks.sort()
names_pileup_peaks.sort()
names_other.sort()
return names_elines + names_userpeaks + names_pileup_peaks + names_other
def get_sorted_result_dict_names(self):
"""
The function returns the list of selected emission lines. The emission lines are
sorted in the following order: emission line names (sorted in the order of growing
atomic number Z), userpeaks (in alphabetic order), pileup peaks (in alphabetic order),
other peaks (in alphabetic order).
Returns
-------
list(str)
the list of emission line names
"""
return self._sort_eline_list(self.result_dict_names)
def get_sorted_element_list(self):
"""
Returns sorted ``element_list``.
"""
return self._sort_eline_list(self.element_list)
def read_param_from_file(self, param_path):
"""
Update parameters if new param_path is given.
Parameters
----------
param_path : str
path to the file with saved parameters
"""
with open(param_path, "r") as json_data:
param = json.load(json_data)
self.update_new_param(param, reset=True)
def find_peak(self, *, threshv=0.1, elemental_lines=None):
"""
Run automatic peak finding, and save the results as a dict of objects.
Parameters
----------
threshv: float
Threshold value: peaks with values smaller than it are not shown in the GUI.
elemental_lines: list(str)
The list of elemental lines to find. If ``None``, then all supported
lines (K, L and M) are searched.
"""
self.define_range() # in case the energy calibration changes
self.prefit_x, out_dict, area_dict = linear_spectrum_fitting(
self.x0, self.y0, self.param_new, elemental_lines=elemental_lines
)
logger.info(
"Energy range: {}, {}".format(
self.param_new["non_fitting_values"]["energy_bound_low"]["value"],
self.param_new["non_fitting_values"]["energy_bound_high"]["value"],
)
)
prefit_dict = OrderedDict()
for k, v in out_dict.items():
ps = PreFitStatus(
z=get_Z(k),
energy=get_energy(k),
area=area_dict[k],
spectrum=v,
maxv=np.around(np.max(v), self.max_area_dig),
norm=-1,
lbd_stat=False,
)
prefit_dict.update({k: ps})
logger.info("Automatic Peak Finding found elements as : {}".format(list(prefit_dict.keys())))
self.EC.delete_all()
self.EC.add_to_dict(prefit_dict)
self.create_full_param()
def create_full_param(self):
"""
Update current ``self.param_new`` with elements from ``self.EC`` (delete elements that
are not in ``self.EC`` and update the existing elements).
"""
self.define_range()
# We set 'self.element_list' from 'EC' (because we want to set elements of 'self.param_new'
# from 'EC.element_dict').
self.element_list = self.EC.get_element_list()
self.param_new = update_param_from_element(self.param_new, self.element_list)
element_temp = [e for e in self.element_list if len(e) <= 4]
pileup_temp = [e for e in self.element_list if "-" in e]
userpeak_temp = [e for e in self.element_list if "user" in e.lower()]
# update area values in param_new according to results saved in ElementController
if len(self.EC.element_dict):
for k, v in self.param_new.items():
if "area" in k:
if "pileup" in k:
name_cut = k[7:-5] # remove pileup_ and _area
for p in pileup_temp:
if name_cut == p.replace("-", "_"):
v["value"] = self.EC.element_dict[p].area
elif "user" in k.lower():
for p in userpeak_temp:
if p in k:
v["value"] = self.EC.element_dict[p].area
else:
for e in element_temp:
k_name, k_line, _ = k.split("_")
e_name, e_line = e.split("_")
if k_name == e_name and e_line.lower() == k_line[0]: # attention: S_k and As_k
v["value"] = self.EC.element_dict[e].area
if "compton" in self.EC.element_dict:
self.param_new["compton_amplitude"]["value"] = self.EC.element_dict["compton"].area
if "coherent_sct_amplitude" in self.EC.element_dict:
self.param_new["coherent_sct_amplitude"]["value"] = self.EC.element_dict["elastic"].area
if "escape" in self.EC.element_dict:
self.param_new["non_fitting_values"]["escape_ratio"] = self.EC.element_dict[
"escape"
].maxv / np.max(self.y0)
else:
self.param_new["non_fitting_values"]["escape_ratio"] = 0.0
def data_for_plot(self):
"""
Save data in terms of K, L, M lines for plot.
"""
self.total_y = None
self.auto_fit_all = {}
for k, v in self.EC.element_dict.items():
if v.status is True:
self.auto_fit_all[k] = v.spectrum
if self.total_y is None:
self.total_y = np.array(v.spectrum) # need to copy an array
else:
self.total_y += v.spectrum
# for k, v in new_dict.items():
# if '-' in k: # pileup
# self.total_pileup[k] = self.EC.element_dict[k].spectrum
# elif 'K' in k:
# self.total_y[k] = self.EC.element_dict[k].spectrum
# elif 'L' in k:
# self.total_l[k] = self.EC.element_dict[k].spectrum
# elif 'M' in k:
# self.total_m[k] = self.EC.element_dict[k].spectrum
# else:
# self.total_y[k] = self.EC.element_dict[k].spectrum
def get_user_peak_list(self):
"""
Returns the list of element emission peaks
"""
return K_LINE + L_LINE + M_LINE
def get_selected_emission_line_data(self):
"""
Assembles the full emission line data for processing.
Returns
-------
list(dict)
Each dictionary includes the following data: "name" (e.g. Ca_ka1 etc.),
"area" (estimated peak area based on current fitting results), "ratio"
(ratio such as Ca_ka2/Ca_ka1)
"""
# Full list of supported emission lines (such as Ca_K)
supported_elines = self.get_user_peak_list()
# Parameter keys start with full emission line name (eg. Ca_ka1)
param_keys = list(self.param_new.keys())
incident_energy = self.param_new["coherent_sct_energy"]["value"]
full_line_list = []
for eline in self.EC.element_dict.keys():
if eline not in supported_elines:
continue
area = self.EC.element_dict[eline].area
lines = [_ for _ in param_keys if _.lower().startswith(eline.lower())]
lines = set(["_".join(_.split("_")[:2]) for _ in lines])
for ln in lines:
eline_info = get_eline_parameters(ln, incident_energy)
data = {"name": ln, "area": area, "ratio": eline_info["ratio"], "energy": eline_info["energy"]}
full_line_list.append(data)
return full_line_list
def guess_pileup_peak_components(self, energy, tolerance=0.05):
"""
Provides a guess on components of pileup peak based on the set of selected emission lines,
and selected energy.
Parameters
----------
energy: float
Approximate (selected) energy of pileup peak location
tolerance: float
Allowed deviation of the sum of component energies from the selected energy, keV
Returns
-------
tuple(str, str, float)
Component emission lines (such as Ca_ka1, K_ka1 etc) and the energy of
the resulting pileup peak.
"""
line_data = self.get_selected_emission_line_data()
energy_min, energy_max = energy - tolerance, energy + tolerance
# Not a very efficient algorithm: it tries all combinations of lines
pileup_components, areas = [], []
for n1, line1 in enumerate(line_data):
for n2 in range(n1, len(line_data)):
line2 = line_data[n2]
if energy_min < line1["energy"] + line2["energy"] < energy_max:
if line1 == line2:
area = line1["area"] * line1["ratio"]
else:
area = line1["area"] * line1["ratio"] + line2["area"] * line2["ratio"]
pileup_components.append((line1["name"], line2["name"], line1["energy"] + line2["energy"]))
areas.append(area)
if len(areas):
# Find index with maximum area
n = areas.index(max(areas))
return pileup_components[n]
else:
return None
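# Illustrative sketch (not part of the original module); 'model' stands for a
# ParamModel instance with Ca_K among the selected lines. Two Ca K lines
# (~3.69 keV each) sum to roughly 7.38 keV, so selecting an energy near that
# value should suggest a pair of Ca K-line components:
#
#     >>> model.guess_pileup_peak_components(energy=7.38, tolerance=0.05)
#     # -> a tuple such as ('Ca_ka1', 'Ca_ka2', 7.38...); the exact pair depends
#     #    on the areas and ratios of the candidate lines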
def save_as(file_path, data):
"""
Save full param dict into a file.
"""
with open(file_path, "w") as outfile:
json.dump(data, outfile, sort_keys=True, indent=4)
def define_range(data, low, high, a0, a1):
"""
Cut the x range according to the values defined in param_dict.
Parameters
----------
data : array
raw spectrum
low : float
low bound in keV
high : float
high bound in keV
a0 : float
offset term of energy calibration
a1 : float
linear term of energy calibration
Returns
-------
x : array
trimmed channel number
y : array
trimmed spectrum according to x
"""
x = np.arange(data.size)
# ratio to transfer energy value back to channel value
# approx_ratio = 100
low_new = int(np.around((low - a0) / a1))
high_new = int(np.around((high - a0) / a1))
x0, y0 = trim(x, data, low_new, high_new)
return x0, y0
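# Illustrative worked example (not part of the original module): with the energy
# calibration E = a0 + a1 * channel, the keV bounds are converted back to channel
# numbers before trimming. For instance, with a0 = 0.0, a1 = 0.01, low = 1.0 and
# high = 10.0:
#
#     low_new  = round((1.0 - 0.0) / 0.01)  = 100
#     high_new = round((10.0 - 0.0) / 0.01) = 1000
#
# so only channels 100..1000 of the spectrum are kept by 'trim'.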
def calculate_profile(x, y, param, elemental_lines, default_area=1e5):
"""
Calculate the spectrum profile based on given parameters. Use function
construct_linear_model from xrf_model.
Parameters
----------
x : array
channel array
y : array
spectrum intensity
param : dict
parameters
elemental_lines : list
such as Si_K, Pt_M
default_area : float
default value for the gaussian area of each element
Returns
-------
x : array
trimmed energy range
temp_d : dict
dict of array
area_dict : dict
dict of area for elements and other peaks
"""
# Need to use deepcopy here to avoid unexpected change on parameter dict
fitting_parameters = copy.deepcopy(param)
total_list, matv, area_dict = construct_linear_model(
x, fitting_parameters, elemental_lines, default_area=default_area
)
temp_d = {k: v for (k, v) in zip(total_list, matv.transpose())}
# add background
bg = snip_method_numba(
y,
fitting_parameters["e_offset"]["value"],
fitting_parameters["e_linear"]["value"],
fitting_parameters["e_quadratic"]["value"],
width=fitting_parameters["non_fitting_values"]["background_width"],
)
temp_d["background"] = bg
x_energy = (
fitting_parameters["e_offset"]["value"]
+ fitting_parameters["e_linear"]["value"] * x
+ fitting_parameters["e_quadratic"]["value"] * x ** 2
)
return x_energy, temp_d, area_dict
def trim_escape_peak(data, param_dict, y_size):
"""
Calculate escape peak within required range.
Parameters
----------
data : array
raw spectrum
param_dict : dict
parameters for fitting
y_size : int
the size of trimmed spectrum
Returns
-------
array :
trimmed escape peak spectrum
"""
ratio = param_dict["non_fitting_values"]["escape_ratio"]
xe, ye = compute_escape_peak(data, ratio, param_dict)
lowv = param_dict["non_fitting_values"]["energy_bound_low"]["value"]
highv = param_dict["non_fitting_values"]["energy_bound_high"]["value"]
xe, es_peak = trim(xe, ye, lowv, highv)
logger.info("Escape peak is considered with ratio {}".format(ratio))
# align to the same length
if y_size > es_peak.size:
temp = es_peak
es_peak = np.zeros(y_size)
es_peak[: temp.size] = temp
else:
es_peak = es_peak[:y_size]
return es_peak
def create_full_dict(param, name_list, fixed_list=["adjust_element2", "adjust_element3"]):
"""
Create full param dict so each item has the same nested dict.
This is for GUI purpose only.
Parameters
----------
param : dict
all parameters including element
name_list : list
strategy names
fixed_list : list
strategy names for which newly created entries are forced to "fixed"
Returns
-------
dict
updated parameter dict
"""
param_new = copy.deepcopy(param)
for n in name_list:
for k, v in param_new.items():
if k == "non_fitting_values":
continue
if n not in v:
# enforce newly created parameter to be fixed
# for strategy in fixed_list
if n in fixed_list:
v.update({n: "fixed"})
else:
v.update({n: v["bound_type"]})
return param_new
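# Illustrative example (not part of the original module): for a parameter entry
# that initially contains only bound information, e.g.
#
#     {"value": 1.0, "min": 0.0, "max": 10.0, "bound_type": "none"}
#
# create_full_dict(param, fit_strategy_list) adds one key per fitting strategy,
# copying "bound_type" for most strategies and forcing the strategies listed in
# 'fixed_list' ("adjust_element2" and "adjust_element3" by default) to "fixed":
#
#     {"value": 1.0, "min": 0.0, "max": 10.0, "bound_type": "none",
#      "fit_with_tail": "none", "free_more": "none", "e_calibration": "none",
#      "linear": "none", "adjust_element1": "none",
#      "adjust_element2": "fixed", "adjust_element3": "fixed"}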
def strip_line(ename):
return ename.split("_")[0]
def get_Z(ename):
"""
Return element's Z number.
Parameters
----------
ename : str
element name
Returns
-------
str
element Z number as a string, or "-" for non-element peaks
"""
non_element = ["compton", "elastic", "background", "escape"]
if (ename.lower() in non_element) or "-" in ename or "user" in ename.lower():
return "-"
else:
e = Element(strip_line(ename))
return str(e.Z)
def get_energy(ename):
"""
Return the energy value for a given emission line name. Non-element peaks are handled as a special case.
"""
non_element = ["compton", "elastic", "background", "escape"]
if (ename.lower() in non_element) or "user" in ename.lower():
return "-"
else:
e = Element(strip_line(ename))
ename = ename.lower()
if "_k" in ename:
energy = e.emission_line["ka1"]
elif "_l" in ename:
energy = e.emission_line["la1"]
elif "_m" in ename:
energy = e.emission_line["ma1"]
return str(np.around(energy, 4))
def get_element_list(param):
"""Extract elements from parameter class object"""
element_list = param["non_fitting_values"]["element_list"]
element_list = [e.strip(" ") for e in element_list.split(",")]
# Unfortunately, "".split(",") returns [""] instead of [], but we need [] !!!
if element_list == [""]:
element_list = []
return element_list
def _set_element_list(element_list, param):
element_list = ", ".join(element_list)
param["non_fitting_values"]["element_list"] = element_list
def _add_element_to_list(eline, param):
"""Add element to list in the parameter class object"""
elist = get_element_list(param)
elist_lower = [_.lower() for _ in elist]
if eline.lower() not in elist_lower:
elist.append(eline)
_set_element_list(elist, param)
def _remove_element_from_list(eline, param):
"""Add element to list in the parameter class object"""
elist = get_element_list(param)
elist_lower = [_.lower() for _ in elist]
try:
index = elist_lower.index(eline.lower())
elist.pop(index)
_set_element_list(elist, param)
except ValueError:
pass
def param_dict_cleaner(parameter, element_list):
"""
Make sure param only contains elements from element_list.
Parameters
----------
parameter : dict
fitting parameters
element_list : list
list of elemental lines
Returns
-------
dict :
new param dict containing given elements
"""
param = copy.deepcopy(parameter)
param_new = {}
elines_list = [e for e in element_list if len(e) <= 4]
elines_lower = [e.lower() for e in elines_list]
pileup_list = [e for e in element_list if "-" in e]
userpeak_list = [e for e in element_list if "user" in e.lower()]
new_element_set = set()
for k, v in param.items():
if k == "non_fitting_values" or k == k.lower():
param_new.update({k: v})
elif "pileup" in k:
for p in pileup_list:
if p.replace("-", "_") in k:
param_new.update({k: v})
new_element_set.add(p)
elif "user" in k.lower():
for p in userpeak_list:
if p in k:
param_new.update({k: v})
new_element_set.add(p)
elif k[:3].lower() in elines_lower:
index = elines_lower.index(k[:3].lower())
param_new.update({k: v})
new_element_set.add(elines_list[index])
elif k[:4].lower() in elines_lower:
index = elines_lower.index(k[:4].lower())
param_new.update({k: v})
new_element_set.add(elines_list[index])
new_element_list = list(new_element_set)
_set_element_list(new_element_list, param_new)
return param_new
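# Illustrative example (not part of the original module) of how keys are matched:
# for element_list = ["Ca_K"], a key such as "Ca_ka1_area" is kept because its
# first four characters ("Ca_k", lowercased) match "ca_k", while keys of lines
# that are not in the list are dropped. All-lowercase keys (e.g. "e_offset",
# "coherent_sct_amplitude") and "non_fitting_values" are always kept.
#
#     >>> param_dict_cleaner(param, ["Ca_K"])   # keeps "Ca_ka1_area", drops "Fe_ka1_area"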
def update_param_from_element(param, element_list):
"""
Clean up or extend param according to new element list.
Parameters
----------
param : dict
fitting parameters
element_list : list
list of elemental lines
Returns
-------
dict
"""
param_new = copy.deepcopy(param)
for eline in element_list:
_add_element_to_list(eline, param_new)
# first remove some items not included in element_list
param_new = param_dict_cleaner(param_new, element_list)
# second add some elements to a full parameter dict
# create full parameter list including elements
PC = ParamController(param_new, element_list)
# parameter values not updated based on param_new, so redo it
param_temp = PC.params
# enforce adjust_element area to be fixed,
# while bound_type in xrf_model is defined as none for area
# for k, v in param_temp.items():
# if '_area' in k:
# v['bound_type'] = 'fixed'
for k, v in param_temp.items():
if k == "non_fitting_values":
continue
if k in param_new:
param_temp[k] = param_new[k]
# for k1 in v.keys():
# v[k1] = param_new[k][k1]
param_new = param_temp
# to create full param dict, for GUI only
param_new = create_full_dict(param_new, fit_strategy_list)
return param_new
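# Illustrative example (not part of the original module) of the module-level
# element-list helpers operating on the comma-separated string stored in
# param["non_fitting_values"]["element_list"]:
#
#     >>> param = {"non_fitting_values": {"element_list": "Ca_K, Fe_K"}}
#     >>> get_element_list(param)
#     ['Ca_K', 'Fe_K']
#     >>> _add_element_to_list("K_K", param)
#     >>> get_element_list(param)
#     ['Ca_K', 'Fe_K', 'K_K']
#     >>> _remove_element_from_list("fe_k", param)   # matching is case-insensitive
#     >>> get_element_list(param)
#     ['Ca_K', 'K_K']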
| {
"repo_name": "NSLS-II/PyXRF",
"path": "pyxrf/model/parameters.py",
"copies": "1",
"size": "57227",
"license": "bsd-3-clause",
"hash": 7991104755529701000,
"line_mean": 35.0598613737,
"line_max": 112,
"alpha_frac": 0.5709367956,
"autogenerated": false,
"ratio": 3.8158965126358604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.488683330823586,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import math
from functools import partial
from matplotlib.figure import Figure, Axes
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1.axes_rgb import make_rgb_axes
from atom.api import Atom, Str, Typed, Int, List, Dict, Bool
from ..core.utils import normalize_data_by_scaler, grid_interpolate
from ..core.xrf_utils import check_if_eline_supported
from .draw_image import DrawImageAdvanced
import logging
logger = logging.getLogger(__name__)
np.seterr(divide="ignore", invalid="ignore") # turn off warning on invalid division
class DrawImageRGB(Atom):
"""
This class draws an RGB image.
Attributes
----------
fig : object
matplotlib Figure
ax : Axes
The `Axes` object of matplotlib
ax_r : Axes
The `Axes` object to add the red-channel artist to
ax_g : Axes
The `Axes` object to add the green-channel artist to
ax_b : Axes
The `Axes` object to add the blue-channel artist to
file_name : str
stat_dict : dict
determine which image to show
img_dict : dict
multiple data sets to plot, such as fit data, or roi data
img_dict_keys : list
data_opt : int
index to show which data is chosen to plot
dict_to_plot : dict
selected data dict to plot, i.e., fitting data or roi is selected
map_keys : list
keys of dict_to_plot
color_opt : str
orange or gray plot
scaler_norm_dict : dict
scaler normalization data, from img_dict
scaler_items : list
keys of scaler_norm_dict
scaler_name_index : int
index to select on GUI level
scaler_data : None or numpy array
selected scaler data
pixel_or_pos : int
index to choose plot with pixel (== 0) or with positions (== 1)
grid_interpolate: bool
choose to interpolate 2D image in terms of x,y or not
plot_all : Bool
controls whether to plot all of the data or not
"""
# Reference to FileIOModel
io_model = Typed(object)
fig = Typed(Figure)
ax = Typed(Axes)
ax_r = Typed(Axes)
ax_g = Typed(Axes)
ax_b = Typed(Axes)
data_opt = Int(0)
img_title = Str()
# plot_opt = Int(0)
# plot_item = Str()
dict_to_plot = Dict()
map_keys = List()
scaler_norm_dict = Dict()
scaler_items = List()
scaler_name_index = Int()
scaler_data = Typed(object)
pixel_or_pos = Int(0)
grid_interpolate = Bool(False)
plot_all = Bool(False)
limit_dict = Dict()
range_dict = Dict()
# 'stat_dict' is legacy from 'DrawImageAdvanced' class. It is not used here,
# but it may be repurposed in the future if multicolor map presentation is developed
stat_dict = Dict()
# Contains dictionary {"red": <key>, "green": <key>, "blue": <key>}, key is the key
# from the dictionary 'self.dict_to_plot' or None.
rgb_keys = List(str) # The list of keys in 'rgb_dict'
rgb_dict = Dict()
# Reference used to access some fields
img_model_adv = Typed(DrawImageAdvanced)
# Variable that indicates whether quantitative normalization should be applied to data
# Associated with 'Quantitative' checkbox
quantitative_normalization = Bool(False)
rgb_name_list = List() # List of names for RGB channels printed on the plot
rgb_limit = Dict()
name_not_scalable = List()
def __init__(self, *, io_model, img_model_adv):
self.io_model = io_model
self.img_model_adv = img_model_adv
self.fig = plt.figure(figsize=(3, 2))
self.rgb_name_list = ["R", "G", "B"]
# Do not apply scaler norm on following data
self.name_not_scalable = [
"r2_adjust",
"r_factor",
"alive",
"dead",
"elapsed_time",
"scaler_alive",
"i0_time",
"time",
"time_diff",
"dwell_time",
]
self.rgb_keys = ["red", "green", "blue"]
self._init_rgb_dict()
def img_dict_updated(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
change : dict
change["value"]: True - 'io_model.img_dict' was updated, False - ignore
"""
if change["value"]:
self.select_dataset(self.io_model.img_dict_default_selected_item)
self.init_plot_status()
def init_plot_status(self):
# init of pos values
self.set_pixel_or_pos(0)
# init of scaler for normalization
self.scaler_name_index = 0
scaler_groups = [v for v in list(self.io_model.img_dict.keys()) if "scaler" in v]
if len(scaler_groups) > 0:
# self.scaler_group_name = scaler_groups[0]
self.scaler_norm_dict = self.io_model.img_dict[scaler_groups[0]]
# for GUI purpose only
self.scaler_items = []
self.scaler_items = list(self.scaler_norm_dict.keys())
self.scaler_items.sort()
self.scaler_data = None
logger.debug(
"The following groups are included for RGB image display: {}".format(self.io_model.img_dict_keys)
)
self.show_image()
def select_dataset(self, dataset_index):
"""
Select dataset. Meaning of the index: 0 - no dataset is selected;
1, 2, ... - the dataset with index 0, 1, ... is selected
Parameters
----------
dataset_index: int
index of the selected dataset
"""
self.data_opt = dataset_index
try:
if self.data_opt == 0:
self.dict_to_plot = {}
self.map_keys.clear()
self.init_limits_and_stat()
self.img_title = ""
elif self.data_opt > 0:
plot_item = self._get_current_plot_item()
self.img_title = str(plot_item)
self.dict_to_plot = self.io_model.img_dict[plot_item]
# for GUI purpose only
self.set_map_keys()
self.init_limits_and_stat()
# Select the first 3 entries for RGB display
for n in range(min(len(self.rgb_keys), len(self.map_keys))):
self.rgb_dict[self.rgb_keys[n]] = self.map_keys[n]
except IndexError:
pass
# Redraw image
self.show_image()
def set_map_keys(self):
"""
Create sorted list of map keys. The list starts with sorted sequence of emission lines,
followed by the sorted list of scalers and other maps.
"""
self.map_keys.clear()
# The key to use with 'img_dict', the name of the current dataset.
plot_item = self._get_current_plot_item()
keys_unsorted = list(self.io_model.img_dict[plot_item].keys())
if len(keys_unsorted) != len(set(keys_unsorted)):
logger.warning(
f"DrawImageAdvanced:set_map_keys(): repeated keys in the dictionary 'img_dict': {keys_unsorted}"
)
keys_elines, keys_scalers = [], []
for key in keys_unsorted:
if check_if_eline_supported(key): # Check if 'key' is an emission line (such as "Ca_K")
keys_elines.append(key)
else:
keys_scalers.append(key)
keys_elines.sort()
keys_scalers.sort()
self.map_keys = keys_elines + keys_scalers
def get_selected_scaler_name(self):
if self.scaler_name_index == 0:
return None
else:
return self.scaler_items[self.scaler_name_index - 1]
def set_scaler_index(self, scaler_index):
self.scaler_name_index = scaler_index
if self.scaler_name_index == 0:
self.scaler_data = None
else:
try:
scaler_name = self.scaler_items[self.scaler_name_index - 1]
except IndexError:
scaler_name = None
if scaler_name:
self.scaler_data = self.scaler_norm_dict[scaler_name]
logger.info(
"Use scaler data to normalize, "
"and the shape of scaler data is {}, "
"with (low, high) as ({}, {})".format(
self.scaler_data.shape, np.min(self.scaler_data), np.max(self.scaler_data)
)
)
self.set_low_high_value() # reset low high values based on normalization
self.show_image()
def _get_current_plot_item(self):
"""Get the key for the current plot item (use in dictionary 'img_dict')"""
return self.io_model.img_dict_keys[self.data_opt - 1]
def set_pixel_or_pos(self, pixel_or_pos):
self.pixel_or_pos = pixel_or_pos
self.show_image()
def set_grid_interpolate(self, grid_interpolate):
self.grid_interpolate = grid_interpolate
self.show_image()
def enable_quantitative_normalization(self, enable):
"""
Enable/Disable quantitative normalization.
Parameters
----------
enable: bool
Enable quantitative normalization if True, disable if False.
"""
self.quantitative_normalization = bool(enable)
self.set_low_high_value() # reset low high values based on normalization
self.show_image()
def set_low_high_value(self):
"""Set default low and high values based on normalization for each image."""
        # do not apply scaler normalization to non-scalable data
self.range_dict.clear()
for data_name in self.dict_to_plot.keys():
if self.quantitative_normalization:
# Quantitative normalization
data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(
data_in=self.dict_to_plot[data_name],
scaler_dict=self.scaler_norm_dict,
scaler_name_default=self.get_selected_scaler_name(),
data_name=data_name,
name_not_scalable=self.name_not_scalable,
)
else:
# Normalize by the selected scaler in a regular way
data_arr = normalize_data_by_scaler(
data_in=self.dict_to_plot[data_name],
scaler=self.scaler_data,
data_name=data_name,
name_not_scalable=self.name_not_scalable,
)
lowv, highv = np.min(data_arr), np.max(data_arr)
# Create some 'artificially' small range in case the array is constant
if lowv == highv:
lowv -= 0.005
highv += 0.005
self.range_dict[data_name] = {"low": lowv, "low_default": lowv, "high": highv, "high_default": highv}
def reset_low_high(self, name):
"""Reset low and high value to default based on normalization."""
self.range_dict[name]["low"] = self.range_dict[name]["low_default"]
self.range_dict[name]["high"] = self.range_dict[name]["high_default"]
self.limit_dict[name]["low"] = 0.0
self.limit_dict[name]["high"] = 100.0
self.show_image()
def _init_rgb_dict(self):
self.rgb_dict = {_: None for _ in self.rgb_keys}
def init_limits_and_stat(self):
"""
Set plotting status for all the 2D images.
Note: 'self.map_keys' must be updated before calling this function!
"""
self.stat_dict.clear()
self.stat_dict = {k: False for k in self.map_keys}
self._init_rgb_dict()
self.limit_dict.clear()
self.limit_dict = {k: {"low": 0.0, "high": 100.0} for k in self.map_keys}
self.set_low_high_value()
def preprocess_data(self):
"""
Normalize data or prepare for linear/log plot.
"""
selected_data = []
selected_name = []
quant_norm_applied = []
rgb_color_to_keys = self.get_rgb_items_for_plot()
for data_key in rgb_color_to_keys.values():
if data_key in self.dict_to_plot:
selected_name.append(data_key)
if self.scaler_data is not None:
if np.count_nonzero(self.scaler_data) == 0:
logger.warning("scaler is zero - scaling was not applied")
elif len(self.scaler_data[self.scaler_data == 0]) > 0:
logger.warning("scaler data has zero values")
for i, k in enumerate(selected_name):
q_norm_applied = False
if self.quantitative_normalization:
# Quantitative normalization
(
data_arr,
q_norm_applied,
) = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(
data_in=self.dict_to_plot[k],
scaler_dict=self.scaler_norm_dict,
scaler_name_default=self.get_selected_scaler_name(),
data_name=k,
name_not_scalable=self.name_not_scalable,
)
else:
# Normalize by the selected scaler in a regular way
data_arr = normalize_data_by_scaler(
data_in=self.dict_to_plot[k],
scaler=self.scaler_data,
data_name=k,
name_not_scalable=self.name_not_scalable,
)
selected_data.append(data_arr)
quant_norm_applied.append(q_norm_applied)
return selected_data, selected_name, rgb_color_to_keys, quant_norm_applied
def show_image(self):
# Don't plot the image if dictionary is empty (causes a lot of issues)
if not self.io_model.img_dict:
return
self.fig.clf()
self.ax = self.fig.add_subplot(111)
self.ax_r, self.ax_g, self.ax_b = make_rgb_axes(self.ax, pad=0.02)
        # Check if positions data is available. Positions data may be unavailable
        # (not recorded in the HDF5 file) if the experiment has not been completed.
        # While the data from the completed part of the experiment may still be used,
        # plotting vs. x-y coordinates or as a scatter plot may not be possible.
positions_data_available = False
if "positions" in self.io_model.img_dict.keys():
positions_data_available = True
# Create local copy of self.pixel_or_pos and self.grid_interpolate
pixel_or_pos_local = self.pixel_or_pos
grid_interpolate_local = self.grid_interpolate
# Disable plotting vs x-y coordinates if 'positions' data is not available
if not positions_data_available:
if pixel_or_pos_local:
pixel_or_pos_local = 0 # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Plotting vs. x-y coordinates is disabled")
if grid_interpolate_local:
grid_interpolate_local = False # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Interpolation is disabled.")
selected_data, selected_names, rgb_color_to_keys, quant_norm_applied = self.preprocess_data()
selected_data = np.asarray(selected_data)
# Hide unused axes
if rgb_color_to_keys["red"] is None:
self.ax_r.set_visible(False)
if rgb_color_to_keys["green"] is None:
self.ax_g.set_visible(False)
if rgb_color_to_keys["blue"] is None:
self.ax_b.set_visible(False)
if selected_data.ndim != 3:
# There is no data to display. Hide the last axis and exit
self.ax.set_visible(False)
return
def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):
"""
Compute ranges for x- and y- axes of the plot. Make sure that the ranges for x- and y-axes are
always equal and fit the maximum of the ranges for x and y values:
max(abs(x_max-x_min), abs(y_max-y_min))
The ranges are set so that the data is always centered in the middle of the ranges
Parameters
----------
x_min, x_max, y_min, y_max : float
lower and upper boundaries of the x and y values
Returns
-------
x_axis_min, x_axis_max, y_axis_min, y_axis_max : float
lower and upper boundaries of the x- and y-axes ranges
"""
x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max
x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)
if x_range > y_range:
y_center = (y_max + y_min) / 2
y_axis_max = y_center + x_range / 2
y_axis_min = y_center - x_range / 2
else:
x_center = (x_max + x_min) / 2
x_axis_max = x_center + y_range / 2
x_axis_min = x_center - y_range / 2
return x_axis_min, x_axis_max, y_axis_min, y_axis_max
def _adjust_data_range_using_min_ratio(c_min, c_max, c_axis_range, *, min_ratio=0.01):
"""
Adjust the range for plotted data along one axis (x or y). The adjusted range is
applied to the 'extent' attribute of imshow(). The adjusted range is always greater
            than 'axis_range * min_ratio'. Such a transformation has no physical meaning
            and is performed purely for aesthetic reasons: stretching the image presentation of
            a scan with only a few lines (1-3) greatly improves the visibility of the data.
Parameters
----------
c_min, c_max : float
boundaries of the data range (along x or y axis)
c_axis_range : float
range presented along the same axis
Returns
-------
            c_min, c_max : float
adjusted boundaries of the data range
"""
c_range = c_max - c_min
if c_range < c_axis_range * min_ratio:
c_center = (c_max + c_min) / 2
c_new_range = c_axis_range * min_ratio
c_min = c_center - c_new_range / 2
c_max = c_center + c_new_range / 2
return c_min, c_max
if pixel_or_pos_local:
# xd_min, xd_max, yd_min, yd_max = min(self.x_pos), max(self.x_pos),
# min(self.y_pos), max(self.y_pos)
x_pos_2D = self.io_model.img_dict["positions"]["x_pos"]
y_pos_2D = self.io_model.img_dict["positions"]["y_pos"]
xd_min, xd_max, yd_min, yd_max = x_pos_2D.min(), x_pos_2D.max(), y_pos_2D.min(), y_pos_2D.max()
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = _compute_equal_axes_ranges(
xd_min, xd_max, yd_min, yd_max
)
xd_min, xd_max = _adjust_data_range_using_min_ratio(xd_min, xd_max, xd_axis_max - xd_axis_min)
yd_min, yd_max = _adjust_data_range_using_min_ratio(yd_min, yd_max, yd_axis_max - yd_axis_min)
# Adjust the direction of each axis depending on the direction in which encoder values changed
# during the experiment. Data is plotted starting from the upper-right corner of the plot
if x_pos_2D[0, 0] > x_pos_2D[0, -1]:
xd_min, xd_max, xd_axis_min, xd_axis_max = xd_max, xd_min, xd_axis_max, xd_axis_min
if y_pos_2D[0, 0] > y_pos_2D[-1, 0]:
yd_min, yd_max, yd_axis_min, yd_axis_max = yd_max, yd_min, yd_axis_max, yd_axis_min
else:
if selected_data.ndim == 3:
# Set equal ranges for the axes data
yd, xd = selected_data.shape[1], selected_data.shape[2]
xd_min, xd_max, yd_min, yd_max = 0, xd, 0, yd
# Select minimum range for data
if (yd <= math.floor(xd / 100)) and (xd >= 200):
yd_min, yd_max = -math.floor(xd / 200), math.ceil(xd / 200)
if (xd <= math.floor(yd / 100)) and (yd >= 200):
xd_min, xd_max = -math.floor(yd / 200), math.ceil(yd / 200)
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = _compute_equal_axes_ranges(
xd_min, xd_max, yd_min, yd_max
)
name_r, data_r, limits_r = "", None, {"low": 0, "high": 100.0}
name_g, data_g, limits_g = "", None, {"low": 0, "high": 100.0}
name_b, data_b, limits_b = "", None, {"low": 0, "high": 100.0}
for color, name in rgb_color_to_keys.items():
if name:
try:
ind = selected_names.index(name)
name_label = name
if quant_norm_applied[ind]:
name_label += " - Q" # Add suffix to name if quantitative normalization was applied
if color == "red":
name_r, data_r = name_label, selected_data[ind]
limits_r = self.limit_dict[name]
elif color == "green":
name_g, data_g = name_label, selected_data[ind]
limits_g = self.limit_dict[name]
elif color == "blue":
name_b, data_b = name_label, selected_data[ind]
limits_b = self.limit_dict[name]
except ValueError:
pass
def _norm_data(data):
"""
Normalize data between (0, 1).
Parameters
----------
data : 2D array
"""
if data is None:
return data
data_min = np.min(data)
c_norm = np.max(data) - data_min
return (data - data_min) / c_norm if (c_norm != 0) else (data - data_min)
def _stretch_range(data_in, v_low, v_high):
            # 'data_in' is already normalized, so that the values are in the range 0..1
# v_low, v_high are in the range 0..100
if data_in is None:
return data_in
if (v_low <= 0) and (v_high >= 100):
return data_in
if v_high - v_low < 1: # This should not happen in practice, but check just in case
v_high = v_low + 1
v_low, v_high = v_low / 100.0, v_high / 100.0
c = 1.0 / (v_high - v_low)
data_out = (data_in - v_low) * c
return np.clip(data_out, 0, 1.0)
# Interpolate non-uniformly spaced data to uniform grid
if grid_interpolate_local:
data_r, _, _ = grid_interpolate(
data_r, self.io_model.img_dict["positions"]["x_pos"], self.io_model.img_dict["positions"]["y_pos"]
)
data_g, _, _ = grid_interpolate(
data_g, self.io_model.img_dict["positions"]["x_pos"], self.io_model.img_dict["positions"]["y_pos"]
)
data_b, _, _ = grid_interpolate(
data_b, self.io_model.img_dict["positions"]["x_pos"], self.io_model.img_dict["positions"]["y_pos"]
)
# The dictionaries 'rgb_view_data' and 'pos_limits' are used for monitoring
# the map values at current cursor positions.
rgb_view_data = {_: None for _ in self.rgb_keys}
if data_r is not None:
rgb_view_data["red"] = data_r
if data_g is not None:
rgb_view_data["green"] = data_g
if data_b is not None:
rgb_view_data["blue"] = data_b
pos_limits = {"x_low": xd_min, "x_high": xd_max, "y_low": yd_min, "y_high": yd_max}
# Normalize data
data_r_norm = _norm_data(data_r)
data_g_norm = _norm_data(data_g)
data_b_norm = _norm_data(data_b)
data_r_norm = _stretch_range(data_r_norm, limits_r["low"], limits_r["high"])
data_g_norm = _stretch_range(data_g_norm, limits_g["low"], limits_g["high"])
data_b_norm = _stretch_range(data_b_norm, limits_b["low"], limits_b["high"])
R, G, B, RGB = make_cube(data_r_norm, data_g_norm, data_b_norm)
red_patch = mpatches.Patch(color="red", label=name_r)
green_patch = mpatches.Patch(color="green", label=name_g)
blue_patch = mpatches.Patch(color="blue", label=name_b)
def format_coord_func(x, y, *, pixel_or_pos, rgb_color_to_keys, rgb_view_data, pos_limits, colors=None):
x0, y0 = pos_limits["x_low"], pos_limits["y_low"]
if colors is None:
colors = list(rgb_color_to_keys.keys())
s = ""
for n, color in enumerate(self.rgb_keys):
if (color not in colors) or (rgb_color_to_keys[color] is None) or (rgb_view_data[color] is None):
continue
map = rgb_view_data[color]
ny, nx = map.shape
dy = (pos_limits["y_high"] - y0) / ny if ny else 0
dx = (pos_limits["x_high"] - x0) / nx if nx else 0
cy = 1 / dy if dy else 1
cx = 1 / dx if dx else 1
x_pixel = math.floor((x - x0) * cx)
y_pixel = math.floor((y - y0) * cy)
if (0 <= x_pixel < nx) and (0 <= y_pixel < ny):
# The following line is extremely useful for debugging the feature. Keep it.
# s += f" <b>{rgb_color_to_keys[color]}</b>: {x_pixel} {y_pixel}"
s += f" <b>{rgb_color_to_keys[color]}</b>: {map[y_pixel, x_pixel]:.5g}"
s = " - " + s if s else s # Add dash if something is to be printed
if pixel_or_pos:
# Spatial coordinates (double)
s_coord = f"({x:.5g}, {y:.5g})"
else:
# Pixel coordinates (int)
s_coord = f"({int(x)}, {int(y)})"
return s_coord + s
format_coord = partial(
format_coord_func,
pixel_or_pos=pixel_or_pos_local,
rgb_color_to_keys=rgb_color_to_keys,
rgb_view_data=rgb_view_data,
pos_limits=pos_limits,
)
def format_cursor_data(data):
return "" # Print nothing
kwargs = dict(origin="upper", interpolation="nearest", extent=(xd_min, xd_max, yd_max, yd_min))
if RGB is not None:
img = self.ax.imshow(RGB, **kwargs)
self.ax.format_coord = format_coord
img.format_cursor_data = format_cursor_data
self.ax.set_xlim(xd_axis_min, xd_axis_max)
self.ax.set_ylim(yd_axis_max, yd_axis_min)
if R is not None:
img = self.ax_r.imshow(R, **kwargs)
self.ax_r.set_xlim(xd_axis_min, xd_axis_max)
self.ax_r.set_ylim(yd_axis_max, yd_axis_min)
format_coord_r = partial(format_coord, colors=["red"])
self.ax_r.format_coord = format_coord_r
img.format_cursor_data = format_cursor_data
if G is not None:
img = self.ax_g.imshow(G, **kwargs)
self.ax_g.set_xlim(xd_axis_min, xd_axis_max)
self.ax_g.set_ylim(yd_axis_max, yd_axis_min)
format_coord_g = partial(format_coord, colors=["green"])
self.ax_g.format_coord = format_coord_g
img.format_cursor_data = format_cursor_data
if B is not None:
img = self.ax_b.imshow(B, **kwargs)
self.ax_b.set_xlim(xd_axis_min, xd_axis_max)
self.ax_b.set_ylim(yd_axis_max, yd_axis_min)
format_coord_b = partial(format_coord, colors=["blue"])
self.ax_b.format_coord = format_coord_b
img.format_cursor_data = format_cursor_data
self.ax.xaxis.set_major_locator(mticker.MaxNLocator(nbins="auto"))
self.ax.yaxis.set_major_locator(mticker.MaxNLocator(nbins="auto"))
plt.setp(self.ax_r.get_xticklabels(), visible=False)
plt.setp(self.ax_r.get_yticklabels(), visible=False)
plt.setp(self.ax_g.get_xticklabels(), visible=False)
plt.setp(self.ax_g.get_yticklabels(), visible=False)
plt.setp(self.ax_b.get_xticklabels(), visible=False)
plt.setp(self.ax_b.get_yticklabels(), visible=False)
# self.ax_r.set_xticklabels([])
# self.ax_r.set_yticklabels([])
# sb_x = 38
# sb_y = 46
# sb_length = 10
# sb_height = 1
# ax.add_patch(mpatches.Rectangle(( sb_x, sb_y), sb_length, sb_height, color='white'))
# ax.text(sb_x + sb_length /2, sb_y - 1*sb_height, '100 nm', color='w', ha='center',
# va='bottom', backgroundcolor='black', fontsize=18)
self.ax_r.legend(
loc="upper left",
bbox_to_anchor=(1.1, 0),
frameon=False,
handles=[red_patch, green_patch, blue_patch],
mode="expand",
)
# self.fig.tight_layout(pad=4.0, w_pad=0.8, h_pad=0.8)
# self.fig.tight_layout()
# self.fig.canvas.draw_idle()
# self.fig.suptitle(self.img_title, fontsize=20)
self.fig.canvas.draw_idle()
def get_selected_items_for_plot(self):
"""Collect the selected items for plotting."""
# We want the dictionary to be sorted the same way as 'map_keys'
sdict = self.stat_dict
selected_keys = [_ for _ in self.map_keys if (_ in sdict) and (sdict[_] is True)]
return selected_keys
def get_rgb_items_for_plot(self):
# Verify integrity of the dictionary
if len(self.rgb_dict) != 3:
raise ValueError(
"DrawImageRGB.get_rgb_items_for_plot: dictionary 'rgb_dict' has "
f"{len(self.rgb_dict)} elements. Expected number of elements: "
f"{len(self.rgb_keys)}."
)
for key in self.rgb_keys:
if key not in self.rgb_dict:
raise ValueError(
"DrawImageRGB.get_rgb_items_for_plot: dictionary 'rgb_dict' is "
f"incomplete or contains incorrect set of keys: {list(self.rgb_dict.keys())}. "
f"Expected keys: {self.rgb_keys}: "
)
return self.rgb_dict
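# --- Illustrative sketch (not part of the original module) --------------------
# The nested helpers '_norm_data' and '_stretch_range' inside 'show_image'
# first map each selected map onto 0..1 and then stretch the user-selected
# (low, high) percentage window to the full 0..1 range. The standalone
# function below mirrors that arithmetic on a plain array so the mapping can
# be inspected in isolation. It assumes only numpy ('np'), which is already
# imported at the top of this module.
def _demo_norm_and_stretch(data, v_low=0.0, v_high=100.0):
    """Normalize 'data' to 0..1 and stretch the (v_low, v_high) percent
    window to 0..1, clipping values outside it (mirrors '_norm_data' and
    '_stretch_range' above). E.g. _demo_norm_and_stretch([0, 5, 10], 25, 75)
    returns array([0. , 0.5, 1. ]).
    """
    data = np.asarray(data, dtype=float)
    data_min = data.min()
    c_norm = data.max() - data_min
    normed = (data - data_min) / c_norm if c_norm != 0 else data - data_min
    if (v_low <= 0) and (v_high >= 100):
        return normed
    if v_high - v_low < 1:
        v_high = v_low + 1
    v_low, v_high = v_low / 100.0, v_high / 100.0
    return np.clip((normed - v_low) / (v_high - v_low), 0, 1.0)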
def make_cube(r, g, b):
"""
Create 3D array for rgb image.
Parameters
----------
r : 2D array
g : 2D array
b : 2D array
"""
if r is None and g is None and b is None:
logger.error("'make_cube': 'r', 'g' and 'b' input arrays are all None")
        R = G = B = RGB = None
else:
for arr in [r, g, b]:
if arr is not None:
ny, nx = arr.shape
break
R = np.zeros([ny, nx, 3])
R[:, :, 0] = r
G = np.zeros_like(R)
G[:, :, 1] = g
B = np.zeros_like(R)
B[:, :, 2] = b
RGB = R + G + B
return R, G, B, RGB
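# --- Illustrative sketch (not part of the original module) --------------------
# Minimal standalone use of 'make_cube' above: three small normalized maps are
# combined into per-channel cubes and the composite RGB cube. Assumes only
# numpy ('np'), which is already imported at the top of this module.
def _demo_make_cube():
    r = np.array([[1.0, 0.0], [0.5, 0.0]])
    g = np.array([[0.0, 1.0], [0.5, 0.0]])
    b = np.zeros((2, 2))
    R, G, B, RGB = make_cube(r, g, b)
    # Each per-channel cube is (ny, nx, 3) with the data in its own channel;
    # the composite cube is simply the sum of the three.
    assert RGB.shape == (2, 2, 3)
    assert RGB[0, 0, 0] == 1.0 and RGB[0, 1, 1] == 1.0
    return RGB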
| {
"repo_name": "NSLS-II/PyXRF",
"path": "pyxrf/model/draw_image_rgb.py",
"copies": "1",
"size": "31070",
"license": "bsd-3-clause",
"hash": -6549786174653489000,
"line_mean": 37.452970297,
"line_max": 114,
"alpha_frac": 0.5438043128,
"autogenerated": false,
"ratio": 3.5923228118857673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4636127124685767,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import commah
def runcommand(cosmology='WMAP5'):
""" Example interface commands """
# Return the WMAP5 cosmology concentration predicted for
# z=0 range of masses
Mi = [1e8, 1e9, 1e10]
zi = 0
print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
print(output['c'].flatten())
# Return the WMAP5 cosmology concentration predicted for
# z=0 range of masses AND cosmological parameters
Mi = [1e8, 1e9, 1e10]
zi = 0
print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
output, cosmo = commah.run(cosmology=cosmology, zi=zi, Mi=Mi,
retcosmo=True)
print(output['c'].flatten())
print(cosmo)
# Return the WMAP5 cosmology concentration predicted for MW
# mass (2e12 Msol) across redshift
Mi = 2e12
z = [0, 0.5, 1, 1.5, 2, 2.5]
output = commah.run(cosmology=cosmology, zi=0, Mi=Mi, z=z)
for zval in z:
print("M(z=0)=%s has c(z=%s)=%s"
% (Mi, zval, output[output['z'] == zval]['c'].flatten()))
# Return the WMAP5 cosmology concentration predicted for MW
# mass (2e12 Msol) across redshift
Mi = 2e12
zi = [0, 0.5, 1, 1.5, 2, 2.5]
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
for zval in zi:
print("M(z=%s)=%s has concentration %s"
% (zval, Mi, output[(output['zi'] == zval) &
(output['z'] == zval)]['c'].flatten()))
# Return the WMAP5 cosmology concentration and
# rarity of high-z cluster
Mi = 2e14
zi = 6
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
print(output['c'].flatten())
print("Mass variance sigma of haloes of mass %s at z=%s" % (Mi, zi))
print(output['sig'].flatten())
print("Fluctuation for haloes of mass %s at z=%s" % (Mi, zi))
print(output['nu'].flatten())
# Return the WMAP5 cosmology accretion rate prediction
# for haloes at range of redshift and mass
Mi = [1e8, 1e9, 1e10]
zi = [0]
z = [0, 0.5, 1, 1.5, 2, 2.5]
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, z=z)
for Mval in Mi:
print("dM/dt for halo of mass %s at z=%s across redshift %s is: "
% (Mval, zi, z))
print(output[output['Mi'] == Mval]['dMdt'].flatten())
# Return the WMAP5 cosmology Halo Mass History for haloes with M(z=0) = 1e8
M = [1e8]
z = [0, 0.5, 1, 1.5, 2, 2.5]
print("Halo Mass History for z=0 mass of %s across z=%s" % (M, z))
output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)
print(output['Mz'].flatten())
# Return the WMAP5 cosmology formation redshifts for haloes at
# range of redshift and mass
M = [1e8, 1e9, 1e10]
z = [0]
print("Formation Redshifts for haloes of mass %s at z=%s" % (M, z))
output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)
for Mval in M:
print(output[output['Mi'] == Mval]['zf'].flatten())
return("Done")
def plotcommand(cosmology='WMAP5', plotname=None):
""" Example ways to interrogate the dataset and plot the commah output """
    # Plot the c-M relation as a function of redshift
xarray = 10**(np.arange(1, 15, 0.2))
yval = 'c'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass (M$_{sol}$)"
ytitle = r"Concentration"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
plt.ylim([2, 30])
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray)
# Access the column yval from the data file
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray, label=linelabel+str(zval), color=colors[zind])
# Overplot the D08 predictions in black
ax.plot(xarray, commah.commah.cduffy(zval, xarray), color="black")
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_CM_relation.png'" % (plotname))
fig.savefig(plotname+"_CM_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the c-z relation as a function of mass (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'c'
# Specify the mass range
zarray = 10**np.arange(6, 14, 2)
xtitle = r"Redshift"
ytitle = r"NFW Concentration"
linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
# Access the column yval from the data file
yarray = output[yval].flatten()
# Plot each line in turn with different colours
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_Cz_relation.png'" % (plotname))
fig.savefig(plotname+"_Cz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the zf-z relation for different masses (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'zf'
# Specify the mass range
zarray = 10**np.arange(6, 14, 2)
xtitle = r"Redshift"
ytitle = r"Formation Redshift"
linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_zfz_relation.png'" % (plotname))
fig.savefig(plotname+"_zfz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the dM/dt-z relation for different masses (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'dMdt'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"log$_{10}$ (1+z)"
ytitle = r"log$_{10}$ Accretion Rate M$_{sol}$ yr$^{-1}$"
linelabel = r"log$_{10}$ M$_z$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
cosmo = commah.getcosmo(cosmology)
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval,
com=False, mah=True)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(np.log10(xarray+1.), np.log10(yarray),
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
        # Plot the semi-analytic approximation from Correa et al. 2015b
semianalytic_approx = 71.6 * (zval / 1e12) * (cosmo['h'] / 0.7) *\
(-0.24 + 0.75 * (xarray + 1)) * np.sqrt(
cosmo['omega_M_0'] * (xarray + 1)**3 + cosmo['omega_lambda_0'])
ax.plot(np.log10(xarray + 1), np.log10(semianalytic_approx),
color='black')
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_dMdtz_relation.png'" % (plotname))
fig.savefig(plotname+"_dMdtz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the dMdt-M relation as a function of redshift
xarray = 10**(np.arange(10, 14, 0.5))
yval = 'dMdt'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass M$_{sol}$"
ytitle = r"Accretion Rate M$_{sol}$ yr$^{-1}$"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
com=False, mah=True)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray, label=linelabel+str(zval),
color=colors[zind],)
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_MAH_M_relation.png'" % (plotname))
fig.savefig(plotname+"_MAH_M_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the (dM/M)dt-M relation as a function of redshift
xarray = 10**(np.arange(10, 14, 0.5))
yval = 'dMdt'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass M$_{sol}$"
ytitle = r"Specific Accretion Rate yr$^{-1}$"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
mah=True, com=False)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray/xarray, label=linelabel+str(zval),
color=colors[zind],)
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_specificMAH_M_relation.png'" % (plotname))
fig.savefig(plotname+"_specificMAH_M_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the Mz-z relation as a function of mass
# (so mass is decreasing to zero as z-> inf)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'Mz'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"Redshift"
ytitle = r"M(z) (M$_{sol}$)"
linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_Mzz_relation.png'" % (plotname))
fig.savefig(plotname+"_Mzz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the Mz/M0-z relation as a function of mass
xarray = 10**(np.arange(0, 1, 0.02)) - 1
yval = 'Mz'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"Redshift"
ytitle = r"log$_{10}$ M(z)/M$_{0}$"
linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, np.log10(yarray/zval),
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=3)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_MzM0z_relation.png'" % (plotname))
fig.savefig(plotname+"_MzM0z_relation.png", dpi=fig.dpi*5)
else:
plt.show()
return("Done")
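# --- Illustrative sketch (not part of the original examples) ------------------
# The accretion-rate panel in 'plotcommand' overplots the semi-analytic
# approximation from Correa et al. (2015b). The helper below simply restates
# that expression as a standalone function. 'mass' is the halo mass in Msol,
# 'z' the redshift; the default cosmology values are placeholders only, so for
# real use pass the 'h', 'omega_M_0' and 'omega_lambda_0' values returned by
# commah.getcosmo(cosmology).
def dMdt_semianalytic(mass, z, h=0.7, omega_M_0=0.3, omega_lambda_0=0.7):
    """Approximate halo accretion rate in Msol/yr (Correa et al. 2015b),
    mirroring the expression used in 'plotcommand' above."""
    return (71.6 * (mass / 1e12) * (h / 0.7) *
            (-0.24 + 0.75 * (z + 1)) *
            np.sqrt(omega_M_0 * (z + 1)**3 + omega_lambda_0))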
| {
"repo_name": "astroduff/commah",
"path": "examples.py",
"copies": "1",
"size": "15060",
"license": "bsd-3-clause",
"hash": -3090736719894958000,
"line_mean": 31.3175965665,
"line_max": 79,
"alpha_frac": 0.5928286853,
"autogenerated": false,
"ratio": 2.9396837790357213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9032512464335721,
"avg_score": 0,
"num_lines": 466
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
__all__ = ['TaylorDiagram']
class TaylorDiagram(object):
"""
Taylor diagram: plot model standard deviation and correlation
to reference (data) sample in a single-quadrant polar plot, with
r=stddev and theta=arccos(correlation).
Taylor diagram (Taylor, 2001) test implementation.
http://www-pcmdi.llnl.gov/about/staff/Taylor/CV/Taylor_diagram_primer.htm
https://gist.github.com/ycopin/3342888
"""
def __init__(self, refstd, fig=None, rect=111, label='_'):
"""
Set up Taylor diagram axes, i.e. single quadrant polar
plot, using mpl_toolkits.axisartist.floating_axes. refstd is
the reference standard deviation to be compared to.
"""
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist import floating_axes
from mpl_toolkits.axisartist import grid_finder
self.refstd = refstd # Reference standard deviation.
tr = PolarAxes.PolarTransform()
# Correlation labels.
rlocs = np.concatenate((np.arange(10)/10., [0.95, 0.99]))
tlocs = np.arccos(rlocs) # Conversion to polar angles.
gl1 = grid_finder.FixedLocator(tlocs) # Positions.
dict_formatter = dict(list(zip(tlocs, map(str, rlocs))))
tf1 = grid_finder.DictFormatter(dict_formatter)
# Standard deviation axis extent.
self.smin = 0
self.smax = 1.5*self.refstd
extremes = (0,
np.pi/2, # 1st quadrant.
self.smin,
self.smax)
ghelper = floating_axes.GridHelperCurveLinear(tr,
extremes=extremes,
grid_locator1=gl1,
tick_formatter1=tf1)
if fig is None:
fig = plt.figure()
ax = floating_axes.FloatingSubplot(fig, rect, grid_helper=ghelper)
fig.add_subplot(ax)
# Adjust axes.
ax.axis["top"].set_axis_direction("bottom") # "Angle axis".
ax.axis["top"].toggle(ticklabels=True, label=True)
ax.axis["top"].major_ticklabels.set_axis_direction("top")
ax.axis["top"].label.set_axis_direction("top")
ax.axis["top"].label.set_text("Correlation")
ax.axis["left"].set_axis_direction("bottom") # "X axis".
ax.axis["left"].label.set_text("Standard deviation")
ax.axis["right"].set_axis_direction("top") # "Y axis".
ax.axis["right"].toggle(ticklabels=True)
ax.axis["right"].major_ticklabels.set_axis_direction("left")
ax.axis["bottom"].set_visible(False) # Useless.
# Contours along standard deviations.
ax.grid(False)
self._ax = ax # Graphical axes.
self.ax = ax.get_aux_axes(tr) # Polar coordinates.
# Add reference point and stddev contour.
l, = self.ax.plot([0], self.refstd, 'ko',
ls='', ms=10, label=label)
t = np.linspace(0, np.pi/2)
r = np.zeros_like(t) + self.refstd
self.ax.plot(t, r, 'k--', label='_')
        # Collect sample points for later use (e.g. legend).
self.samplePoints = [l]
def add_sample(self, stddev, corrcoef, *args, **kwargs):
"""
        Add a sample (stddev, corrcoef) to the Taylor diagram. args
        and kwargs are directly propagated to the underlying plot
        command.
"""
l, = self.ax.plot(np.arccos(corrcoef), stddev,
*args, **kwargs) # (theta, radius).
self.samplePoints.append(l)
return l
def add_contours(self, levels=5, **kwargs):
"""
Add constant centered RMS difference contours.
"""
rs, ts = np.meshgrid(np.linspace(self.smin, self.smax),
np.linspace(0, np.pi/2))
# Compute centered RMS difference.
rms = np.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*np.cos(ts))
contours = self.ax.contour(ts, rs, rms, levels, **kwargs)
return contours
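# --- Illustrative sketch (not part of the original module) --------------------
# 'add_contours' relies on the law-of-cosines identity behind the Taylor
# diagram: the centered RMS difference E' satisfies
#     E'**2 = std_ref**2 + std_model**2 - 2*std_ref*std_model*corr.
# The small check below verifies the identity numerically for one synthetic
# reference/model pair (standard deviations taken with ddof=0 to match the
# mean used for E').
def _check_centered_rms_identity():
    rng = np.random.RandomState(0)
    ref = np.sin(np.linspace(0, 4*np.pi, 200))
    model = 0.8*ref + 0.1*rng.randn(ref.size)
    ref_c, model_c = ref - ref.mean(), model - model.mean()
    direct = np.sqrt(np.mean((model_c - ref_c)**2))
    corr = np.corrcoef(ref, model)[0, 1]
    via_formula = np.sqrt(ref_c.std()**2 + model_c.std()**2 -
                          2*ref_c.std()*model_c.std()*corr)
    assert abs(direct - via_formula) < 1e-12
    return direct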
if __name__ == '__main__':
# Reference dataset.
x = np.linspace(0, 4*np.pi, 100)
data = np.sin(x)
refstd = data.std(ddof=1) # Reference standard deviation.
# Models.
m1 = data + 0.2*np.random.randn(len(x)) # Model 1.
m2 = 0.8*data + 0.1*np.random.randn(len(x)) # Model 2.
m3 = np.sin(x-np.pi/10) # Model 3.
# Compute stddev and correlation coefficient of models.
samples = np.array([[m.std(ddof=1), np.corrcoef(data, m)[0, 1]]
for m in (m1, m2, m3)])
fig = plt.figure(figsize=(10, 4))
ax1 = fig.add_subplot(1, 2, 1, xlabel='X', ylabel='Y')
# Taylor diagram.
dia = TaylorDiagram(refstd, fig=fig, rect=122, label="Reference")
colors = plt.matplotlib.cm.jet(np.linspace(0, 1, len(samples)))
ax1.plot(x, data, 'ko', label='Data')
for k, m in enumerate([m1, m2, m3]):
ax1.plot(x, m, c=colors[k], label='Model %d' % (k+1))
ax1.legend(numpoints=1, prop=dict(size='small'), loc='best')
# Add samples to Taylor diagram.
for k, (stddev, corrcoef) in enumerate(samples):
dia.add_sample(stddev, corrcoef, marker='s', ls='', c=colors[k],
label="Model %d" % (k+1))
# Add RMS contours, and label them.
contours = dia.add_contours(colors='0.5')
plt.clabel(contours, inline=1, fontsize=10)
# Add a figure legend.
fig.legend(dia.samplePoints,
[p.get_label() for p in dia.samplePoints],
numpoints=1, prop=dict(size='small'), loc='upper right')
plt.show()
| {
"repo_name": "pyoceans/utilities",
"path": "utilities/taylor_diagram.py",
"copies": "2",
"size": "5767",
"license": "mit",
"hash": 8563092671907484000,
"line_mean": 34.3803680982,
"line_max": 77,
"alpha_frac": 0.576382868,
"autogenerated": false,
"ratio": 3.3883666274970623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4964749495497062,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
dab = plt.imread('parametric_dab.png')
print(dab.shape)
l = dab[500, :, 3]
#plt.plot(l, label="opacity")
r = np.hstack((np.linspace(1, 0, 500), np.linspace(0, 1, 500)[1:]))
#plt.plot(r, label="$r$")
rr = r**2
o = 1.0-rr
#plt.plot(o, label="$1-r^2$")
for i in [1, 2]:
plt.figure(i)
hardness = 0.3
for hardness in [0.1, 0.3, 0.7]:
if i == 2:
rr = np.linspace(0, 1, 1000)
opa = rr.copy()
opa[rr < hardness] = rr[rr < hardness] + 1-(rr[rr < hardness]/hardness)
opa[rr >= hardness] = hardness/(1-hardness)*(1-rr[rr >= hardness])
plt.plot(opa, label="h=%.1f" % hardness)
if i == 2:
plt.xlabel("$r^2$")
plt.legend(loc='best')
else:
plt.xlabel("$d$")
plt.legend(loc='lower center')
plt.ylabel('pixel opacity')
plt.xticks([500], [0])
plt.title("Dab Shape (for different hardness values)")
plt.show()
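# --- Illustrative sketch (not part of the original script) --------------------
# The piecewise expression plotted above gives pixel opacity as a function of
# rr = r**2 for a given hardness (0 < hardness < 1); restated here as a
# standalone vectorized function for reference.
def dab_opacity(rr, hardness):
    rr = np.asarray(rr, dtype=float)
    return np.where(rr < hardness,
                    rr + 1 - rr/hardness,
                    hardness/(1 - hardness) * (1 - rr))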
| {
"repo_name": "achadwick/libmypaint",
"path": "doc/scripts/dab_hardness_plot.py",
"copies": "4",
"size": "1057",
"license": "isc",
"hash": -17189576301207340,
"line_mean": 24.1666666667,
"line_max": 79,
"alpha_frac": 0.5534531693,
"autogenerated": false,
"ratio": 2.759791122715405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0037561853351327033,
"num_lines": 42
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.ma as ma
from pandas import DataFrame
__all__ = ['both_valid',
'mean_bias',
'median_bias',
'rmse',
'r2',
'apply_skill',
'low_pass']
def both_valid(x, y):
"""
Returns a mask where both series are valid.
Examples
--------
>>> import numpy as np
>>> x = [np.NaN, 1, 2, 3, 4, 5]
>>> y = [0, 1, np.NaN, 3, 4, 5]
>>> both_valid(x, y)
array([False, True, False, True, True, True], dtype=bool)
"""
mask_x = np.isnan(x)
mask_y = np.isnan(y)
return np.logical_and(~mask_x, ~mask_y)
def mean_bias(obs, model):
    # Thin wrapper around scikit-learn's mean absolute error.
    from sklearn.metrics import mean_absolute_error
    return mean_absolute_error(obs, model)
def median_bias(obs, model):
    # Thin wrapper around scikit-learn's median absolute error.
    from sklearn.metrics import median_absolute_error
    return median_absolute_error(obs, model)
def rmse(obs, model):
"""
Compute root mean square between the observed data (`obs`) and the modeled
data (`model`).
>>> obs = [3, -0.5, 2, 7]
>>> model = [2.5, 0.0, 2, 8]
>>> rmse(obs, model)
0.61237243569579447
>>> obs = [[0.5, 1],[-1, 1],[7, -6]]
>>> model = [[0, 2],[-1, 2],[8, -5]]
>>> rmse(obs, model)
0.84162541153017323
"""
from sklearn.metrics import mean_squared_error
return np.sqrt(mean_squared_error(obs, model))
def r2(x, y):
from sklearn.metrics import r2_score
return r2_score(x, y)
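# --- Illustrative sketch (not part of the original module) --------------------
# Typical use of 'both_valid' together with one of the metrics above: mask out
# samples where either the observation or the model is NaN before scoring.
def _demo_masked_rmse():
    obs = np.array([1.0, 2.0, np.NaN, 4.0, 5.0])
    model = np.array([1.1, np.NaN, 3.0, 3.8, 5.2])
    mask = both_valid(obs, model)  # True only where both series are valid
    return rmse(obs[mask], model[mask])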
def apply_skill(dfs, function, remove_mean=True, filter_tides=False):
skills = dict()
for station, df in dfs.iteritems():
if filter_tides:
df = df.apply(low_pass)
skill = dict()
obs = df.pop('OBS_DATA')
if obs.isnull().all():
# No observations.
skills.update({station: np.NaN})
continue
for model, y in df.iteritems():
# No models.
if y.isnull().all():
skills.update({station: np.NaN})
continue
mask = both_valid(obs, y)
x, y = obs[mask], y[mask]
if remove_mean:
x, y = x-x.mean(), y-y.mean()
if x.size:
ret = function(x, y)
else:
ret = np.NaN
skill.update({model: ret})
skills.update({station: skill})
return DataFrame.from_dict(skills)
def low_pass(series, window_size=193, T=40, dt=360):
"""
    Apply a Lanczos filter to a pandas time series and
    return the low-pass filtered series.
    series : Pandas series
    window_size : Size of the filter window (default is 96+1+96).
    T : Period of the filter in hours. (The default 40-hour filter
        should remove all tidal signals.)
    dt : Time delta in seconds. Default is 360 (6 minutes).
"""
from oceans import lanc
T *= 60*60. # To seconds.
freq = dt/T
mask = np.isnan(series)
avg = series.mean()
series = series - avg
series.interpolate(inplace=True)
wt = lanc(window_size, freq)
low = np.convolve(wt, series, mode='same')
low = ma.masked_array(low, mask)
return low+avg
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"repo_name": "pyoceans/utilities",
"path": "utilities/skill_score.py",
"copies": "2",
"size": "3278",
"license": "mit",
"hash": -5608686416070957000,
"line_mean": 25.224,
"line_max": 78,
"alpha_frac": 0.5555216595,
"autogenerated": false,
"ratio": 3.3517382413087935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49072599008087936,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.ma as ma
from pandas.tseries.frequencies import to_offset
from pandas import DatetimeIndex, Series, rolling_median
__all__ = ['has_time_gaps',
'is_monotonically_increasing',
'is_flatline',
'is_spike',
'threshold_series',
'filter_spikes',
'tukey53H']
def has_time_gaps(times, freq):
"""
    Check for gaps in a series of time stamps `times`. The `freq` can be a
    string or a pandas offset object. Note that `freq` cannot be an ambiguous
    offset (like week, month, etc.); in those cases reduce it to the smallest
    unambiguous unit (i.e.: 1 month -> 30 days).
Example
-------
>>> import numpy as np
>>> from pandas import date_range
>>> times = date_range('1980-01-19', periods=48, freq='1H')
>>> has_time_gaps(times, freq='6min')
True
>>> has_time_gaps(times, freq='1H')
False
"""
freq = to_offset(freq)
if hasattr(freq, 'delta'):
times = DatetimeIndex(times)
else:
raise ValueError('Cannot interpret freq {!r} delta'.format(freq))
return (np.diff(times) > freq.delta.to_timedelta64()).any()
def is_monotonically_increasing(series):
"""
Check if a given list or array is monotonically increasing.
Examples
--------
>>> from pandas import date_range
>>> times = date_range('1980-01-19', periods=10)
>>> all(is_monotonically_increasing(times))
True
>>> import numpy as np
>>> all(is_monotonically_increasing(np.r_[times[-2:-1], times]))
False
"""
return [x < y for x, y in zip(series, series[1:])]
def is_flatline(series, reps=10, eps=None):
"""
Check for consecutively repeated values (`reps`) in `series` within a
tolerance `eps`.
Examples
--------
>>> series = np.r_[np.random.rand(10), [10]*15, np.random.rand(10)]
>>> is_flatline(series, reps=10)
array([False, False, False, False, False, False, False, False, False,
False, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, False, False,
False, False, False, False, False, False, False, False], dtype=bool)
"""
series = np.asanyarray(series)
if not eps:
eps = np.finfo(float).eps
if reps < 2:
reps = 2
mask = np.zeros_like(series, dtype='bool')
flatline = 1
for k, current in enumerate(series):
if np.abs(series[k-1] - current) < eps:
flatline += 1
else:
if flatline >= reps:
mask[k-flatline:k] = True
flatline = 1
return mask
def is_spike(series, window_size=3, threshold=3, scale=True):
"""
    Flags spikes in an array-like object using a median filter of `window_size`
    and a `threshold` for the median difference. If `scale=False` the
    differences are not scaled by the data standard deviation and the masking
    is "aggressive."
Examples
--------
>>> from pandas import Series, date_range
>>> series = [33.43, 33.45, 34.45, 90.0, 35.67, 34.9, 43.5, 34.6, 33.7]
>>> series = Series(series, index=date_range('1980-01-19',
... periods=len(series)))
>>> series[is_spike(series, window_size=3, threshold=3, scale=False)]
1980-01-22 90.0
1980-01-25 43.5
dtype: float64
>>> series[is_spike(series, window_size=3, threshold=3, scale=True)]
1980-01-22 90.0
Freq: D, dtype: float64
"""
# bfill+ffil needs a series and won't affect the median.
series = Series(series)
medians = rolling_median(series, window=window_size, center=True)
medians = medians.fillna(method='bfill').fillna(method='ffill')
difference = np.abs(series - medians).values
if scale:
return difference > (threshold*difference.std())
return difference > threshold
def threshold_series(series, vmin=None, vmax=None):
"""
    Threshold a series by flagging values below `vmin` and above `vmax`
    (with NaN for a pandas Series, or by masking for plain arrays).
Examples
--------
>>> series = [0.1, 20, 30, 35.5, 34.9, 43.5, 34.6, 40]
>>> threshold_series(series, vmin=30, vmax=40)
masked_array(data = [-- -- 30.0 35.5 34.9 -- 34.6 40.0],
mask = [ True True False False False True False False],
fill_value = 1e+20)
<BLANKLINE>
>>> from pandas import Series, date_range
>>> series = Series(series, index=date_range('1980-01-19',
... periods=len(series)))
>>> threshold_series(series, vmin=30, vmax=40)
1980-01-19 NaN
1980-01-20 NaN
1980-01-21 30.0
1980-01-22 35.5
1980-01-23 34.9
1980-01-24 NaN
1980-01-25 34.6
1980-01-26 40.0
Freq: D, dtype: float64
"""
if not vmin:
vmin = min(series)
if not vmax:
vmax = max(series)
masked = ma.masked_outside(series, vmin, vmax)
if masked.mask.any():
if isinstance(series, Series):
series[masked.mask] = np.NaN
return series
return masked
def filter_spikes(series, window_size=3, threshold=3, scale=True):
"""
Filter an array-like object using a median filter and a `threshold`
for the median difference.
Examples
--------
>>> from pandas import Series, date_range
>>> series = [33.43, 33.45, 34.45, 90.0, 35.67, 34.9, 43.5, 34.6, 33.7]
>>> series = Series(series, index=date_range('1980-01-19',
... periods=len(series)))
>>> filter_spikes(series)
1980-01-19 33.43
1980-01-20 33.45
1980-01-21 34.45
1980-01-22 NaN
1980-01-23 35.67
1980-01-24 34.90
1980-01-25 43.50
1980-01-26 34.60
1980-01-27 33.70
Freq: D, dtype: float64
"""
outlier_idx = is_spike(series, window_size=window_size,
threshold=threshold, scale=scale)
if not isinstance(series, Series):
series = np.asanyarray(series)
series[outlier_idx] = np.NaN
return series
def _high_pass(data, alpha=0.5):
"""
Runs a high pass RC filter over the given data.
Parameters
----------
data : array_like
alpha : float
Smoothing factor between 0.0 (exclusive) and 1.0 (inclusive).
A lower value means more filtering. A value of 1.0 equals no
filtering. Defaults is 0.5.
Returns
-------
hpf : array_like
Filtered data.
Based on http://en.wikipedia.org/wiki/High-pass_filter
"""
mean = data.mean()
data = data - mean
hpf = data.copy()
for k in range(1, len(data)):
hpf[k] = alpha * hpf[k-1] + alpha * (data[k] - data[k-1])
return hpf + mean
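# --- Illustrative sketch (not part of the original module) --------------------
# '_high_pass' is a first-order RC filter: with alpha close to 1 it passes
# fast oscillations nearly unchanged while attenuating a slow drift. The demo
# below applies it to a fast sine riding on a linear trend.
def _demo_high_pass():
    t = np.linspace(0, 10, 1000)
    signal = np.sin(2*np.pi*5*t) + 0.5*t  # fast oscillation + slow drift
    filtered = _high_pass(signal, alpha=0.95)
    return signal, filtered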
def tukey53H(series, k=1.5):
"""
    Flags suspicious spike values in `series` using the Tukey 53H criteria.
References
----------
.. [1] Goring, Derek G., and Vladimir I. Nikora. "Despiking acoustic
Doppler velocimeter data." Journal of Hydraulic Engineering 128.1 (2002):
117-126. http://dx.doi.org/10.1061/(ASCE)0733-9429(2002)128:1(117)
Examples
--------
>>> from pandas import Series, date_range
>>> series = [33.43, 33.45, 34.45, 90.0, 35.67, 34.9, 43.5, 34.6, 33.7]
>>> series = Series(series, index=date_range('1980-01-19',
... periods=len(series)))
>>> series[tukey53H(series, k=1.5)]
1980-01-22 90.0
Freq: D, dtype: float64
"""
series = np.asanyarray(series)
series = _high_pass(series, 0.99)
series = series - series.mean()
N = len(series)
stddev = series.std()
u1 = np.zeros_like(series)
for n in range(N-4):
if series[n:n+5].any():
u1[n+2] = np.median(series[n:n+5])
u2 = np.zeros_like(series)
for n in range(N-2):
if u1[n:n+3].any():
u2[n+1] = np.median(u1[n:n+3])
u3 = np.zeros_like(series)
u3[1:-1] = 0.25*(u2[:-2] + 2*u2[1:-1] + u2[2:])
delta = np.abs(series-u3)
return delta > k*stddev
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"repo_name": "ocefpaf/utilities",
"path": "utilities/qaqc.py",
"copies": "2",
"size": "8159",
"license": "mit",
"hash": 72616337327250240,
"line_mean": 28.2437275986,
"line_max": 79,
"alpha_frac": 0.5797279078,
"autogenerated": false,
"ratio": 3.218540433925049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4798268341725049,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from datashape.predicates import isscalar
from toolz import concat, curry, partition_all
from collections import Iterator, Iterable
import datashape
from .core import NetworkDispatcher, ooc_types
from .chunks import chunks, Chunks
from .numpy_dtype import dshape_to_numpy
from .utils import records_to_tuples
convert = NetworkDispatcher('convert')
@convert.register(np.ndarray, pd.DataFrame, cost=0.2)
def dataframe_to_numpy(df, dshape=None, **kwargs):
dtype = dshape_to_numpy(dshape)
x = df.to_records(index=False)
if x.dtype != dtype:
x = x.astype(dtype)
return x
@convert.register(pd.DataFrame, np.ndarray, cost=1.0)
def numpy_to_dataframe(x, **kwargs):
return pd.DataFrame(x)
@convert.register(pd.Series, np.ndarray, cost=1.0)
def numpy_to_series(x, **kwargs):
return pd.Series(x)
@convert.register(pd.Series, pd.DataFrame, cost=0.1)
def DataFrame_to_Series(x, **kwargs):
assert len(x.columns) == 1
return x[x.columns[0]]
@convert.register(pd.DataFrame, pd.Series, cost=0.1)
def series_to_dataframe(x, **kwargs):
return x.to_frame()
@convert.register(np.recarray, np.ndarray, cost=0.0)
def ndarray_to_recarray(x, **kwargs):
return x.view(np.recarray)
@convert.register(np.ndarray, np.recarray, cost=0.0)
def recarray_to_ndarray(x, **kwargs):
return x.view(np.ndarray)
higher_precision_freqs = frozenset(('ns', 'ps', 'fs', 'as'))
@convert.register(np.ndarray, pd.Series, cost=0.1)
def series_to_array(s, dshape=None, **kwargs):
dtype = datashape.to_numpy_dtype(datashape.dshape(dshape))
sdtype = s.dtype
values = s.values
# don't lose precision of datetime64 more precise than microseconds
if ((issubclass(sdtype.type, np.datetime64) and
np.datetime_data(sdtype)[0] in higher_precision_freqs)
or s.dtype == dtype):
return values
try:
return values.astype(dtype)
except ValueError: # object series and record dshape, e.g., a frame row
return values
@convert.register(list, np.ndarray, cost=10.0)
def numpy_to_list(x, **kwargs):
dt = None
if x.dtype == 'M8[ns]':
dt = 'M8[us]' # lose precision when going to Python datetime
if x.dtype.fields and any(x.dtype[n] == 'M8[ns]' for n in x.dtype.names):
dt = [(n, 'M8[us]' if x.dtype[n] == 'M8[ns]' else x.dtype[n])
for n in x.dtype.names]
if dt:
return x.astype(dt).tolist()
else:
return x.tolist()
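# --- Illustrative sketch (not part of the original module) --------------------
# 'numpy_to_list' above demotes nanosecond datetimes to microseconds so that
# the values can become Python datetime objects; sub-microsecond digits are
# truncated in the process. The function is called directly here, assuming the
# 'convert.register' decorator returns the decorated function unchanged.
def _demo_numpy_to_list_datetimes():
    x = np.array(['2001-01-01T12:30:00.123456789'], dtype='M8[ns]')
    out = numpy_to_list(x)
    assert out[0].microsecond == 123456
    return out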
@convert.register(np.ndarray, chunks(np.ndarray), cost=1.0)
def numpy_chunks_to_numpy(c, **kwargs):
return np.concatenate(list(c))
@convert.register(chunks(np.ndarray), np.ndarray, cost=0.5)
def numpy_to_chunks_numpy(x, chunksize=2**20, **kwargs):
return chunks(np.ndarray)(
lambda: (x[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
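# --- Illustrative sketch (not part of the original module) --------------------
# Round trip between an in-memory array and its chunked form using the two
# converters above; 'numpy_chunks_to_numpy' already relies on the chunks
# wrapper being iterable, and the same assumption is made here (the functions
# are called directly, assuming 'convert.register' returns them unchanged).
def _demo_numpy_chunk_roundtrip():
    x = np.arange(10)
    c = numpy_to_chunks_numpy(x, chunksize=4)  # pieces of 4, 4 and 2 rows
    assert (numpy_chunks_to_numpy(c) == x).all()
    return c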
@convert.register(pd.DataFrame, chunks(pd.DataFrame), cost=1.0)
def chunks_dataframe_to_dataframe(c, **kwargs):
c = list(c)
if not c: # empty case
return pd.DataFrame(columns=kwargs.get('dshape').measure.names)
else:
return pd.concat(c, axis=0, ignore_index=True)
@convert.register(chunks(pd.DataFrame), pd.DataFrame, cost=0.5)
def dataframe_to_chunks_dataframe(x, chunksize=2**20, **kwargs):
return chunks(pd.DataFrame)(
lambda: (x.iloc[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
def ishashable(x):
try:
hash(x)
return True
    except TypeError:
        return False
@convert.register(set, (list, tuple), cost=5.0)
def iterable_to_set(x, **kwargs):
if x and isinstance(x[0], Iterable) and not ishashable(x):
x = map(tuple, x)
return set(x)
@convert.register(list, (tuple, set), cost=1.0)
def iterable_to_list(x, **kwargs):
return list(x)
@convert.register(tuple, (list, set), cost=1.0)
def iterable_to_tuple(x, **kwargs):
return tuple(x)
def element_of(seq):
"""
>>> element_of([1, 2, 3])
1
>>> element_of([[1, 2], [3, 4]])
1
"""
while isinstance(seq, list) and seq:
seq = seq[0]
return seq
@convert.register(np.ndarray, list, cost=10.0)
def list_to_numpy(seq, dshape=None, **kwargs):
if isinstance(element_of(seq), dict):
seq = list(records_to_tuples(dshape, seq))
if (seq and isinstance(seq[0], Iterable)
and not ishashable(seq[0])
and not isscalar(dshape)):
seq = list(map(tuple, seq))
dtype = dshape_to_numpy(dshape)
return np.array(seq, dtype=dtype)
@convert.register(Iterator, list, cost=0.001)
def list_to_iterator(L, **kwargs):
return iter(L)
@convert.register(list, Iterator, cost=1.0)
def iterator_to_list(seq, **kwargs):
return list(seq)
@convert.register(Iterator, (chunks(pd.DataFrame), chunks(np.ndarray)), cost=10.0)
def numpy_chunks_to_iterator(c, **kwargs):
return concat(convert(Iterator, chunk, **kwargs) for chunk in c)
@convert.register(chunks(np.ndarray), Iterator, cost=10.0)
def iterator_to_numpy_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
first, rest = next(seq2), seq2
x = convert(np.ndarray, first, **kwargs)
def _():
yield x
for i in rest:
yield convert(np.ndarray, i, **kwargs)
return chunks(np.ndarray)(_)
@convert.register(chunks(pd.DataFrame), Iterator, cost=10.0)
def iterator_to_DataFrame_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
try:
first, rest = next(seq2), seq2
except StopIteration:
return chunks(pd.DataFrame)([])
df = convert(pd.DataFrame, first, **kwargs)
def _():
yield df
for i in rest:
yield convert(pd.DataFrame, i, **kwargs)
return chunks(pd.DataFrame)(_)
@convert.register(tuple, np.record)
def numpy_record_to_tuple(rec, **kwargs):
return rec.tolist()
@convert.register(chunks(np.ndarray), chunks(pd.DataFrame), cost=0.5)
def chunked_pandas_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(pd.DataFrame), chunks(np.ndarray), cost=0.5)
def chunked_numpy_to_chunked_pandas(c, **kwargs):
return chunks(pd.DataFrame)(lambda: (convert(pd.DataFrame, chunk, **kwargs) for chunk in c))
@convert.register(chunks(np.ndarray), chunks(list), cost=10.0)
def chunked_list_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(list), chunks(np.ndarray), cost=10.0)
def chunked_numpy_to_chunked_list(c, **kwargs):
return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c))
@convert.register(chunks(Iterator), chunks(list), cost=0.1)
def chunked_list_to_chunked_iterator(c, **kwargs):
return chunks(Iterator)(c.data)
@convert.register(chunks(list), chunks(Iterator), cost=0.1)
def chunked_Iterator_to_chunked_list(c, **kwargs):
    return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c))
@convert.register(Iterator, chunks(Iterator), cost=0.1)
def chunked_iterator_to_iterator(c, **kwargs):
return concat(c)
ooc_types |= set([Iterator, Chunks])
| {
"repo_name": "mrocklin/into",
"path": "into/convert.py",
"copies": "1",
"size": "7301",
"license": "bsd-3-clause",
"hash": -4992610469792644000,
"line_mean": 29.1694214876,
"line_max": 96,
"alpha_frac": 0.6659361731,
"autogenerated": false,
"ratio": 3.122754491017964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9277487862177973,
"avg_score": 0.0022405603879980755,
"num_lines": 242
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from datashape.predicates import isscalar
from toolz import concat, partition_all, compose
from collections import Iterator, Iterable
import datashape
from datashape import discover
from .core import NetworkDispatcher, ooc_types
from .chunks import chunks, Chunks
from .numpy_dtype import dshape_to_numpy
from .utils import records_to_tuples
from functools import partial
convert = NetworkDispatcher('convert')
@convert.register(np.ndarray, pd.DataFrame, cost=0.2)
def dataframe_to_numpy(df, dshape=None, **kwargs):
dtype = dshape_to_numpy(dshape or discover(df))
x = df.to_records(index=False)
if x.dtype != dtype:
x = x.astype(dtype)
return x
@convert.register(pd.DataFrame, np.ndarray, cost=1.0)
def numpy_to_dataframe(x, dshape, **kwargs):
dtype = x.dtype
names = dtype.names
if names is None:
if dtype.kind == 'm':
            # pandas does not do this conversion for us and doesn't work
            # with non-'ns' unit timedeltas
x = x.astype('m8[ns]')
else:
fields = dtype.fields
new_dtype = []
should_astype = False
for name in names:
original_field_value = fields[name][0]
if original_field_value.kind == 'm':
                # pandas does not do this conversion for us and doesn't work
                # with non-'ns' unit timedeltas
new_dtype.append((name, 'm8[ns]'))
# perform the astype at the end of the loop
should_astype = True
else:
new_dtype.append((name, original_field_value))
if should_astype:
x = x.astype(new_dtype)
df = pd.DataFrame(x, columns=getattr(dshape.measure, 'names', names))
return df
@convert.register(pd.Series, np.ndarray, cost=1.0)
def numpy_to_series(x, **kwargs):
names = x.dtype.names
if names is not None:
if len(names) > 1:
raise ValueError('passed in an ndarray with more than 1 column')
name, = names
return pd.Series(x[name], name=name)
return pd.Series(x)
@convert.register(pd.Series, pd.DataFrame, cost=0.1)
def DataFrame_to_Series(x, **kwargs):
assert len(x.columns) == 1
return x[x.columns[0]]
@convert.register(pd.DataFrame, pd.Series, cost=0.1)
def series_to_dataframe(x, **kwargs):
return x.to_frame()
@convert.register(np.recarray, np.ndarray, cost=0.0)
def ndarray_to_recarray(x, **kwargs):
return x.view(np.recarray)
@convert.register(np.ndarray, np.recarray, cost=0.0)
def recarray_to_ndarray(x, **kwargs):
return x.view(np.ndarray)
higher_precision_freqs = frozenset(('ns', 'ps', 'fs', 'as'))
@convert.register(np.ndarray, pd.Series, cost=0.1)
def series_to_array(s, dshape=None, **kwargs):
# if we come from a node that can't be discovered we need to discover
# on s
dtype = dshape_to_numpy(datashape.dshape(dshape or discover(s)))
sdtype = s.dtype
values = s.values
# don't lose precision of datetime64 more precise than microseconds
if ((issubclass(sdtype.type, np.datetime64) and
np.datetime_data(sdtype)[0] in higher_precision_freqs) or
s.dtype == dtype):
return values
try:
return values.astype(dtype)
except ValueError: # object series and record dshape, e.g., a frame row
return values
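# Minimal usage sketch (not part of the original module); the helper name is
# illustrative only and assumes the converter above remains reachable by its
# function name.  Nanosecond datetimes fall under higher_precision_freqs, so
# the Series values keep their 'M8[ns]' dtype instead of being truncated to
# the microsecond dtype implied by the discovered datashape.
def _series_to_array_sketch():
    s = pd.Series(pd.to_datetime(['2000-01-01', '2000-01-02']))
    return series_to_array(s).dtype  # dtype('<M8[ns]')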
@convert.register(list, np.ndarray, cost=10.0)
def numpy_to_list(x, **kwargs):
dt = None
if x.dtype == 'M8[ns]':
dt = 'M8[us]' # lose precision when going to Python datetime
if x.dtype.fields and any(x.dtype[n] == 'M8[ns]' for n in x.dtype.names):
dt = [(n, 'M8[us]' if x.dtype[n] == 'M8[ns]' else x.dtype[n])
for n in x.dtype.names]
if dt:
return x.astype(dt).tolist()
else:
return x.tolist()
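# Minimal usage sketch (not part of the original module); the helper name is
# illustrative only and assumes the converter above remains reachable by its
# function name.  'M8[ns]' values are cast to 'M8[us]' before tolist(), so
# the result holds plain datetime.datetime objects.
def _numpy_to_list_sketch():
    x = np.array(['2000-01-01T12:30'], dtype='M8[ns]')
    return numpy_to_list(x)  # [datetime.datetime(2000, 1, 1, 12, 30)]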
@convert.register(np.ndarray, chunks(np.ndarray), cost=1.0)
def numpy_chunks_to_numpy(c, **kwargs):
return np.concatenate(list(c))
@convert.register(chunks(np.ndarray), np.ndarray, cost=0.5)
def numpy_to_chunks_numpy(x, chunksize=2**20, **kwargs):
return chunks(np.ndarray)(
lambda: (x[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
@convert.register(pd.DataFrame, chunks(pd.DataFrame), cost=1.0)
def chunks_dataframe_to_dataframe(c, **kwargs):
c = list(c)
if not c: # empty case
return pd.DataFrame(columns=kwargs.get('dshape').measure.names)
else:
return pd.concat(c, axis=0, ignore_index=True)
@convert.register(chunks(pd.DataFrame), pd.DataFrame, cost=0.5)
def dataframe_to_chunks_dataframe(x, chunksize=2**20, **kwargs):
return chunks(pd.DataFrame)(
lambda: (x.iloc[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
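# Minimal round-trip sketch (not part of the original module); the helper
# name is illustrative only and assumes the two chunk converters above remain
# reachable by their function names.
def _chunked_dataframe_round_trip_sketch():
    df = pd.DataFrame({'x': list(range(5))})
    c = dataframe_to_chunks_dataframe(df, chunksize=2)  # lazily yields 2 + 2 + 1 rows
    return chunks_dataframe_to_dataframe(c).shape       # (5, 1), with a fresh index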
def ishashable(x):
try:
hash(x)
return True
except:
return False
@convert.register(set, (list, tuple), cost=5.0)
def iterable_to_set(x, **kwargs):
if x and isinstance(x[0], (tuple, list)) and not ishashable(x):
x = map(tuple, x)
return set(x)
@convert.register(list, (tuple, set), cost=1.0)
def iterable_to_list(x, **kwargs):
return list(x)
@convert.register(tuple, (list, set), cost=1.0)
def iterable_to_tuple(x, **kwargs):
return tuple(x)
def element_of(seq):
"""
>>> element_of([1, 2, 3])
1
>>> element_of([[1, 2], [3, 4]])
1
"""
while isinstance(seq, list) and seq:
seq = seq[0]
return seq
@convert.register(np.ndarray, list, cost=10.0)
def list_to_numpy(seq, dshape=None, **kwargs):
if isinstance(element_of(seq), dict):
seq = list(records_to_tuples(dshape, seq))
if (seq and isinstance(seq[0], Iterable) and not ishashable(seq[0]) and
not isscalar(dshape)):
seq = list(map(tuple, seq))
return np.array(seq, dtype=dshape_to_numpy(dshape))
@convert.register(Iterator, list, cost=0.001)
def list_to_iterator(L, **kwargs):
return iter(L)
@convert.register(list, Iterator, cost=1.0)
def iterator_to_list(seq, **kwargs):
return list(seq)
@convert.register(Iterator, (chunks(pd.DataFrame), chunks(np.ndarray)), cost=10.0)
def numpy_chunks_to_iterator(c, **kwargs):
return concat(convert(Iterator, chunk, **kwargs) for chunk in c)
@convert.register(chunks(np.ndarray), Iterator, cost=10.0)
def iterator_to_numpy_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
try:
first, rest = next(seq2), seq2
except StopIteration: # seq is empty
def _():
yield convert(np.ndarray, [], **kwargs)
else:
x = convert(np.ndarray, first, **kwargs)
def _():
yield x
for i in rest:
yield convert(np.ndarray, i, **kwargs)
return chunks(np.ndarray)(_)
@convert.register(chunks(pd.DataFrame), Iterator, cost=10.0)
def iterator_to_DataFrame_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
add_index = kwargs.get('add_index', False)
if not add_index:
# Simple, we can dispatch to dask...
f = lambda d: convert(pd.DataFrame, d, **kwargs)
data = [partial(f, d) for d in seq2]
if not data:
data = [convert(pd.DataFrame, [], **kwargs)]
return chunks(pd.DataFrame)(data)
# TODO: Decide whether we should support the `add_index` flag at all.
    # If so, we need to post-process the converted DataFrame objects sequentially,
# so we can't parallelize the process.
try:
first, rest = next(seq2), seq2
except StopIteration:
def _():
yield convert(pd.DataFrame, [], **kwargs)
else:
df = convert(pd.DataFrame, first, **kwargs)
df1, n1 = _add_index(df, 0)
def _():
n = n1
yield df1
for i in rest:
df = convert(pd.DataFrame, i, **kwargs)
df, n = _add_index(df, n)
yield df
return chunks(pd.DataFrame)(_)
def _add_index(df, start, _idx_type=getattr(pd, 'RangeIndex',
compose(pd.Index, np.arange))):
stop = start + len(df)
idx = _idx_type(start=start, stop=stop)
df.index = idx
return df, stop
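# Minimal sketch (not part of the original module) of the indexing helper
# above; the helper name is illustrative only.  Consecutive chunks receive a
# continuous integer index, which is what the add_index branch threads
# through the generator.
def _add_index_sketch():
    df1, stop = _add_index(pd.DataFrame({'x': [1, 2]}), 0)
    df2, stop = _add_index(pd.DataFrame({'x': [3, 4]}), stop)
    return list(df1.index), list(df2.index)  # ([0, 1], [2, 3])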
@convert.register(tuple, np.record)
def numpy_record_to_tuple(rec, **kwargs):
return rec.tolist()
@convert.register(chunks(np.ndarray), chunks(pd.DataFrame), cost=0.5)
def chunked_pandas_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(pd.DataFrame), chunks(np.ndarray), cost=0.5)
def chunked_numpy_to_chunked_pandas(c, **kwargs):
return chunks(pd.DataFrame)(lambda: (convert(pd.DataFrame, chunk, **kwargs) for chunk in c))
@convert.register(chunks(np.ndarray), chunks(list), cost=10.0)
def chunked_list_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(list), chunks(np.ndarray), cost=10.0)
def chunked_numpy_to_chunked_list(c, **kwargs):
return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c))
@convert.register(chunks(Iterator), chunks(list), cost=0.1)
def chunked_list_to_chunked_iterator(c, **kwargs):
return chunks(Iterator)(c.data)
@convert.register(chunks(list), chunks(Iterator), cost=0.1)
def chunked_Iterator_to_chunked_list(c, **kwargs):
    return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c))
@convert.register(Iterator, chunks(Iterator), cost=0.1)
def chunked_iterator_to_iterator(c, **kwargs):
return concat(c)
ooc_types |= set([Iterator, Chunks])
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/convert.py",
"copies": "4",
"size": "9752",
"license": "bsd-3-clause",
"hash": -4664313578508826000,
"line_mean": 30.2564102564,
"line_max": 96,
"alpha_frac": 0.6350492207,
"autogenerated": false,
"ratio": 3.2957080094626563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011448466766080714,
"num_lines": 312
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from datashape.predicates import isscalar
from toolz import concat, partition_all
from collections import Iterator, Iterable
import datashape
from datashape import discover
from .core import NetworkDispatcher, ooc_types
from .chunks import chunks, Chunks
from .numpy_dtype import dshape_to_numpy
from .utils import records_to_tuples
convert = NetworkDispatcher('convert')
@convert.register(np.ndarray, pd.DataFrame, cost=0.2)
def dataframe_to_numpy(df, dshape=None, **kwargs):
dtype = dshape_to_numpy(dshape or discover(df))
x = df.to_records(index=False)
if x.dtype != dtype:
x = x.astype(dtype)
return x
@convert.register(pd.DataFrame, np.ndarray, cost=1.0)
def numpy_to_dataframe(x, dshape, **kwargs):
return pd.DataFrame(x, columns=getattr(dshape.measure, 'names', None))
@convert.register(pd.Series, np.ndarray, cost=1.0)
def numpy_to_series(x, **kwargs):
names = x.dtype.names
if names is not None:
if len(names) > 1:
raise ValueError('passed in an ndarray with more than 1 column')
name, = names
return pd.Series(x[name], name=name)
return pd.Series(x)
@convert.register(pd.Series, pd.DataFrame, cost=0.1)
def DataFrame_to_Series(x, **kwargs):
assert len(x.columns) == 1
return x[x.columns[0]]
@convert.register(pd.DataFrame, pd.Series, cost=0.1)
def series_to_dataframe(x, **kwargs):
return x.to_frame()
@convert.register(np.recarray, np.ndarray, cost=0.0)
def ndarray_to_recarray(x, **kwargs):
return x.view(np.recarray)
@convert.register(np.ndarray, np.recarray, cost=0.0)
def recarray_to_ndarray(x, **kwargs):
return x.view(np.ndarray)
higher_precision_freqs = frozenset(('ns', 'ps', 'fs', 'as'))
@convert.register(np.ndarray, pd.Series, cost=0.1)
def series_to_array(s, dshape=None, **kwargs):
# if we come from a node that can't be discovered we need to discover
# on s
dtype = dshape_to_numpy(datashape.dshape(dshape or discover(s)))
sdtype = s.dtype
values = s.values
# don't lose precision of datetime64 more precise than microseconds
if ((issubclass(sdtype.type, np.datetime64) and
np.datetime_data(sdtype)[0] in higher_precision_freqs) or
s.dtype == dtype):
return values
try:
return values.astype(dtype)
except ValueError: # object series and record dshape, e.g., a frame row
return values
@convert.register(list, np.ndarray, cost=10.0)
def numpy_to_list(x, **kwargs):
dt = None
if x.dtype == 'M8[ns]':
dt = 'M8[us]' # lose precision when going to Python datetime
if x.dtype.fields and any(x.dtype[n] == 'M8[ns]' for n in x.dtype.names):
dt = [(n, 'M8[us]' if x.dtype[n] == 'M8[ns]' else x.dtype[n])
for n in x.dtype.names]
if dt:
return x.astype(dt).tolist()
else:
return x.tolist()
@convert.register(np.ndarray, chunks(np.ndarray), cost=1.0)
def numpy_chunks_to_numpy(c, **kwargs):
return np.concatenate(list(c))
@convert.register(chunks(np.ndarray), np.ndarray, cost=0.5)
def numpy_to_chunks_numpy(x, chunksize=2**20, **kwargs):
return chunks(np.ndarray)(
lambda: (x[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
@convert.register(pd.DataFrame, chunks(pd.DataFrame), cost=1.0)
def chunks_dataframe_to_dataframe(c, **kwargs):
c = list(c)
if not c: # empty case
return pd.DataFrame(columns=kwargs.get('dshape').measure.names)
else:
return pd.concat(c, axis=0, ignore_index=True)
@convert.register(chunks(pd.DataFrame), pd.DataFrame, cost=0.5)
def dataframe_to_chunks_dataframe(x, chunksize=2**20, **kwargs):
return chunks(pd.DataFrame)(
lambda: (x.iloc[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
def ishashable(x):
try:
hash(x)
return True
except:
return False
@convert.register(set, (list, tuple), cost=5.0)
def iterable_to_set(x, **kwargs):
if x and isinstance(x[0], (tuple, list)) and not ishashable(x):
x = map(tuple, x)
return set(x)
@convert.register(list, (tuple, set), cost=1.0)
def iterable_to_list(x, **kwargs):
return list(x)
@convert.register(tuple, (list, set), cost=1.0)
def iterable_to_tuple(x, **kwargs):
return tuple(x)
def element_of(seq):
"""
>>> element_of([1, 2, 3])
1
>>> element_of([[1, 2], [3, 4]])
1
"""
while isinstance(seq, list) and seq:
seq = seq[0]
return seq
@convert.register(np.ndarray, list, cost=10.0)
def list_to_numpy(seq, dshape=None, **kwargs):
if isinstance(element_of(seq), dict):
seq = list(records_to_tuples(dshape, seq))
if (seq and isinstance(seq[0], Iterable) and not ishashable(seq[0]) and
not isscalar(dshape)):
seq = list(map(tuple, seq))
return np.array(seq, dtype=dshape_to_numpy(dshape))
@convert.register(Iterator, list, cost=0.001)
def list_to_iterator(L, **kwargs):
return iter(L)
@convert.register(list, Iterator, cost=1.0)
def iterator_to_list(seq, **kwargs):
return list(seq)
@convert.register(Iterator, (chunks(pd.DataFrame), chunks(np.ndarray)), cost=10.0)
def numpy_chunks_to_iterator(c, **kwargs):
return concat(convert(Iterator, chunk, **kwargs) for chunk in c)
@convert.register(chunks(np.ndarray), Iterator, cost=10.0)
def iterator_to_numpy_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
try:
first, rest = next(seq2), seq2
except StopIteration: # seq is empty
def _():
yield convert(np.ndarray, [], **kwargs)
else:
x = convert(np.ndarray, first, **kwargs)
def _():
yield x
for i in rest:
yield convert(np.ndarray, i, **kwargs)
return chunks(np.ndarray)(_)
@convert.register(chunks(pd.DataFrame), Iterator, cost=10.0)
def iterator_to_DataFrame_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
try:
first, rest = next(seq2), seq2
except StopIteration:
def _():
yield convert(pd.DataFrame, [], **kwargs)
else:
df = convert(pd.DataFrame, first, **kwargs)
def _():
yield df
for i in rest:
yield convert(pd.DataFrame, i, **kwargs)
return chunks(pd.DataFrame)(_)
@convert.register(tuple, np.record)
def numpy_record_to_tuple(rec, **kwargs):
return rec.tolist()
@convert.register(chunks(np.ndarray), chunks(pd.DataFrame), cost=0.5)
def chunked_pandas_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(pd.DataFrame), chunks(np.ndarray), cost=0.5)
def chunked_numpy_to_chunked_pandas(c, **kwargs):
return chunks(pd.DataFrame)(lambda: (convert(pd.DataFrame, chunk, **kwargs) for chunk in c))
@convert.register(chunks(np.ndarray), chunks(list), cost=10.0)
def chunked_list_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(list), chunks(np.ndarray), cost=10.0)
def chunked_numpy_to_chunked_list(c, **kwargs):
return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c))
@convert.register(chunks(Iterator), chunks(list), cost=0.1)
def chunked_list_to_chunked_iterator(c, **kwargs):
return chunks(Iterator)(c.data)
@convert.register(chunks(list), chunks(Iterator), cost=0.1)
def chunked_Iterator_to_chunked_list(c, **kwargs):
    return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c))
@convert.register(Iterator, chunks(Iterator), cost=0.1)
def chunked_iterator_to_iterator(c, **kwargs):
return concat(c)
ooc_types |= set([Iterator, Chunks])
| {
"repo_name": "cowlicks/odo",
"path": "odo/convert.py",
"copies": "5",
"size": "7903",
"license": "bsd-3-clause",
"hash": -3659851321642578400,
"line_mean": 29.5135135135,
"line_max": 96,
"alpha_frac": 0.6575983804,
"autogenerated": false,
"ratio": 3.1854091092301493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6343007489630149,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from toolz import first, partial
from ..core import DataFrame, Series
from ..utils import UNKNOWN_CATEGORIES
from ...base import tokenize, normalize_token
from ...compatibility import PY3
from ...delayed import delayed
from ...bytes.core import get_fs_paths_myopen
try:
import fastparquet
from fastparquet import parquet_thrift
from fastparquet.core import read_row_group_file
from fastparquet.api import _pre_allocate
from fastparquet.util import check_column_names
default_encoding = parquet_thrift.Encoding.PLAIN
except:
fastparquet = False
default_encoding = None
def read_parquet(path, columns=None, filters=None, categories=None, index=None,
storage_options=None):
"""
Read ParquetFile into a Dask DataFrame
This reads a directory of Parquet data into a Dask.dataframe, one file per
partition. It selects the index among the sorted columns if any exist.
This uses the fastparquet project: http://fastparquet.readthedocs.io/en/latest
Parameters
----------
path : string
Source directory for data. May be a glob string.
Prepend with protocol like ``s3://`` or ``hdfs://`` for remote data.
columns: list or None
List of column names to load
filters: list
        List of filters to apply, like ``[('x', '>', 0), ...]``
index: string or None (default) or False
Name of index column to use if that column is sorted;
False to force dask to not use any column as the index
categories: list, dict or None
For any fields listed here, if the parquet encoding is Dictionary,
the column will be created with dtype category. Use only if it is
guaranteed that the column is encoded as dictionary in all row-groups.
If a list, assumes up to 2**16-1 labels; if a dict, specify the number
of labels expected; if None, will load categories automatically for
data written by dask/fastparquet, not otherwise.
storage_options : dict
Key/value pairs to be passed on to the file-system backend, if any.
Examples
--------
>>> df = read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP
See Also
--------
to_parquet
"""
if fastparquet is False:
raise ImportError("fastparquet not installed")
if filters is None:
filters = []
fs, paths, myopen = get_fs_paths_myopen(path, None, 'rb',
**(storage_options or {}))
if isinstance(columns, list):
columns = tuple(columns)
if len(paths) > 1:
pf = fastparquet.ParquetFile(paths, open_with=myopen, sep=myopen.fs.sep)
else:
try:
pf = fastparquet.ParquetFile(paths[0] + fs.sep + '_metadata',
open_with=myopen,
sep=fs.sep)
except:
pf = fastparquet.ParquetFile(paths[0], open_with=myopen, sep=fs.sep)
check_column_names(pf.columns, categories)
name = 'read-parquet-' + tokenize(pf, columns, categories)
rgs = [rg for rg in pf.row_groups if
not(fastparquet.api.filter_out_stats(rg, filters, pf.schema)) and
not(fastparquet.api.filter_out_cats(rg, filters))]
# Find an index among the partially sorted columns
minmax = fastparquet.api.sorted_partitioned_columns(pf)
if index is False:
index_col = None
elif len(minmax) == 1:
index_col = first(minmax)
elif len(minmax) > 1:
if index:
index_col = index
elif 'index' in minmax:
index_col = 'index'
else:
raise ValueError("Multiple possible indexes exist: %s. "
"Please select one with index='index-name'"
% sorted(minmax))
else:
index_col = None
if columns is None:
all_columns = tuple(pf.columns + list(pf.cats))
else:
all_columns = columns
if not isinstance(all_columns, tuple):
out_type = Series
all_columns = (all_columns,)
else:
out_type = DataFrame
if index_col and index_col not in all_columns:
all_columns = all_columns + (index_col,)
if categories is None:
categories = pf.categories
dtypes = pf._dtypes(categories)
meta = pd.DataFrame({c: pd.Series([], dtype=d)
for (c, d) in dtypes.items()},
columns=[c for c in pf.columns if c in dtypes])
meta = meta[list(all_columns)]
for cat in categories:
meta[cat] = pd.Series(pd.Categorical([],
categories=[UNKNOWN_CATEGORIES]))
if index_col:
meta = meta.set_index(index_col)
if out_type == Series:
assert len(meta.columns) == 1
meta = meta[meta.columns[0]]
dsk = {(name, i): (_read_parquet_row_group, myopen, pf.row_group_filename(rg),
index_col, all_columns, rg, out_type == Series,
categories, pf.schema, pf.cats, pf.dtypes)
for i, rg in enumerate(rgs)}
if index_col:
divisions = list(minmax[index_col]['min']) + [minmax[index_col]['max'][-1]]
else:
divisions = (None,) * (len(rgs) + 1)
if isinstance(divisions[0], np.datetime64):
divisions = [pd.Timestamp(d) for d in divisions]
return out_type(dsk, name, meta, divisions)
def _read_parquet_row_group(open, fn, index, columns, rg, series, categories,
schema, cs, dt, *args):
if not isinstance(columns, (tuple, list)):
columns = (columns,)
series = True
if index and index not in columns:
columns = columns + type(columns)([index])
df, views = _pre_allocate(rg.num_rows, columns, categories, index, cs, dt)
read_row_group_file(fn, rg, columns, categories, schema, cs,
open=open, assign=views)
if series:
return df[df.columns[0]]
else:
return df
def to_parquet(path, df, compression=None, write_index=None, has_nulls=None,
fixed_text=None, object_encoding=None, storage_options=None,
append=False, ignore_divisions=False):
"""
Store Dask.dataframe to Parquet files
Notes
-----
Each partition will be written to a separate file.
Parameters
----------
path : string
Destination directory for data. Prepend with protocol like ``s3://``
or ``hdfs://`` for remote data.
df : Dask.dataframe
compression : string or dict
Either a string like "SNAPPY" or a dictionary mapping column names to
compressors like ``{"name": "GZIP", "values": "SNAPPY"}``
write_index : boolean
Whether or not to write the index. Defaults to True *if* divisions are
known.
has_nulls : bool, list or None
Specifies whether to write NULLs information for columns. If bools,
apply to all columns, if list, use for only the named columns, if None,
use only for columns which don't have a sentinel NULL marker (currently
object columns only).
fixed_text : dict {col: int}
For column types that are written as bytes (bytes, utf8 strings, or
json and bson-encoded objects), if a column is included here, the
data will be written in fixed-length format, which should be faster
but can potentially result in truncation.
object_encoding : dict {col: bytes|utf8|json|bson} or str
For object columns, specify how to encode to bytes. If a str, same
encoding is applied to all object columns.
storage_options : dict
Key/value pairs to be passed on to the file-system backend, if any.
append: bool (False)
If False, construct data-set from scratch; if True, add new row-group(s)
to existing data-set. In the latter case, the data-set must exist,
and the schema must match the input data.
ignore_divisions: bool (False)
If False raises error when previous divisions overlap with the new
appended divisions. Ignored if append=False.
This uses the fastparquet project: http://fastparquet.readthedocs.io/en/latest
Examples
--------
>>> df = dd.read_csv(...) # doctest: +SKIP
>>> to_parquet('/path/to/output/', df, compression='SNAPPY') # doctest: +SKIP
See Also
--------
read_parquet: Read parquet data to dask.dataframe
"""
if fastparquet is False:
raise ImportError("fastparquet not installed")
fs, paths, myopen = get_fs_paths_myopen(path, None, 'wb',
**(storage_options or {}))
fs.mkdirs(path)
sep = fs.sep
metadata_fn = sep.join([path, '_metadata'])
if write_index is True or write_index is None and df.known_divisions:
new_divisions = df.divisions
df = df.reset_index()
index_col = df.columns[0]
else:
ignore_divisions = True
object_encoding = object_encoding or 'utf8'
if object_encoding == 'infer' or (isinstance(object_encoding, dict) and
'infer' in object_encoding.values()):
raise ValueError('"infer" not allowed as object encoding, '
                         'because this requires data in memory.')
fmd = fastparquet.writer.make_metadata(df._meta, has_nulls=has_nulls,
fixed_text=fixed_text,
object_encoding=object_encoding)
if append:
pf = fastparquet.api.ParquetFile(path, open_with=myopen, sep=sep)
if pf.file_scheme != 'hive':
raise ValueError('Requested file scheme is hive, '
'but existing file scheme is not.')
elif set(pf.columns) != set(df.columns):
raise ValueError('Appended columns not the same.\n'
'New: {} | Previous: {}'
.format(pf.columns, list(df.columns)))
elif set(pf.dtypes.items()) != set(df.dtypes.iteritems()):
raise ValueError('Appended dtypes differ.\n{}'
.format(set(pf.dtypes.items()) ^
set(df.dtypes.iteritems())))
# elif fmd.schema != pf.fmd.schema:
# raise ValueError('Appended schema differs.')
else:
df = df[pf.columns]
fmd = pf.fmd
i_offset = fastparquet.writer.find_max_part(fmd.row_groups)
if not ignore_divisions:
minmax = fastparquet.api.sorted_partitioned_columns(pf)
divisions = list(minmax[index_col]['min']) + [
minmax[index_col]['max'][-1]]
if new_divisions[0] < divisions[-1]:
raise ValueError(
'Appended divisions overlapping with the previous ones.\n'
'New: {} | Previous: {}'
                .format(new_divisions[0], divisions[-1]))
else:
i_offset = 0
partitions = df.to_delayed()
filenames = ['part.%i.parquet' % i
for i in range(i_offset, len(partitions) + i_offset)]
outfiles = [sep.join([path, fn]) for fn in filenames]
writes = [delayed(fastparquet.writer.make_part_file)(
myopen(outfile, 'wb'), partition, fmd.schema,
compression=compression)
for outfile, partition in zip(outfiles, partitions)]
out = delayed(writes).compute()
for fn, rg in zip(filenames, out):
if rg is not None:
for chunk in rg.columns:
chunk.file_path = fn
fmd.row_groups.append(rg)
if len(fmd.row_groups) == 0:
raise ValueError("All partitions were empty")
fastparquet.writer.write_common_metadata(metadata_fn, fmd, open_with=myopen,
no_row_groups=False)
fn = sep.join([path, '_common_metadata'])
fastparquet.writer.write_common_metadata(fn, fmd, open_with=myopen)
if fastparquet:
@partial(normalize_token.register, fastparquet.ParquetFile)
def normalize_ParquetFile(pf):
return (type(pf), pf.fn, pf.sep) + normalize_token(pf.open)
if PY3:
DataFrame.to_parquet.__doc__ = to_parquet.__doc__
| {
"repo_name": "cpcloud/dask",
"path": "dask/dataframe/io/parquet.py",
"copies": "1",
"size": "12388",
"license": "bsd-3-clause",
"hash": 4908770258497190000,
"line_mean": 37,
"line_max": 83,
"alpha_frac": 0.597513723,
"autogenerated": false,
"ratio": 3.984560952074622,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082074675074622,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from toolz import partial
def maybe_wrap_pandas(obj, x):
if isinstance(x, np.ndarray):
if isinstance(obj, pd.Series):
return pd.Series(x, index=obj.index, dtype=x.dtype)
return pd.Index(x)
return x
class Accessor(object):
"""
Base class for pandas Accessor objects cat, dt, and str.
Notes
-----
Subclasses should define the following attributes:
* _accessor
* _accessor_name
"""
def __init__(self, series):
from .core import Series
if not isinstance(series, Series):
raise ValueError('Accessor cannot be initialized')
self._validate(series)
self._series = series
def _validate(self, series):
pass
@staticmethod
def _delegate_property(obj, accessor, attr):
out = getattr(getattr(obj, accessor, obj), attr)
return maybe_wrap_pandas(obj, out)
@staticmethod
def _delegate_method(obj, accessor, attr, args, kwargs):
out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)
return maybe_wrap_pandas(obj, out)
def _property_map(self, attr):
meta = self._delegate_property(self._series._meta,
self._accessor_name, attr)
token = '%s-%s' % (self._accessor_name, attr)
return self._series.map_partitions(self._delegate_property,
self._accessor_name, attr,
token=token, meta=meta)
def _function_map(self, attr, *args, **kwargs):
meta = self._delegate_method(self._series._meta_nonempty,
self._accessor_name, attr, args, kwargs)
token = '%s-%s' % (self._accessor_name, attr)
return self._series.map_partitions(self._delegate_method,
self._accessor_name, attr, args,
kwargs, meta=meta, token=token)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
dir(self._accessor)))
def __getattr__(self, key):
if key in dir(self._accessor):
if isinstance(getattr(self._accessor, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise AttributeError(key)
class DatetimeAccessor(Accessor):
""" Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.microsecond # doctest: +SKIP
"""
_accessor = pd.Series.dt
_accessor_name = 'dt'
class StringAccessor(Accessor):
""" Accessor object for string properties of the Series values.
Examples
--------
>>> s.str.lower() # doctest: +SKIP
"""
_accessor = pd.Series.str
_accessor_name = 'str'
def _validate(self, series):
if not series.dtype == 'object':
raise AttributeError("Can only use .str accessor with object dtype")
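# Minimal usage sketch (not part of this module); the helper name is
# illustrative only.  The import happens at call time to avoid a circular
# import, and the string method is delegated to pandas per partition through
# map_partitions as implemented in Accessor above.
def _string_accessor_sketch():
    import dask.dataframe as dd
    s = dd.from_pandas(pd.Series(['a', 'Bb']), npartitions=2)
    return s.str.upper().compute().tolist()  # ['A', 'BB']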
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/accessor.py",
"copies": "2",
"size": "3172",
"license": "bsd-3-clause",
"hash": -6503069859536104000,
"line_mean": 30.0980392157,
"line_max": 80,
"alpha_frac": 0.5674653216,
"autogenerated": false,
"ratio": 4.280701754385965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5848167075985965,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import gsd.hoomd
import sklearn
import scipy.optimize as opt
import os
import os.path
import pdb
from sklearn.neighbors import BallTree
from sklearn.neighbors import radius_neighbors_graph
from scipy.spatial.distance import cdist,pdist
from scipy.special import erf
from scipy.sparse.csgraph import connected_components
from scipy.sparse import csr_matrix,lil_matrix,coo_matrix
#from .due import due, Doi
from .smoluchowski import massAvSize
from mpi4py import MPI
from cdistances import conOptDistanceCython,alignDistancesCython,subsquashRNG
from cdistances import squashRNGCOOCython
try:
    # scipy.weave (Python 2 / scipy < 0.19 only) is required by the
    # conOptDistanceC and alignedDistanceC helpers below
    from scipy import weave
except ImportError:
    weave = None
__all__ = ["ClusterSnapshot", "ContactClusterSnapshot",
"OpticalClusterSnapshot","AlignedClusterSnapshot",
"ContactClusterSnapshotXTC","OpticalClusterSnapshotXTC",
"SnapSystem",
"conOptDistance","conOptDistanceC","alignedDistance",
"alignedDistanceC","fixMisplacedArom","checkSymmetry",
"squashRNG","squashRNGCython","squashRNGPy","squashRNGCOO",
"squashRNGCOOCython"]
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# And calls this with duecredit (as in `python -m duecredit script.py`):
'''
due.cite(Doi("10.1167/13.9.30"),
description="Simple data analysis for clustering application",
tags=["data-analysis","clustering"],
path='clustering')
'''
def checkSymmetry(csr):
"""
Checks whether a matrix in CSR sparse format is symmetric.
Parameters
----------
csr: matrix in CSR format
Returns
-------
symyes: bool
True if symmetric, False if not
"""
symyes = not (csr!=csr.transpose()).max()
return symyes
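# Minimal sketch (not part of the original module; the helper name is
# illustrative only): a symmetric 2 x 2 contact graph passes the check,
# an asymmetric one does not.
def _checkSymmetry_sketch():
    sym = csr_matrix(np.array([[0., 1.], [1., 0.]]))
    asym = csr_matrix(np.array([[0., 1.], [0., 0.]]))
    return checkSymmetry(sym), checkSymmetry(asym)  # (True, False)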
def squashRNG(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms.
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
rng = rng.toarray()
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
#pdb.set_trace()
#molrng = lil_matrix(newsh)
molrng = np.zeros(newsh)
for i in range(0,newsh[0]):
for j in range(i+1,newsh[1]):
subrng = rng[apermol*i:apermol*(i+1),apermol*j:apermol*(j+1)]
if subrng.max():
molrng[i,j] = 1.0
molrng[j,i] = 1.0
return csr_matrix(molrng)
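# Minimal sketch (not part of the original module; the helper name is
# illustrative only): two molecules of two atoms each, with a single
# atom-atom contact between them, reduce to one molecule-molecule edge.
def _squashRNG_sketch():
    atom_graph = np.zeros((4, 4))
    atom_graph[1, 2] = atom_graph[2, 1] = 1.0  # atom 1 (mol 0) touches atom 2 (mol 1)
    molrng = squashRNG(csr_matrix(atom_graph), 2)
    return molrng.toarray()  # [[0., 1.], [1., 0.]]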
def squashRNGCOO(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms.
Uses COO format
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
molrng = lil_matrix(newsh)
rng = coo_matrix(rng)
rows = rng.row//apermol
cols = rng.col//apermol
rowcols = rows * molrng.shape[1] + cols
urowcols = np.unique(rowcols)
rows = urowcols // molrng.shape[1]
cols = urowcols % molrng.shape[1]
#pdb.set_trace()
for i in range(len(rows)):
row = rows[i]
col = cols[i]
if col > row:
molrng[row,col] = 1
#pdb.set_trace()
return csr_matrix(molrng)
def squashRNGCython(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms, but uses Cython code to improve speed.
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
rng = rng.toarray()
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
#pdb.set_trace()
#molrng = lil_matrix(newsh)
molrng = np.zeros(newsh)
molrng = subsquashRNG(rng,molrng,apermol)
return csr_matrix(molrng)
def squashRNGPy(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms. Dummy python debug test of Cython algorithm.
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
rng = rng.toarray()
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
#pdb.set_trace()
#molrng = lil_matrix(newsh)
molrng = np.zeros(newsh)
molrng = subsquashRNGPy(rng,molrng,apermol)
#pdb.set_trace()
return csr_matrix(molrng)
def subsquashRNGPy(rng,molrng,apermol):
"""
Python version of c algorithm that sets the block to 0 when all are 0
and 1 if at least 1 is 1
Parameters
----------
rng: a numpy array as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph
"""
dim = np.shape(molrng)[0]
sz = np.shape(rng)
rng = rng.reshape((1,sz[0]*sz[1]))[0]
molrng = molrng.reshape((1,dim*dim))[0]
for i in range(dim):
for j in range(i+1,dim):
istart = apermol*i;
iend = apermol*(i+1);
jstart = apermol*j;
jend = apermol*(j+1);
curr = 0;
#pdb.set_trace()
for k in range(istart,iend):
for m in range(jstart,jend):
if (rng[k*dim*apermol+m] != 0.):
curr = 1;
#pdb.set_trace()
if (curr == 1):
molrng[dim*i+j] = 1.0;
molrng[dim*j+i] = 1.0;
molrng = molrng.reshape((dim,dim))
return molrng
def fixMisplacedArom(gsdfile,gsdout,idMiss,idPartner,idNotMiss,idNotPartner
,molno,ats,ts):
"""
opens a gsd file, gets the trajectory, then writes out in place with
the incorrectly placed aromatic placed correctly
Parameters
----------
gsdfile: string
filename of the file to be rewritten
gsdout: string
where to write new stuff
idMiss: the id of the misplaced aromatic within the molecule
idPartner: the id of the partner to the misplaced aromatic within the mol
idNotMiss: the complementary correctly placed aromatic
idNotPartner: idNotMiss's partner
ts: which timesteps of the trajectory to rewrite
Notes
-----
pos(idMiss) = pos(idPartner) + (pos(idNotMiss) - pos(idNotPartner))
"""
traj = gsd.hoomd.open(gsdfile)
trajnew = gsd.hoomd.open(gsdout,'wb')
offset = molno
idMisses = offset+idMiss + np.arange(0,molno*(ats-1),ats-1)
idPartners = offset + idPartner + np.arange(0,molno*(ats-1),ats-1)
idNotMisses = offset + idNotMiss + np.arange(0,molno*(ats-1),ats-1)
idNotPartners = offset + idNotPartner + np.arange(0,molno*(ats-1),ats-1)
for t in ts:
snapshot = traj[t]
box = snapshot.configuration.box[0:3]
pos = snapshot.particles.position
pds = pos[idNotMisses] - pos[idNotPartners]
pds = pds - np.around(pds / box) * box
pos[idMisses] = pos[idPartners] + pds
snapnew = snapshot
snapnew.particles.position = pos
trajnew.append(snapnew)
def conOptDistance(x,y):
"""
Function that computes the distance between molecules for contact
or optical clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
The distance between x and y computed as the minimum distance
between any two beads in the molecules
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
    ats = int(len(x) / 3)
xa = np.reshape(x,[ats,3])
ya = np.reshape(y,[ats,3])
#return np.min(euclidean_distances(xa,ya,squared=True))
return np.min(cdist(xa,ya,metric='sqeuclidean'))
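# Minimal sketch (not part of the original module; the helper name is
# illustrative only): two 2-bead molecules laid out along x.  The metric is
# the squared separation of the closest bead pair, here (2 - 1)**2 = 1.0.
def _conOptDistance_sketch():
    x = np.array([0., 0., 0., 1., 0., 0.])  # beads at x = 0 and x = 1
    y = np.array([2., 0., 0., 3., 0., 0.])  # beads at x = 2 and x = 3
    return conOptDistance(x, y)  # 1.0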
def conOptDistanceC(x,y):
"""
Function that computes the distance between molecules for contact
or optical clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
The distance between x and y computed as the minimum distance
between any two beads in the molecules
Notes
-----
Uses scipy.weave to incorporate a little bit of C code to see if that
will speed things up
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
#xa = np.reshape(x,[ats,3])
#ya = np.reshape(y,[ats,3])
mind = 10000.0
support = '#include <math.h>'
code = """
int i,j;
return_val = 0;
double d;
for (i = 0; i < Nx[0]/3; i++) {
for (j = 0; j < Nx[0]/3; j++) {
d = (x[3*i] - y[3*j]) * (x[3*i] - y[3*j])
+ (x[3*i + 1] - y[3*j + 1]) * (x[3*i + 1] - y[3*j + 1])
+ (x[3*i + 2] - y[3*j + 2]) * (x[3*i + 2] - y[3*j + 2]);
if (d < mind){
mind = d;
}
}
}
return_val = mind;
"""
mind = weave.inline(code,['x', 'y', 'mind'],
support_code = support, libraries = ['m'])
#return np.min(euclidean_distances(xa,ya,squared=True))
return mind
def alignedDistance(x,y):
"""
Function that computes the distances between molecules for aligned clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
The distance between x and y computed as the minimum distance
between any two beads in the molecules
Raises
------
RuntimeError
if the array does not have a number of entries divisible by three
because it's supposed to be a flattened array of positions
Notes
-----
Compute the minimum distance of each COM to another COM
Take the three minimum distances of this list
Return the maximum of these three
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
ats = int(len(x)/3)
xa = np.reshape(x,[ats,3])
ya = np.reshape(y,[ats,3])
distmat = cdist(xa,ya,metric='sqeuclidean')
dists = np.zeros([ats * ats, 3])
dind = 0
for i in range(ats):
for j in range(ats):
dists[dind,0] = distmat[i,j]
dists[dind,1] = i
dists[dind,2] = j
dind += 1
sdists = dists[dists[:,0].argsort()]
i1 = sdists[0,1]
j1 = sdists[0,2]
i2 = i1
j2 = j1
ind2 = 1
while (i2 == i1) or (j2 == j1):
i2 = sdists[ind2,1]
j2 = sdists[ind2,2]
ind2 += 1
ind3 = ind2
    # seed with a conflicting pair so the scan below always examines
    # sdists[ind3] before returning
    i3 = i1
    j3 = j1
while (i3 == i1) or (i3 == i2) or (j3 == j1) or (j3 == j2):
i3 = sdists[ind3,1]
j3 = sdists[ind3,2]
ind3 += 1
return sdists[ind3-1,0]
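# Minimal sketch (not part of the original module; the helper name is
# illustrative only): three beads per molecule along x.  The three smallest
# squared separations over distinct bead pairs are 64, 100 and 144, and the
# metric returns the largest of them.
def _alignedDistance_sketch():
    x = np.array([0., 0., 0., 1., 0., 0., 2., 0., 0.])
    y = np.array([10., 0., 0., 11., 0., 0., 12., 0., 0.])
    return alignedDistance(x, y)  # 144.0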
def alignedDistanceC(x,y):
"""
Function that computes the distances between molecules for aligned clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
        The distance between x and y, taken as the largest of the three
        smallest distances between distinct pairs of beads (see Notes)
Raises
------
RuntimeError
if the array does not have a number of entries divisible by three
because it's supposed to be a flattened array of positions
Notes
-----
Compute the minimum distance of each COM to another COM
Take the three minimum distances of this list
Return the maximum of these three
Use scipy.weave and C++ to speed things up
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
ats = int(len(x)/3)
dists = np.zeros([ats * ats])
distsA = np.zeros([ats * ats])
distsB = np.zeros([ats * ats])
support = '#include <math.h>'
code = """
int i,j,dind = 0;
return_val = 0;
for (i = 0; i < ats; i++){
for (j = 0; j < ats; j++){
dists[dind] = (x[3 * i] - y[3 * j]) * (x[3 * i] - y[3 * j])
+ (x[3 * i + 1] - y[3 * j + 1]) * (x[3 * i + 1] - y[3 * j + 1])
+ (x[3 * i + 2] - y[3 * j + 2]) * (x[3 * i + 2] - y[3 * j + 2]);
distsA[dind] = i;
distsB[dind] = j;
dind++;
}
}
double mind = 10000.0;
int mindi, mindj;
for (int k = 0; k < ats * ats; k++){
if (dists[k] < mind){
mind = dists[k];
mindi = distsA[k];
mindj = distsB[k];
}
}
double mind2 = 10000.0;
int mind2i, mind2j;
for (int k = 0; k < ats * ats; k++){
if ((dists[k] < mind2) && (distsA[k] != mindi) && (distsB[k] != mindj))
{
mind2 = dists[k];
mind2i = distsA[k];
mind2j = distsB[k];
}
}
double mind3 = 10000.0;
for (int k = 0; k < ats * ats; k++){
if ((dists[k] < mind3) && (distsA[k] != mindi) && (distsB[k] != mindj)
&& (distsA[k] != mind2i) && (distsB[k] != mind2j)){
mind3 = dists[k];
}
}
return_val = mind3;
"""
mind3 = weave.inline(code,['x', 'y','dists','distsA','distsB','ats'],
support_code = support, libraries = ['m'])
return mind3
def fixCoords(pos,posinit,box):
"""
fixes all coords based on the initial coordinate and
the periodic boundary conditions
Parameters
----------
pos: 1 x 3*ats numpy array
positions of all the beads in the molecule
posinit: 1 x 3 numpy array
initial position on which the fixing is based
box: 1 x 3 numpy array
box dimensions
"""
for i in range(int(len(pos)/3)):
#pdb.set_trace()
dr = pos[3*i:3*i+3] - posinit
dr = dr - box*np.round(dr/box)
pos[3*i:3*i+3] = dr + posinit
return pos
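# Minimal sketch (not part of the original module; the helper name is
# illustrative only): a bead wrapped across the periodic boundary is shifted
# back next to the reference position.
def _fixCoords_sketch():
    box = np.array([10., 10., 10.])
    pos = np.array([4.8, 0., 0., -4.9, 0., 0.])  # second bead wrapped around
    return fixCoords(pos, np.array([4.8, 0., 0.]), box)  # second bead -> 5.1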
class SnapSystem(object):
"""Class for running the full suite of analysis software """
def __init__(self, traj, ats, molno, cldict,
clfunc={'contact':conOptDistanceCython,
'optical':conOptDistanceCython,
'aligned':alignDistancesCython},
compairs=np.array([[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]),
atype=u'LS',ttotal=-1,tstart=0,tpr=None,outGro='conf'):
""" Initialize a full system of gsd snapshots over a trajectory.
Parameters
----------
traj: a gsd.hoomd trajectory or a gro or an xtc file name
ats: dictionary
the number of beads in a single molecule for each cluster type
molno: int
the number of molecules in the system
cldict: dictionary
keys are strings representing cluster types, ie contact, optical,
aligned. values are cutoffs
clfunc: dictionary
keys are strings representing cluster types. values are
functions for distance computation
compairs: numpy array
for finding COM of aromatics for aligned clusters
atype: label
referring to how the aromatic beads are labeled in the trajectory
ttotal: int
the total length of the trajectory to be studied
if -1, assume it is the same as the length of the provided
trajectory
tstart: int
timestep to start at, defaults to zero
(last timestep = tstart + ttotal)
tpr: string
name of tpr file, used only with xtc trajectory
outGro: string
            name of file to save individual gro files to
Attributes
----------
mpi: bool
True if the system can run in MPI, false if it has to run serially
trajectory: gsd.hoomd trajectory
the trajectory of snapshots in the system
ats: int
the number of beads per molecule
molno: int
the number of molecules in the system
cldict: dict
keys are strings representing cluster types, ie contact, optical,
aligned. values are cutoffs
clsnaps: list of lists of clusterSnapshots
a list for each cluster type in the dictionary
each list is the snapshot at each timestep of the appropriate
type. If mpi is True, then this list is padded with dummy clusters
with NaN positions to make Scatter work correctly.
atype = label
referring to how aromatic beads are labeled in the trajectory
comm: MPI communicator
        Raises
        ------
NotImplementedError:
- if traj isn't a hoomd trajectory or a file ending
in xtc or gro
- if self.mpi is set to true for non hoomd stuff
ValueError:
- if tpr is set to None with an xtc file
Notes
-----
Allows for MPI implementation of system if the size of the
MPI communicator is greater than 1 AND it's a gsd system rather than
an XTC one
"""
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
self.comm = comm
if size > 1:
self.mpi = True
else:
self.mpi = False
#pdb.set_trace()
if (type(traj) is not str) and (type(traj) is not gsd.hoomd.HOOMDTrajectory):
raise NotImplementedError("Invalid trajectory type")
        if (type(traj) is not gsd.hoomd.HOOMDTrajectory):
if self.mpi:
raise NotImplementedError("MPI is only available for HOOMD trajectory types")
if (type(traj) is str):
spl = traj.split('.')
ext = spl[len(spl)-1]
if ext != 'gro' and ext != 'xtc':
raise NotImplementedError("Invalid trajectory type")
if ext == 'xtc' and tpr is None:
raise ValueError("tpr must have a value for xtc trajectories")
self.trajectory = traj
self.ats = ats
self.molno = molno
self.cldict = cldict
self.clfunc = clfunc
self.clsnaps = {}
self.atype = atype
if ttotal == -1:
ttotal = len(traj)
if self.mpi:
rank = comm.Get_rank()
num = int(np.floor(ttotal / size))
rem = ttotal % size
if rank == 0:
tslist = np.zeros((num + 1) * size).astype(int)
currid = 0
for r in range(size):
if rem != 0:
if r < rem:
ts = r * (num + 1) + np.arange(num + 1) + tstart
tslist[currid:(len(ts)+currid)] = ts
else:
ts = r * (num + 1) - (r - rem) + np.arange(num) + tstart
tslist[currid:(len(ts)+currid)] = ts
tslist[(len(ts)+currid):(len(ts) \
+ currid + (r-rem)+1)] = -1
currid += num + 1
else:
tslist = np.arange(num * size) + tstart
for ctype in cldict.keys():
if ctype == 'contact':
clusters = [ContactClusterSnapshot(t,traj,ats[ctype],
molno) \
for t in tslist]
elif ctype == 'optical':
clusters = [OpticalClusterSnapshot(t,traj,ats[ctype],
molno,
atype=atype) \
for t in tslist]
elif ctype == 'aligned':
clusters = [AlignedClusterSnapshot(t,traj,ats[ctype],
molno,
compairs=compairs,
atype=atype) \
for t in tslist]
else:
raise NotImplementedError("Unknown cluster type")
self.clsnaps[ctype] = clusters
else:
for ctype in cldict.keys():
if ctype == 'contact':
if type(traj) is str:
if ext == 'gro':
clusters = [ContactClusterSnapshotXTC(t, traj, ats,
molno) \
for t in range(tstart,ttotal+tstart)]
else:
                            #pdb.set_trace()
flag = False
for t in range(tstart,ttotal+tstart):
if not os.path.isfile(outGro+str(t)+'.gro'):
flag = True
break
if flag:
grocall = \
'echo 0 | trjconv -s {0} -f {1} -o {2}.gro -sep'.format(tpr,traj,outGro)
os.system(grocall)
clusters = [ContactClusterSnapshotXTC(t,
'conf'+str(t)+'.gro',ats,molno) \
for t in range(tstart,ttotal+tstart)]
else:
clusters = \
[ContactClusterSnapshot(t,traj,ats[ctype],molno) \
for t in range(tstart,ttotal+tstart)]
elif ctype == 'optical':
if type(traj) is str:
if ext == 'gro':
clusters = [OpticalClusterSnapshotXTC(t,traj,ats,
molno,compairs) \
for t in range(tstart,ttotal+tstart)]
else:
flag = False
for t in range(tstart,ttotal+tstart):
if not os.path.isfile(outGro+str(t)+'.gro'):
flag = True
break
if flag:
grocall = \
'echo 0 | trjconv -s {0} -f {1} -o {2}.gro -sep'.format(tpr,traj,outGro)
os.system(grocall)
clusters = [OpticalClusterSnapshotXTC(t,
'conf'+str(t)+'.gro',ats,molno,
compairs) \
for t in range(tstart,ttotal+tstart)]
else:
clusters = \
[OpticalClusterSnapshot(t,traj,ats[ctype],molno,
atype=atype) \
for t in range(tstart,ttotal+tstart)]
elif ctype == 'aligned':
if type(traj) is str:
raise NotImplementedError("Aligned cluster only available for HOOMD type trajectories")
else:
clusters = \
[AlignedClusterSnapshot(t,traj,ats[ctype],molno,
compairs=compairs,
atype=atype) \
for t in range(tstart,ttotal+tstart)]
else:
raise NotImplementedError("Unknown cluster type")
self.clsnaps[ctype] = clusters
def get_clusters_mpi(self,ctype,ttotal=-1):
""" Compute the clusters in each snapshot of the trajectory, using
MPI parallelization.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
ttotal: int
number of timesteps to compute for
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet.
Notes
------
Partition up the snapshots, turn them into numpy arrays of
relevant data, and have each processor compute the cluster IDs,
which is the only step that takes ages right now. Once computed,
gather them all back up at root.
"""
rank = self.comm.Get_rank()
size = self.comm.Get_size()
if ttotal == -1:
ttotal = len(self.trajectory)
num = int(np.floor(ttotal / size))
rem = ttotal % size
traj = self.trajectory
ats = self.ats
molno = self.molno
atype = self.atype
if ctype not in ['contact','optical','aligned']:
raise NotImplementedError('Unknown cluster type \
in get_clusters_mpi')
cutoff = self.cldict[ctype]
if rank == 0:
clusters = self.clsnaps[ctype]
carraylen = clusters[0].getCArrayLen()
clusterarray = np.zeros(carraylen * len(clusters))
cind = 0
for cls in clusters:
carray = cls.toArray()
clusterarray[(cind * carraylen):(cind * carraylen + carraylen)]\
= carray
cind += 1
else:
if ctype == 'contact':
tCSnap = ContactClusterSnapshot(0,traj,ats[ctype],molno)
elif ctype == 'optical':
tCSnap = OpticalClusterSnapshot(0,traj,ats[ctype],molno,
atype=atype)
elif ctype == 'aligned':
tCSnap = AlignedClusterSnapshot(0,traj,ats[ctype],molno,
atype=atype)
else:
tCSnap = ClusterSnapshot(0,traj,ats)
carraylen = tCSnap.getCArrayLen()
clusterarray = None
if rem == 0:
ncsnaps = num
else:
ncsnaps = num + 1
carray_local = np.zeros(ncsnaps * carraylen)
self.comm.Scatter(clusterarray,carray_local,root=0)
#for each local cluster array, turn it into a cluster, compute the
#clusterIDs, pack the whole thing up as an array again, and send back
#to root
for i in range(ncsnaps):
carrayi = carray_local[carraylen * i : (carraylen * i + carraylen)]
#print("From rank {0}, snap {1}, array{2}".format(rank,i,carrayi))
if not np.isnan(carrayi[4]):
if ctype == 'contact':
clustSnap = ContactClusterSnapshot(0,carrayi,ats[ctype],
molno)
elif ctype == 'optical':
clustSnap = OpticalClusterSnapshot(0,carrayi,ats[ctype],
molno,atype=atype)
elif ctype == 'aligned':
clustSnap = AlignedClusterSnapshot(0,carrayi,ats[ctype],
molno,atype=atype)
clustSnap.setClusterID(cutoff)
try:
carray_local[carraylen * i : (carraylen * i + carraylen)]\
= clustSnap.toArray()
except:
pdb.set_trace()
#print("Part 2: From rank {0}, snap {1}, array{2}".format(rank,i,carrayi))
self.comm.Barrier()
self.comm.Gather(carray_local,clusterarray,root=0)
if rank == 0:
ind = 0
nind = 0
while ind < ttotal:
carrayi = clusterarray[(carraylen * nind) : \
(carraylen * nind + carraylen)]
if not np.isnan(carrayi[4]):
if ctype == 'contact':
clustSnap = ContactClusterSnapshot(0,carrayi,
ats[ctype],molno)
elif ctype == 'optical':
clustSnap = OpticalClusterSnapshot(0,carrayi,
ats[ctype],molno,
atype=atype)
elif ctype == 'aligned':
clustSnap = AlignedClusterSnapshot(0,carrayi,
ats[ctype],molno,
atype=atype)
self.clsnaps[ctype][nind].clusterIDs = clustSnap.clusterIDs
#print("current pos: ",clustSnap.pos[0])
#print("current csizes: ",clustSnap.idsToSizes())
ind += 1
nind +=1
def get_clusters_serial(self,ctype,box,lcompute=None):
""" Compute the clusters in each snapshot of the trajectory, doing
so simply in serial.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
box: 3 x 1 numpy array
box side lengths
lcompute: string or None
if a string, this is the filename to write the length distributions
to after computation
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet.
"""
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type \
in get_clusters_serial.")
clusters = self.clsnaps[ctype]
cutoff = self.cldict[ctype]
func = self.clfunc[ctype]
if lcompute is not None:
lfile = open(lcompute,'w')
tstep = 0
for clustSnap in clusters:
tstep +=1
BT = clustSnap.setClusterID(cutoff)
if lcompute is not None:
ldistrib = clustSnap.getLengthDistribution(cutoff,box,func,
BT=BT)
for lmol in ldistrib:
lfile.write('{0} '.format(lmol))
lfile.write('\n')
if lcompute is not None:
lfile.close()
self.clsnaps[ctype] = clusters
def get_clusters_from_file(self,ctype,fname):
""" Compute the clusters in each snapshot of the trajectory from a
given file name, assuming serial.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
fname: string
file name where the cluster ID data is saved
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet
"""
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type \
in get_clusters_from_file.")
clusters = self.clsnaps[ctype]
for clustSnap in clusters:
clustSnap.setClusterIDFromFile(fname)
self.clsnaps[ctype] = clusters
def getLengthDistribution(self,ctype,cutoff,box,func=conOptDistanceCython,writegsd=None,
writeldistrib=None):
""" Gets the length distribution at each timestep and optionally
writes it out to file.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
cutoff: float
Cutoff for BallTree computation for unwrapping
box: 1x3 numpy array
box side lengths
func: python function
distance metric for BallTree computation
writegsd: string or None
used as the base filename to write out all clusters as separate
gsd files. Mostly useful for debugging purposes.
writeldistrib: string or None
the filename to write out the length distributions of the clusters
Returns
-------
ldistribt: T x molno numpy array
            contains, for each molecule and each timestep, the approximate
            end-to-end length of the cluster that the molecule participates in
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet
Notes
-----
Computes an approximation to the end-end length as the largest
distance between two participating COM beads. This is not the
best approximation if the aggregates are not very linear or if
they are linear but curl up a lot. It fails for a spanning cluster.
"""
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type \
in get_clusters_from_file.")
clusters = self.clsnaps[ctype]
ldistribt = np.zeros([len(self.trajectory),self.molno])
ind = 0
if writeldistrib is not None:
f = open(writeldistrib,'w')
for clustSnap in clusters:
ldistrib = clustSnap.getLengthDistribution(cutoff,box,func,
writegsd=writegsd)
ldistribt[ind,:] = ldistrib
if writeldistrib is not None:
for endendl in ldistrib:
f.write('{0} '.format(endendl))
f.write('\n')
ind += 1
if writeldistrib is not None:
f.close()
return ldistribt
def getMassAvVsTime(self,ctype,tstep=1):
""" Returns a numpy array of two columns, with time on the left and
mass-averaged cluster size on the right.
Parameters
----------
ctype: string
refers to cluster type for which to calculate this
tstep: float
converts timestep to some non-reduced value if desired
default is just 1
Returns
-------
mu2vtime: numpy array
Raises
------
NotImplementedError
If the cluster type is one that hasn't been programmed yet
"""
if self.comm.Get_rank() == 0:
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type.")
clsnaps = self.clsnaps[ctype]
mu2vtime = float('NaN')*np.ones([2,len(clsnaps)])
ind = 0
for clsnap in clsnaps:
if not np.isnan(clsnap.pos[0][0]):
mu2vtime[0,ind] = ind * tstep
mu2vtime[1,ind] = massAvSize(clsnap.idsToSizes())
ind += 1
m1 = mu2vtime[0,np.where(~np.isnan(mu2vtime[0]))[0]]
m2 = mu2vtime[1,np.where(~np.isnan(mu2vtime[0]))[0]]
mu2vtime = np.array([m1,m2])
return mu2vtime
def writeCIDs(self,ctype,fname):
""" Write out the cluster indices as a file that can be opened
and loaded later
Parameters
----------
ctype: string
cluster type
fname: string
file name to write to
Raises
------
NotImplementedError
If the cluster type is one that hasn't been programmed yet
"""
if ctype not in self.clsnaps.keys():
raise NotImplementedError("Unknown cluster type in writeCIDs.")
if self.comm.Get_rank() == 0:
fid = open(fname,'w')
clsnaps = self.clsnaps[ctype]
for clsnap in clsnaps:
if not np.isnan(clsnap.pos[0][0]):
cids = clsnap.clusterIDs
for cid in cids:
fid.write('{0} '.format(cid))
fid.write('\n')
fid.close()
def writeSizes(self,ctype,fname):
""" Write out the cluster sizes as a file that can be opened
and loaded later
Parameters
----------
ctype: string
cluster type
fname: string
file name to write to
Raises
------
NotImplementedError
If the cluster type is one that hasn't been programmed yet
"""
if ctype not in self.clsnaps.keys():
raise NotImplementedError("Unknown cluster type in writeSizes")
if self.comm.Get_rank() == 0:
fid = open(fname,'w')
clsnaps = self.clsnaps[ctype]
for clsnap in clsnaps:
if not np.isnan(clsnap.pos[0][0]):
csizes = clsnap.idsToSizes()
for csize in csizes:
fid.write('{0} '.format(csize))
fid.write('\n')
fid.close()
class ClusterSnapshot(object):
"""Class for tracking the location of clusters at each time step"""
def __init__(self, t, traj, ats):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
traj: a gsd.hoomd trajectory
ats: the number of beads in a single molecule
Raises
------
RuntimeError
if the number of particles doesn't divide evenly into molecules
"""
snapshot = traj[t]
self.timestep = t
self.ats = ats
binds = np.argsort(snapshot.particles.body)
self.pos = snapshot.particles.position[binds]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
raise RuntimeError("Number of particles not divisible by number \
of beads per molecules.")
self.nclusts = ats
        self.clusterIDs = np.zeros(int(sz[0] / ats))
class ContactClusterSnapshot(ClusterSnapshot):
"""Class for tracking the location of contact clusters at each time step
Attributes
----------
timestep: float
timestep
ats: int
number of beads per molecule
nclusts: int
number of clusters in the snapshot
pos: numpy array [M x 3*ats]
locations of molecules and beads within molecules
each molecule is its own line and then the locations of beads
are flattened within that
clusterIDs: list [len M]
"""
def __init__(self, t, trajectory, ats, molno):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of beads in a single molecule
molno: the number of molecules in the system
Raises
------
RuntimeError
if the number of particles does not divide evenly up into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
"""
self.timestep = t
self.ats = ats
if type(trajectory) is np.ndarray:
carray = trajectory
self.timestep = int(carray[0])
self.ats = int(carray[2])
self.nclusts = carray[1]
pend = 4 + 3 * ats * molno
self.pos = np.reshape(carray[4:pend],[molno,3*ats])
self.clusterIDs = carray[pend:len(carray)]
else:
if t != -1:
snapshot = trajectory[t]
binds = np.argsort(snapshot.particles.body)
self.pos = snapshot.particles.position[binds]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
raise RuntimeError("Number of particles not divisible by \
number of beads per molecules.")
#pdb.set_trace()
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
else:#create a dummy object to help with mpi scattering
snapshot = trajectory[0]
self.pos = snapshot.particles.position
sz = np.shape(self.pos)
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
self.pos = float('NaN') * self.pos
self.nclusts = molno
self.clusterIDs = range(int(sz[0] / ats))
def getCArrayLen(self):
"""
returns the numpy length of an array made by toArray
Returns
-------
carrayLen: int that is the length of the numpy array made by the
toArray fcn
"""
sz = np.shape(self.pos)
molno = sz[0]
carrayLen = 4 + 3 * self.ats * molno + molno
return carrayLen
def toArray(self):
"""
Put all the cluster information into a numpy array, for use with
mpi4py
Returns
-------
carray: numpy array containing all information in a specific order
Can be turned back into a cluster by calling the arrayToCluster
function in this module
Notes
-----
Contains:
[timestep (size 1) nclusts (size 1) ats (size 1)
positions (size 3 * ats * molno) clusterIDs (size molno)]
"""
sz = np.shape(self.pos)
carray = np.zeros(4 + 3 * self.ats * sz[0] + sz[0])
carray[0] = self.timestep
carray[1] = self.nclusts
carray[2] = self.ats
molno = sz[0]
carray[3] = molno
pend = 4 + 3 * self.ats * molno
carray[4:pend] = np.reshape(self.pos,[1,3*self.ats*molno])
clen = molno
carray[pend:(pend + clen)] = self.clusterIDs
return carray
def setClusterID(self,cutoff):
"""
Set the cluster IDs using getClusterID
Parameters
----------
cutoff: the squared distance molecules have to be within to be
part of the same cluster
Returns
-------
BT: BallTree of the system
"""
(nclusts,clusterIDs,BT) = \
self.getClusterID(self.pos,cutoff,conOptDistanceCython)
self.nclusts = nclusts
self.clusterIDs = clusterIDs
return BT
def setClusterIDFromFile(self,fname):
"""
Set the cluster IDs by opening a file and checking what they are
Parameters
----------
fname: string
the name of the file that contains the clusterIDs
Returns
-------
None, just sets clusterIDs
Notes
-----
File format is as written out by this code package
"""
f = open(fname)
lines = f.readlines()
f.close()
line = self.timestep
cIDs = lines[line].split()
self.clusterIDs = np.array([int(float(cID)) for cID in cIDs])
def getClusterID(self, positions,cutoff,func):
"""
Find the ID of which cluster each molecule is in
Parameters
----------
cutoff: the squared distance molecules have to be within to be
part of the same cluster
Returns
-------
clusterIDs: numpy array of the cluster index of the cluster that
each molecule occupies
nclusts: number of clusters
BT: BallTree for possible other computations
"""
sz = np.shape(positions)
pos3 = positions.reshape((int(sz[0]*sz[1]/3),3))
BT = BallTree(pos3,metric='euclidean')
rng = radius_neighbors_graph(BT,np.sqrt(cutoff))
rng = squashRNGCOOCython(rng,int(sz[1]/3))
(nclusts,clusterIDs) = connected_components(rng,directed=False,
return_labels=True,
connection='weak')
#pdb.set_trace()
return (nclusts,clusterIDs,BT)
def idsToSizes(self):
"""
Takes the cluster IDs and returns a list that for each molecule
gives the size of the cluster it is participating in
Returns
-------
clustSizes: numpy array
"""
clustSizes = np.arange(len(self.clusterIDs))
u,counts = np.unique(self.clusterIDs,return_counts=True)
dcounts = dict(zip(u,counts))
for cid in range(len(self.clusterIDs)):
clustSizes[cid] = dcounts[self.clusterIDs[cid]]
return clustSizes
def fixPBC(self,cID,cutoff,box,func,writegsd=None,BT=None):
"""
return positions for a particular cluster fixed across PBCs for
calculation of structural metrics like end-to-end length
Parameters
----------
cID: int
the cluster index for this particular cluster
cutoff: float
distance within which to search for neighbors
writegsd: bool
if not none, write out a gsd file to this name that shows the
resultant cluster
box: 1x3 numpy array
box side lengths
func: python function
distance metric for BallTree computation
BT: precomputed BallTree for cluster
if this is none, recompute the BallTree
Returns
-------
pos: numpy array of floats
the resultant positions of the cluster
Raises
------
RuntimeError: if there is more than one connected component
Notes
-----
Currently origin is in the center of the box; for these purposes,
all positions are reset such that the origin is at the corner.
"""
inds = np.where(self.clusterIDs==cID)[0]
positions = self.pos[inds,:]
sz = np.shape(positions)
fixedXYZ = positions.copy()
        potInds = list(range(1, int(sz[0])))
#if BT is None:
BT = BallTree(positions.reshape((int(sz[0]*sz[1]/3),3)),
metric='euclidean')
rng = radius_neighbors_graph(BT,np.sqrt(cutoff))
rng = squashRNGCOOCython(rng,int(sz[1]/3))
(nCC,CC) = connected_components(rng,connection='weak')
if nCC != 1:
raise RuntimeError("This isn't a fully connected cluster.")
fixedXYZ[0,:] = fixCoords(fixedXYZ[0,:].copy(),fixedXYZ[0,0:3].copy(),
box)
correctInds = [0]
while len(correctInds) > 0:
mol = correctInds.pop()
#neighs = BT.query_radius(positions[mol,:].reshape(1,-1),r=cutoff)[0]
#neighs = neighs.remove(mol)
neighs = np.where(rng[mol,:].toarray()[0]==1)[0]
for n in neighs:
#pdb.set_trace()
if n in potInds:
potInds.remove(n)
correctInds.append(n)
fixedXYZ[n,:] = fixCoords(fixedXYZ[n,:].copy(),
fixedXYZ[mol,0:3].copy(),box)
else:
continue
if writegsd is not None:
f = gsd.hoomd.open(writegsd,'wb')
s = gsd.hoomd.Snapshot()
            s.particles.N = sz[0] * sz[1] // 3
s.particles.position = fixedXYZ
s.configuration.box = np.concatenate((box,[0,0,0]))
f.append(s)
return fixedXYZ
def getLengthDistribution(self,cutoff,box,func=conOptDistanceCython,
writegsd=None,BT=None):
""" Finds the end-to-end cluster length distribution
Parameters
----------
cutoff: float
Cutoff for BallTree computation for unwrapping
box: 1x3 numpy array
box side lengths
func: python function
distance metric for BallTree computation
writegsd: string or None
used as the base filename to write out all clusters as separate
gsd files. Mostly useful for debugging purposes.
BT: None or BallTree
BallTree for cluster computation
Recomputes if None
Returns
-------
ldistrib: 1 x molno numpy array
length of the cluster each molecule belongs to
"""
ldistrib = np.zeros(len(self.pos))
for cID in range(self.nclusts):
inds = np.where(self.clusterIDs==cID)[0]
if len(inds) > 1:
if writegsd is not None:
cIDpos = self.fixPBC(cID,cutoff,box,func,
writegsd=writegsd+str(cID)+'.gsd',
BT=BT)
else:
cIDpos = self.fixPBC(cID,cutoff,box,func,BT=BT)
sz = np.shape(cIDpos)
#extract COM positions
xcom = np.sum(cIDpos[:,range(0,sz[1],3)],axis=1)/(sz[1]/3.)
ycom = np.sum(cIDpos[:,range(1,sz[1],3)],axis=1)/(sz[1]/3.)
zcom = np.sum(cIDpos[:,range(2,sz[1],3)],axis=1)/(sz[1]/3.)
cIDposcom = np.array([xcom,ycom,zcom])
endendl = np.sqrt(max(pdist(cIDposcom.transpose(),metric='sqeuclidean')))
ldistrib[inds] = endendl
return ldistrib
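# --- Illustrative sketch (not part of the original module) -------------------
# Minimal round trip through the flat-array interface used for MPI scattering.
# The layout assumed here is exactly what the constructor above unpacks:
# [timestep, nclusts, ats, molno, flattened positions, clusterIDs].  Wrapped in
# a function so importing the module stays side-effect free; all numbers are
# made up.
def _example_contact_cluster_from_array():
    ats, molno = 2, 3
    pos = np.arange(3 * ats * molno, dtype=float)   # fake flattened coordinates
    carray = np.concatenate(([0, 1, ats, molno],    # timestep, nclusts, ats, molno
                             pos,
                             np.zeros(molno)))      # one cluster holding everything
    snap = ContactClusterSnapshot(0, carray, ats, molno)
    assert snap.pos.shape == (molno, 3 * ats)
    assert len(snap.toArray()) == snap.getCArrayLen()
    return snap.idsToSizes()                        # -> array([3, 3, 3])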
class OpticalClusterSnapshot(ContactClusterSnapshot):
"""Class for tracking the location of optical clusters at each time step"""
def __init__(self, t, trajectory, ats, molno, atype=u'LS'):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of aromatics in a single molecule
molno: the number of molecules in the system
compairs: m x n numpy array
these are the comparative indices of the beads making up each
aromatic group, where m is the number of aromatics and n is the
number of beads in the group, eg for two beads representing a
ring in the 3-core model, this should be
[[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]
atype: hoomd bead type
should be the type referring to the aromatic beads
Raises
------
RuntimeError
if the number of particles does not divide evenly up into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
An optical cluster snapshot tracks the positions of the COMs of the
optical clusters, rather than the positions of the separate beads,
as the contact cluster does
"""
self.timestep = t
self.ats = ats
if type(trajectory) is np.ndarray:
carray = trajectory
self.timestep = int(carray[0])
self.ats = int(carray[2])
self.nclusts = carray[1]
pend = 4 + 3 * ats * molno
self.pos = np.reshape(carray[4:pend],[molno,3*ats])
self.clusterIDs = carray[pend:len(carray)]
else:
if t != -1:
snapshot = trajectory[t]
#self.pos = self.getComs(compairs,atype,trajectory[t],molno)
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
self.pos = \
snapshot.particles.position[np.where(types==tind)[0]]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
raise RuntimeError("Number of particles not divisible by \
number of beads per molecules.")
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
else:#create a dummy object to help with mpi scattering
snapshot = trajectory[0]
#self.pos = self.getComs(compairs,atype,snapshot,molno)
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
self.pos = \
snapshot.particles.position[np.where(types==tind)[0]]
sz = np.shape(self.pos)
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
self.pos = float('NaN') * self.pos
self.nclusts = molno
self.clusterIDs = range(int(sz[0] / ats))
class AlignedClusterSnapshot(OpticalClusterSnapshot):
"""Class for tracking the location of aligned clusters at each time step"""
def getComsGeneral(self,compairs,atype,snapshot,molno):
"""Helper function to get the COMs of a subset of beads
Parameters
----------
compairs: m x n numpy array
these are the comparative indices of the beads making up each
aromatic group, where m is the number of aromatics and n is the
number of beads in the group, eg for two beads representing a
ring in the 3-core model, this should be
[[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]
atype: hoomd bead type
should be the type referring to the aromatic beads
snapshot: gsd snapshot at the particular time of interest
molno: int
number of molecules in snapshot
Returns
-------
aCOMS: nPairs x 3 numpy array
array of COM positions for each bead
Raises
------
RuntimeError
if the number of beads in the aromatics isn't equal to the
total number of aromatics * beads in an aromatic
Notes
-----
This is the more general way of finding COM and can be used in the
future but currently should not be called.
"""
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
ats = self.ats
aBeads = snapshot.particles.position[np.where(types==tind)[0]]
pairShape = np.shape(compairs)
nPairs = pairShape[0]
aromSize = pairShape[1]
beadNo = np.shape(aBeads)[0]
if nPairs * aromSize != beadNo / molno:
raise RuntimeError("number of beads ({0} in {1} molecules)\
does not divide cleanly \
among aromatics ({2}) of size {3}".format(beadNo,molno,nPairs,
aromSize))
aCOMs = np.zeros([nPairs * molno,3])
for moli in range(molno):
            aBeadsMol = aBeads[(moli * beadNo // molno):
                               (moli * beadNo // molno) + beadNo // molno, :]
for m in range(nPairs):
aCOMs[moli*nPairs + m,:] = np.mean(aBeadsMol[compairs[m]],
axis=0)
return aCOMs
def __init__(self, t, trajectory, ats, molno,
compairs=np.array([[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]),
atype=u'LS'):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of aromatics in a single molecule
molno: the number of molecules in the system
compairs: m x n numpy array
these are the comparative indices of the beads making up each
aromatic group, where m is the number of aromatics and n is the
number of beads in the group, eg for two beads representing a
ring in the 3-core model, this should be
[[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]
atype: hoomd bead type
should be the type referring to the aromatic beads
Raises
------
RuntimeError
if the number of particles does not divide evenly up into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
An aligned cluster snapshot just uses a different distance metric
from an optical cluster snapshot
"""
self.timestep = t
self.ats = ats
if type(trajectory) is np.ndarray:
carray = trajectory
self.timestep = int(carray[0])
self.ats = int(carray[2])
self.nclusts = carray[1]
pend = 4 + 3 * ats * molno
self.pos = np.reshape(carray[4:pend],[molno,3*ats])
self.clusterIDs = carray[pend:len(carray)]
self.box = None
else:
if t != -1:
snapshot = trajectory[t]
self.box = snapshot.configuration.box
self.pos = self.getComs(compairs,atype,trajectory[t],molno)
sz = np.shape(self.pos)
if sz[0] % ats != 0:
raise RuntimeError("Number of particles not divisible by \
number of beads per molecules.")
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
else:#create a dummy object to help with mpi scattering
snapshot = trajectory[0]
self.box = snapshot.configuration.box
self.pos = self.getComs(compairs,atype,snapshot,molno)
sz = np.shape(self.pos)
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
self.pos = float('NaN') * self.pos
self.nclusts = molno
self.clusterIDs = range(int(sz[0] / ats))
def getComs(self,compairs,atype,snapshot,molno,missingID=None):
"""Helper function to get the COMs of a subset of beads
Parameters
----------
compairs: m x n numpy array
these are the comparative indices of the beads making up each
aromatic group, where m is the number of aromatics and n is the
number of beads in the group, eg for two beads representing a
ring in the 3-core model, this should be
[[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]
atype: hoomd bead type
should be the type referring to the aromatic beads
snapshot: gsd snapshot at the particular time of interest
molno: int
number of molecules in snapshot
Returns
-------
aCOMS: nPairs x 3 numpy array
array of COM positions for each bead
Raises
------
RuntimeError
if the number of beads in the aromatics isn't equal to the
total number of aromatics * beads in an aromatic
RuntimeError
if the pairs aren't pairs -> that requires a DIFFERENT KIND OF
CLUSTER
NotImplementedError
if box isn't set
Notes
-----
For this type of cluster, we check the vector pointing between the
first bead pair and assume that the COM is located at bead1 + 1/2(vec)
for all three COMs
This will *only* work for COM pairs of beads and you need to know
which way rvec should be going! (which depends on which bead is
missing, if any of them are.) If there is a bead missing from the
pairs you MUST check which one it is and pass in whether rvec
should be reversed.
"""
if self.box is None:
raise NotImplementedError("You are running on a cluster created from an array, which does not yet support box type analysis.")
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
aBeads = snapshot.particles.position[np.where(types==tind)[0]]
pairShape = np.shape(compairs)
nPairs = pairShape[0]
aromSize = pairShape[1]
if pairShape[1] != 2:
raise RuntimeError("Not pairs. Call the general getCOM function")
beadNo = np.shape(aBeads)[0]
if nPairs * aromSize != beadNo / molno:
raise RuntimeError("number of beads ({0} in {1} molecules)\
does not divide cleanly \
among aromatics ({2}) of size {3}".format(beadNo,molno,nPairs,
aromSize))
aCOMs = np.zeros([nPairs * molno,3])
for moli in range(molno):
            aBeadsMol = aBeads[(moli * beadNo // molno):
                               (moli * beadNo // molno) + beadNo // molno, :]
#pdb.set_trace()
rvec = (aBeadsMol[compairs[0][1]] - aBeadsMol[compairs[0][0]])/2
rvec = rvec - np.around(rvec/self.box[0:3])*self.box[0:3]
for m in range(nPairs):
if compairs[m][1] == missingID:
rvec = (aBeadsMol[compairs[0][1]] \
- aBeadsMol[compairs[0][0]])/2
rvec = rvec - np.around(rvec/self.box[0:3])*self.box[0:3]
comloc = aBeadsMol[compairs[m][0]]+rvec
#pdb.set_trace()
elif compairs[m][0] == missingID:
rvec = (aBeadsMol[compairs[0][0]] \
- aBeadsMol[compairs[0][1]])/2
rvec = rvec - np.around(rvec/self.box[0:3])*self.box[0:3]
comloc = aBeadsMol[compairs[m][0]]+rvec
#pdb.set_trace()
else:
cv = aBeadsMol[compairs[m][0]] - aBeadsMol[compairs[m][1]]
cv = cv - np.around(cv/self.box[0:3])*self.box[0:3]
comloc = aBeadsMol[compairs[m][1]]+cv/2
#comloc = np.mean(aBeadsMol[compairs[m]],axis=0)
#pdb.set_trace()
                # disabled leftover debugging trap for one specific coordinate
                #if np.isclose(comloc,np.array([9.360982,-1.270450,1.375538])).all():
                #    pdb.set_trace()
aCOMs[moli*nPairs + m,:] = comloc
return aCOMs
def writeCOMsGSD(self,gsdname):
""" Write out a GSD file of this snapshot that shows the locations of
the aligned COMs after initialization
Parameters
----------
gsdname: string
what name to save the file to
"""
try:
gsdf = gsd.hoomd.open(gsdname,'ab')
except IOError:
gsdf = gsd.hoomd.open(gsdname,'wb')
sz = np.shape(self.pos)
molno = sz[0]
pos = np.reshape(self.pos,[sz[0]*self.ats,3])
#pdb.set_trace()
pN = sz[0]*self.ats
ptypes = ['A']
ptypeid = np.zeros(molno*self.ats).astype(int)
pbox = self.box
s = gsd.hoomd.Snapshot()
s.particles.N = pN
s.configuration.step = self.timestep
s.particles.types = ptypes
s.particles.typeid = ptypeid
s.configuration.box = pbox
s.particles.position = pos
gsdf.append(s)
def setClusterID(self,cutoff):
"""
Set the cluster IDs using getClusterID
Parameters
----------
cutoff: the squared distance molecules have to be within to be
part of the same cluster
Returns
-------
BT: BallTree
for length computation
"""
(nclusts,clusterIDs,BT) = \
self.getClusterID(self.pos,cutoff,alignDistancesCython)
self.nclusts = nclusts
self.clusterIDs = clusterIDs
return BT
def getClusterID(self, positions,cutoff,func):
"""
Find the ID of which cluster each molecule is in
Parameters
----------
cutoff: the squared distance molecules have to be within to be
part of the same cluster
Returns
-------
clusterIDs: numpy array of the cluster index of the cluster that
each molecule occupies
nclusts: number of clusters
BT: BallTree for possible other computations
"""
BT = BallTree(positions,metric='pyfunc',
func=func)
rng = radius_neighbors_graph(BT,cutoff)
(nclusts,clusterIDs) = connected_components(rng,directed=False,
return_labels=True)
return (nclusts,clusterIDs,BT)
class ContactClusterSnapshotXTC(ContactClusterSnapshot):
""" Class for tracking contact cluster locations that are initialized
from an xtc/Gromacs file instead of a HOOMD one
Attributes
----------
timestep: float
timestep
ats: int
number of beads per molecule
nclusts: int
number of clusters in the snapshot
pos: numpy array [M x 3*ats]
locations of molecules and beads within molecules
each molecule is its own line and then the locations of beads
are flattened within that
clusterIDs: list [len M]
"""
def readGro(self,fName):
""" Get a list of positions from a Gromacs .gro file
Parameters
----------
fname: string
name of .gro file
Returns
-------
pos: numpy vector [len 3 * molecules * ats]
1D list of positions in .gro file
"""
with open(fName, 'r') as myF:
myLns = myF.read().splitlines()
boxL1 = float(myLns[len(myLns)-1].split()[0])
boxL2 = float(myLns[len(myLns)-1].split()[1])
boxL3 = float(myLns[len(myLns)-1].split()[2])
return (np.array([[float(myLns[i][20:].split()[0]),
float(myLns[i][20:].split()[1]),
float(myLns[i][20:].split()[2])]\
for i in range(2, len(myLns)-1)]).flatten(),
np.array([boxL1,boxL2,boxL3]))
def __init__(self, t, trj, ats, molno):
""" Initialize a ContactClusterSnapshotXTC object.
Parameters
----------
t: timestep
trj: Gromacs trajectory name (xtc format)
tpr: Gromacs run file name (tpr format)
outGro: name for an output Gromacs .gro file (no file extension)
ats: the number of beads in a single molecule
molno: the number of molecules in the system
the index of the cluster that each molecule belongs to
Raises
------
RuntimeError
if the number of particles does not divide evenly up into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
"""
self.timestep = t
self.ats = ats
self.nclusts = molno
self.clusterIDs = np.zeros(molno)
self.pos = self.readGro(trj)[0]
if len(self.pos) != 3 * molno * ats:
raise RuntimeError("incorrect number of atoms or molecules")
#pdb.set_trace()
self.pos = np.reshape(self.pos,[molno,3*ats])
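# --- Illustrative sketch (not part of the original module) -------------------
# Writes a tiny two-bead .gro file in the fixed-column layout that readGro()
# expects (coordinates start at character 20) and loads it as a single
# one-molecule contact cluster.  The file name and coordinates are made up.
def _example_read_gro(tmpname='example_snapshot.gro'):
    lines = ['example system', '2']
    for i, xyz in enumerate([(0.0, 0.0, 0.0), (0.5, 0.0, 0.0)]):
        lines.append('%5d%-5s%5s%5d%8.3f%8.3f%8.3f'
                     % (1, 'MOL', 'A', i + 1, xyz[0], xyz[1], xyz[2]))
    lines.append('   5.00000   5.00000   5.00000')
    with open(tmpname, 'w') as fgro:
        fgro.write('\n'.join(lines) + '\n')
    snap = ContactClusterSnapshotXTC(0, tmpname, 2, 1)
    return snap.pos   # shape (1, 6): one molecule, two beads flattened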
class OpticalClusterSnapshotXTC(ContactClusterSnapshotXTC):
""" Class for tracking optical cluster locations that are initialized
from an xtc/Gromacs file instead of a HOOMD one
Attributes
----------
timestep: float
timestep
ats: int
number of beads per molecule
nclusts: int
number of clusters in the snapshot
pos: numpy array [M x 3*ats]
locations of molecules and beads within molecules
each molecule is its own line and then the locations of beads
are flattened within that
clusterIDs: list [len M]
"""
def __init__(self, t, trj, ats, molno, comIDs):
""" Initialize a ContactClusterSnapshotXTC object.
Parameters
----------
t: timestep
trj: Gromacs trajectory name (xtc format)
tpr: Gromacs run file name (tpr format)
outGro: name for an output Gromacs .gro file
ats: the number of beads in a single molecule
molno: the number of molecules in the system
the index of the cluster that each molecule belongs to
comIDs: N x M numpy array of ints
bead IDs of the beads in the N cores with M participating beads
each
Raises
------
RuntimeError
if the number of particles does not divide evenly up into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
"""
self.timestep = t
self.ats = ats
self.nclusts = molno
self.clusterIDs = np.zeros(molno)
self.pos = self.readGro(trj)[0]
if len(self.pos) != 3 * molno * ats:
raise RuntimeError("incorrect number of atoms or molecules")
#pdb.set_trace()
self.pos = np.reshape(self.pos,[molno,3*ats])
M = np.shape(comIDs)[1]
pos = np.zeros((molno,3*np.shape(comIDs)[0]))
for mol in range(molno):
for com in range(np.shape(comIDs)[0]):
inds = comIDs[com,:]
compos = np.array([self.pos[mol,3*inds+i].sum()/M \
for i in range(3)])
pos[mol,3*com:(3*com+3)] = compos
self.pos = pos
| {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/scripts/old-scripts/clustering_temp.py",
"copies": "1",
"size": "73255",
"license": "mit",
"hash": -7672692854043939000,
"line_mean": 34.6298638132,
"line_max": 138,
"alpha_frac": 0.5161831957,
"autogenerated": false,
"ratio": 4.178121257058119,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5194304452758118,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from .common import is_np_datetime_like, _contains_datetime_like_objects
from .pycompat import dask_array_type
def _season_from_months(months):
"""Compute season (DJF, MAM, JJA, SON) from month ordinal
"""
# TODO: Move "season" accessor upstream into pandas
seasons = np.array(['DJF', 'MAM', 'JJA', 'SON'])
months = np.asarray(months)
return seasons[(months // 3) % 4]
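# Example (illustration only): month numbers map straight onto the four
# meteorological seasons, with December wrapping around into DJF:
#     >>> _season_from_months([12, 1, 2, 6])
#     array(['DJF', 'DJF', 'DJF', 'JJA'], dtype='<U3')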
def _access_through_cftimeindex(values, name):
"""Coerce an array of datetime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
if name == 'season':
months = values_as_cftimeindex.month
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_cftimeindex, name)
return field_values.reshape(values.shape)
def _access_through_series(values, name):
"""Coerce an array of datetime-like values to a pandas Series and
access requested datetime component
"""
values_as_series = pd.Series(values.ravel())
if name == "season":
months = values_as_series.dt.month.values
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_series.dt, name).values
return field_values.reshape(values.shape)
def _get_date_field(values, name, dtype):
"""Indirectly access pandas' libts.get_date_field by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str
Name of datetime field to access
dtype : dtype-like
dtype for output date field values
Returns
-------
datetime_fields : same type as values
Array-like of datetime fields accessed for each element in values
"""
if is_np_datetime_like(values.dtype):
access_method = _access_through_series
else:
access_method = _access_through_cftimeindex
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(access_method,
values, name, dtype=dtype)
else:
return access_method(values, name)
def _round_series(values, name, freq):
"""Coerce an array of datetime-like values to a pandas Series and
apply requested rounding
"""
values_as_series = pd.Series(values.ravel())
method = getattr(values_as_series.dt, name)
field_values = method(freq=freq).values
return field_values.reshape(values.shape)
def _round_field(values, name, freq):
"""Indirectly access pandas rounding functions by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str (ceil, floor, round)
Name of rounding function
freq : a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(_round_series,
values, name, freq=freq, dtype=np.datetime64)
else:
return _round_series(values, name, freq)
class DatetimeAccessor(object):
"""Access datetime fields for DataArrays with datetime-like dtypes.
Similar to pandas, fields can be accessed through the `.dt` attribute
for applicable DataArrays:
>>> ds = xarray.Dataset({'time': pd.date_range(start='2000/01/01',
... freq='D', periods=100)})
>>> ds.time.dt
<xarray.core.accessors.DatetimeAccessor at 0x10c369f60>
>>> ds.time.dt.dayofyear[:5]
<xarray.DataArray 'dayofyear' (time: 5)>
array([1, 2, 3, 4, 5], dtype=int32)
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
All of the pandas fields are accessible here. Note that these fields are
not calendar-aware; if your datetimes are encoded with a non-Gregorian
calendar (e.g. a 360-day calendar) using cftime, then some fields like
`dayofyear` may not be accurate.
"""
def __init__(self, xarray_obj):
if not _contains_datetime_like_objects(xarray_obj):
raise TypeError("'dt' accessor only available for "
"DataArray with datetime64 timedelta64 dtype or "
"for arrays containing cftime datetime "
"objects.")
self._obj = xarray_obj
def _tslib_field_accessor(name, docstring=None, dtype=None):
def f(self, dtype=dtype):
if dtype is None:
dtype = self._obj.dtype
obj_type = type(self._obj)
result = _get_date_field(self._obj.data, name, dtype)
return obj_type(result, name=name,
coords=self._obj.coords, dims=self._obj.dims)
f.__name__ = name
f.__doc__ = docstring
return property(f)
year = _tslib_field_accessor('year', "The year of the datetime", np.int64)
month = _tslib_field_accessor(
'month', "The month as January=1, December=12", np.int64
)
day = _tslib_field_accessor('day', "The days of the datetime", np.int64)
hour = _tslib_field_accessor('hour', "The hours of the datetime", np.int64)
minute = _tslib_field_accessor(
'minute', "The minutes of the datetime", np.int64
)
second = _tslib_field_accessor(
'second', "The seconds of the datetime", np.int64
)
microsecond = _tslib_field_accessor(
'microsecond', "The microseconds of the datetime", np.int64
)
nanosecond = _tslib_field_accessor(
'nanosecond', "The nanoseconds of the datetime", np.int64
)
weekofyear = _tslib_field_accessor(
'weekofyear', "The week ordinal of the year", np.int64
)
week = weekofyear
dayofweek = _tslib_field_accessor(
'dayofweek', "The day of the week with Monday=0, Sunday=6", np.int64
)
weekday = dayofweek
weekday_name = _tslib_field_accessor(
'weekday_name', "The name of day in a week (ex: Friday)", object
)
dayofyear = _tslib_field_accessor(
'dayofyear', "The ordinal day of the year", np.int64
)
quarter = _tslib_field_accessor('quarter', "The quarter of the date")
days_in_month = _tslib_field_accessor(
'days_in_month', "The number of days in the month", np.int64
)
daysinmonth = days_in_month
season = _tslib_field_accessor(
"season", "Season of the year (ex: DJF)", object
)
time = _tslib_field_accessor(
"time", "Timestamps corresponding to datetimes", object
)
def _tslib_round_accessor(self, name, freq):
obj_type = type(self._obj)
result = _round_field(self._obj.data, name, freq)
return obj_type(result, name=name,
coords=self._obj.coords, dims=self._obj.dims)
def floor(self, freq):
'''
Round timestamps downward to specified frequency resolution.
Parameters
----------
freq : a freq string indicating the rounding resolution
e.g. 'D' for daily resolution
Returns
-------
floor-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
'''
return self._tslib_round_accessor("floor", freq)
def ceil(self, freq):
'''
Round timestamps upward to specified frequency resolution.
Parameters
----------
freq : a freq string indicating the rounding resolution
e.g. 'D' for daily resolution
Returns
-------
ceil-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
'''
return self._tslib_round_accessor("ceil", freq)
def round(self, freq):
'''
Round timestamps to specified frequency resolution.
Parameters
----------
freq : a freq string indicating the rounding resolution
e.g. 'D' for daily resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
'''
return self._tslib_round_accessor("round", freq)
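# --- Illustrative sketch (not part of the upstream module) -------------------
# How the accessor is reached in practice: a DataArray with a datetime64
# coordinate exposes these fields through ``.dt``.  The variable names and
# times below are made up.
def _example_dt_accessor():
    import xarray as xr
    times = pd.date_range('2000-01-01', periods=4, freq='6H')
    da = xr.DataArray(np.arange(4), dims='time', coords={'time': times})
    seasons = da.time.dt.season       # 'DJF' for every January timestamp
    floored = da.time.dt.floor('D')   # each timestamp rounded down to midnight
    return seasons, floored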
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/accessors.py",
"copies": "1",
"size": "8808",
"license": "apache-2.0",
"hash": -1113364668170456800,
"line_mean": 32.8769230769,
"line_max": 79,
"alpha_frac": 0.6192098093,
"autogenerated": false,
"ratio": 3.9927470534904805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5111956862790481,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from .core import Series, DataFrame, map_partitions, apply_concat_apply
from . import methods
from .utils import is_categorical_dtype, is_scalar, has_known_categories
###############################################################
# Dummies
###############################################################
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables. Data must
have category dtype to infer result's ``columns``
Parameters
----------
data : Series or DataFrame with category dtype
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`category` dtype will be converted.
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
Returns
-------
dummies : DataFrame
"""
if isinstance(data, (pd.Series, pd.DataFrame)):
return pd.get_dummies(data, prefix=prefix,
prefix_sep=prefix_sep, dummy_na=dummy_na,
columns=columns, sparse=sparse,
drop_first=drop_first)
not_cat_msg = ("`get_dummies` with non-categorical dtypes is not "
"supported. Please use `df.categorize()` beforehand to "
"convert to categorical dtype.")
unknown_cat_msg = ("`get_dummies` with unknown categories is not "
"supported. Please use `column.cat.as_known()` or "
"`df.categorize()` beforehand to ensure known "
"categories")
if isinstance(data, Series):
if not is_categorical_dtype(data):
raise NotImplementedError(not_cat_msg)
if not has_known_categories(data):
raise NotImplementedError(unknown_cat_msg)
elif isinstance(data, DataFrame):
if columns is None:
if (data.dtypes == 'object').any():
raise NotImplementedError(not_cat_msg)
columns = data._meta.select_dtypes(include=['category']).columns
else:
if not all(is_categorical_dtype(data[c]) for c in columns):
raise NotImplementedError(not_cat_msg)
if not all(has_known_categories(data[c]) for c in columns):
raise NotImplementedError(unknown_cat_msg)
if sparse:
raise NotImplementedError('sparse=True is not supported')
return map_partitions(pd.get_dummies, data, prefix=prefix,
prefix_sep=prefix_sep, dummy_na=dummy_na,
columns=columns, sparse=sparse,
drop_first=drop_first)
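# --- Illustrative sketch (not part of the module) ----------------------------
# get_dummies needs categorical columns with *known* categories, so the usual
# pattern is to call ``categorize`` (or ``.cat.as_known()``) first.  The data
# below is made up.
def _example_get_dummies():
    import dask.dataframe as dd
    pdf = pd.DataFrame({'fruit': ['apple', 'banana', 'apple', 'cherry']})
    ddf = dd.from_pandas(pdf, npartitions=2).categorize(columns=['fruit'])
    return get_dummies(ddf, columns=['fruit']).compute()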
###############################################################
# Pivot table
###############################################################
def pivot_table(df, index=None, columns=None,
values=None, aggfunc='mean'):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.
Parameters
----------
data : DataFrame
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
"""
if not is_scalar(index) or index is None:
raise ValueError("'index' must be the name of an existing column")
if not is_scalar(columns) or columns is None:
raise ValueError("'columns' must be the name of an existing column")
if not is_categorical_dtype(df[columns]):
raise ValueError("'columns' must be category dtype")
if not has_known_categories(df[columns]):
raise ValueError("'columns' must have known categories. Please use "
"`df[columns].cat.as_known()` beforehand to ensure "
"known categories")
if not is_scalar(values) or values is None:
raise ValueError("'values' must be the name of an existing column")
if not is_scalar(aggfunc) or aggfunc not in ('mean', 'sum', 'count'):
raise ValueError("aggfunc must be either 'mean', 'sum' or 'count'")
# _emulate can't work for empty data
# the result must have CategoricalIndex columns
new_columns = pd.CategoricalIndex(df[columns].cat.categories, name=columns)
meta = pd.DataFrame(columns=new_columns, dtype=np.float64)
kwargs = {'index': index, 'columns': columns, 'values': values}
pv_sum = apply_concat_apply([df],
chunk=methods.pivot_sum,
aggregate=methods.pivot_agg,
meta=meta,
token='pivot_table_sum',
chunk_kwargs=kwargs)
pv_count = apply_concat_apply([df],
chunk=methods.pivot_count,
aggregate=methods.pivot_agg,
meta=meta,
token='pivot_table_count',
chunk_kwargs=kwargs)
if aggfunc == 'sum':
return pv_sum
elif aggfunc == 'count':
return pv_count
elif aggfunc == 'mean':
return pv_sum / pv_count
else:
raise ValueError
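# --- Illustrative sketch (not part of the module) ----------------------------
# ``columns`` must be a known categorical so the result's columns can be
# inferred without reading the data; ``values`` is aggregated per
# (index, columns) cell.  The data below is made up.
def _example_pivot_table():
    import dask.dataframe as dd
    pdf = pd.DataFrame({'shop': ['a', 'a', 'b', 'b'],
                        'item': ['x', 'y', 'x', 'y'],
                        'amount': [1.0, 2.0, 3.0, 4.0]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    ddf['item'] = ddf['item'].astype('category').cat.as_known()
    return pivot_table(ddf, index='shop', columns='item',
                       values='amount', aggfunc='sum').compute()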
###############################################################
# Melt
###############################################################
def melt(frame, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from dask.dataframe.core import no_default
return frame.map_partitions(pd.melt, meta=no_default, id_vars=id_vars,
value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level, token='melt')
| {
"repo_name": "cpcloud/dask",
"path": "dask/dataframe/reshape.py",
"copies": "1",
"size": "6863",
"license": "bsd-3-clause",
"hash": 4905754351346982000,
"line_mean": 38.4425287356,
"line_max": 79,
"alpha_frac": 0.5568993152,
"autogenerated": false,
"ratio": 4.646580907244414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5703480222444415,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from ..external.six import string_types
__all__ = ['unique', 'shape_to_string', 'view_shape', 'stack_view',
'coerce_numeric', 'check_sorted']
def unique(array):
"""
Return the unique elements of the array U, as well as
the index array I such that U[I] == array
:param array: The array to use
:returns: U, I
:rtype: tuple of arrays
"""
# numpy.unique doesn't handle mixed-types on python3,
# so we use pandas
U, I = pd.factorize(array, sort=True)
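    # pd.factorize returns (codes, uniques); despite the local names, the line
    # below therefore returns (unique values, index codes), as documented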
return I, U
def shape_to_string(shape):
"""
On Windows, shape tuples use long ints which results in formatted shapes
such as (2L, 3L). This function ensures that the shape is always formatted
without the Ls.
"""
return "({0})".format(", ".join(str(int(item)) for item in shape))
def view_shape(shape, view):
"""Return the shape of a view of an array
:param shape: Tuple describing shape of the array
:param view: View object -- a valid index into a numpy array, or None
Returns equivalent of np.zeros(shape)[view].shape
"""
if view is None:
return shape
shp = tuple(slice(0, s, 1) for s in shape)
xy = np.broadcast_arrays(*np.ogrid[shp])
assert xy[0].shape == shape
return xy[0][view].shape
def stack_view(shape, *views):
shp = tuple(slice(0, s, 1) for s in shape)
result = np.broadcast_arrays(*np.ogrid[shp])
for v in views:
if isinstance(v, string_types) and v == 'transpose':
result = [r.T for r in result]
continue
result = [r[v] for r in result]
return tuple(result)
def coerce_numeric(arr):
"""Coerce an array into a numeric array, replacing
non-numeric elements with nans.
If the array is already a numeric type, it is returned
unchanged
:param arr: array to coerce
:type arr: :class:`numpy.ndarray`
:returns: array.
"""
# already numeric type
if np.issubdtype(arr.dtype, np.number):
return arr
if np.issubdtype(arr.dtype, np.bool_):
return arr.astype(np.int)
# a string dtype, or anything else
return pd.Series(arr).convert_objects(convert_numeric=True).values
def check_sorted(array):
""" Return True if the array is sorted, False otherwise.
"""
# this ignores NANs, and does the right thing if nans
# are concentrated at beginning or end of array
# otherwise, it will miss things at nan/finite boundaries
array = np.asarray(array)
return not (array[:-1] > array[1:]).any()
| {
"repo_name": "JudoWill/glue",
"path": "glue/utils/array.py",
"copies": "1",
"size": "2656",
"license": "bsd-3-clause",
"hash": 1656557842688455000,
"line_mean": 26.6666666667,
"line_max": 78,
"alpha_frac": 0.6381777108,
"autogenerated": false,
"ratio": 3.6685082872928176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4806685998092818,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from glue.external.six import string_types
__all__ = ['unique', 'shape_to_string', 'view_shape', 'stack_view',
'coerce_numeric', 'check_sorted', 'broadcast_to']
def unique(array):
"""
Return the unique elements of the array U, as well as
the index array I such that U[I] == array
Parameters
----------
array : `numpy.ndarray`
The array to use
Returns
-------
U : `numpy.ndarray`
The unique elements of the array
I : `numpy.ndarray`
The indices such that ``U[I] == array``
"""
# numpy.unique doesn't handle mixed-types on python3,
# so we use pandas
U, I = pd.factorize(array, sort=True)
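    # pd.factorize returns (codes, uniques); despite the local names, the line
    # below therefore returns (unique values, index codes), as documented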
return I, U
def shape_to_string(shape):
"""
On Windows, shape tuples use long ints which results in formatted shapes
such as (2L, 3L). This function ensures that the shape is always formatted
without the Ls.
"""
return "({0})".format(", ".join(str(int(item)) for item in shape))
def view_shape(shape, view):
"""
Return the shape of a view of an array.
Returns equivalent of ``np.zeros(shape)[view].shape``
Parameters
----------
shape : tuple
The shape of the array
view : slice
A valid index into a Numpy array, or None
"""
if view is None:
return shape
shp = tuple(slice(0, s, 1) for s in shape)
xy = np.broadcast_arrays(*np.ogrid[shp])
assert xy[0].shape == shape
return xy[0][view].shape
def stack_view(shape, *views):
shp = tuple(slice(0, s, 1) for s in shape)
result = np.broadcast_arrays(*np.ogrid[shp])
for v in views:
if isinstance(v, string_types) and v == 'transpose':
result = [r.T for r in result]
continue
result = [r[v] for r in result]
return tuple(result)
def coerce_numeric(arr):
"""
Coerce an array into a numeric array, replacing non-numeric elements with
nans.
If the array is already a numeric type, it is returned unchanged
Parameters
----------
arr : `numpy.ndarray`
The array to coerce
"""
# already numeric type
if np.issubdtype(arr.dtype, np.number):
return arr
if np.issubdtype(arr.dtype, np.bool_):
return arr.astype(np.int)
# a string dtype, or anything else
try:
return pd.to_numeric(arr, errors='coerce')
except AttributeError: # older versions of pandas
return pd.Series(arr).convert_objects(convert_numeric=True).values
def check_sorted(array):
"""
Return `True` if the array is sorted, `False` otherwise.
"""
# this ignores NANs, and does the right thing if nans
# are concentrated at beginning or end of array
# otherwise, it will miss things at nan/finite boundaries
array = np.asarray(array)
return not (array[:-1] > array[1:]).any()
def pretty_number(numbers):
"""
Convert a list/array of numbers into a nice list of strings
Parameters
----------
numbers : list
The numbers to convert
"""
try:
return [pretty_number(n) for n in numbers]
except TypeError:
pass
n = numbers
if n == 0:
result = '0'
elif (abs(n) < 1e-3) or (abs(n) > 1e3):
result = "%0.3e" % n
elif abs(int(n) - n) < 1e-3 and int(n) != 0:
result = "%i" % n
else:
result = "%0.3f" % n
if result.find('.') != -1:
result = result.rstrip('0')
return result
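# Example (illustration only): tiny and huge magnitudes switch to scientific
# notation, whole numbers print as integers, and trailing zeros are stripped:
#     >>> pretty_number([0, 0.0001234, 3.0, 0.25])
#     ['0', '1.234e-04', '3', '0.25']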
def broadcast_to(array, shape):
"""
Compatibility function - can be removed once we support only Numpy 1.10
and above
"""
try:
return np.broadcast_to(array, shape)
except AttributeError:
return array * np.ones(shape, array.dtype)
| {
"repo_name": "saimn/glue",
"path": "glue/utils/array.py",
"copies": "1",
"size": "3861",
"license": "bsd-3-clause",
"hash": -3558369945914845000,
"line_mean": 24.0714285714,
"line_max": 78,
"alpha_frac": 0.598031598,
"autogenerated": false,
"ratio": 3.7089337175792507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9806192279091929,
"avg_score": 0.0001546072974644403,
"num_lines": 154
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from pandas.types.common import (is_categorical_dtype, is_numeric_dtype,
is_datetime64_dtype, is_timedelta64_dtype)
from pandas.lib import is_bool_array
from .utils import PANDAS_VERSION
# In Pandas 0.19.2, a function to hash pandas objects was introduced. Object
# arrays are assumed to be strings, and are hashed with a cython implementation
# of siphash. However, the version in 0.19.2 hashes categoricals based on their
# integer codes, instead of taking into account the values they represent. This
# is fixed in pandas > 0.19.2. To support versions 0.19.0 and up, we do the
# following:
#
# - For versions > 0.19.2, we use the provided `hash_pandas_object` function.
# - For 0.19.0 through 0.19.2, we copy the definition of `hash_pandas_object`
# from pandas master (will be released as 0.20.0).
# - For 0.19.0 and 0.19.1, we use python's `hash` builtin to hash strings.
# - For 0.19.2, we use the `hash_object_array` method provided in pandas
# (an implementation of siphash)
#
# When dask drops support for pandas <= 0.19.2, all this can be removed.
# XXX: Pandas uses release branches > 0.19.0, which doesn't play well with
# versioneer, since the release tags aren't ancestors of master. As such, we
# need to use this hacky awfulness to check if we're > 0.19.2.
if PANDAS_VERSION not in ('0.19.1', '0.19.2') and PANDAS_VERSION > '0.19.0+340':
from pandas.tools.hashing import hash_pandas_object
else:
if PANDAS_VERSION == '0.19.2':
from pandas._hash import hash_object_array
else: # 0.19.0 and 0.19.1
def hash_object_array(x, hash_key, encoding):
return np.array([hash(i) for i in x], dtype=np.uint64)
# 16 byte long hashing key
_default_hash_key = '0123456789123456'
def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None,
categorize=True):
if hash_key is None:
hash_key = _default_hash_key
def adder(h, hashed_to_add):
h = np.multiply(h, np.uint(3), h)
return np.add(h, hashed_to_add, h)
if isinstance(obj, pd.Index):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64')
h = pd.Series(h, index=obj, dtype='uint64')
elif isinstance(obj, pd.Series):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64')
if index:
h = adder(h, hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values)
h = pd.Series(h, index=obj.index, dtype='uint64')
elif isinstance(obj, pd.DataFrame):
cols = obj.iteritems()
first_series = next(cols)[1]
h = hash_array(first_series.values, encoding,
hash_key, categorize).astype('uint64')
for _, col in cols:
h = adder(h, hash_array(col.values, encoding, hash_key,
categorize))
if index:
h = adder(h, hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values)
h = pd.Series(h, index=obj.index, dtype='uint64')
else:
raise TypeError("Unexpected type for hashing %s" % type(obj))
return h
def _hash_categorical(c, encoding, hash_key):
cat_hashed = hash_array(c.categories.values, encoding, hash_key,
categorize=False).astype(np.uint64, copy=False)
return c.rename_categories(cat_hashed).astype(np.uint64)
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
if hash_key is None:
hash_key = _default_hash_key
# For categoricals, we hash the categories, then remap the codes to the
# hash values. (This check is above the complex check so that we don't
# ask numpy if categorical is a subdtype of complex, as it will choke.
if is_categorical_dtype(vals.dtype):
return _hash_categorical(vals, encoding, hash_key)
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
if np.issubdtype(vals.dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
# First, turn whatever array this is into unsigned 64-bit ints, if we
# can manage it.
if is_bool_array(vals):
vals = vals.astype('u8')
elif ((is_datetime64_dtype(vals) or
is_timedelta64_dtype(vals) or
is_numeric_dtype(vals)) and vals.dtype.itemsize <= 8):
vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
else:
# With repeated values, its MUCH faster to categorize object
# dtypes, then hash and rename categories. We allow skipping the
# categorization when the values are known/likely to be unique.
if categorize:
codes, categories = pd.factorize(vals, sort=False)
cat = pd.Categorical(codes, pd.Index(categories),
ordered=False, fastpath=True)
return _hash_categorical(cat, encoding, hash_key)
vals = hash_object_array(vals, hash_key, encoding)
# Then, redistribute these 64-bit ints within the space of 64-bit ints
vals ^= vals >> 30
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> 27
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> 31
return vals
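# --- Illustrative sketch (not part of the module) ----------------------------
# The property dask relies on: hashes depend only on the values (and optionally
# the index), so equal rows hash identically regardless of how a frame is later
# partitioned.  The data below is made up.
def _example_hashing():
    s = pd.Series(['a', 'b', 'a', None])
    h = hash_pandas_object(s, index=False)
    assert h.dtype == np.uint64
    assert h.iloc[0] == h.iloc[2]   # equal values -> equal hashes
    return h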
| {
"repo_name": "cpcloud/dask",
"path": "dask/dataframe/hashing.py",
"copies": "1",
"size": "6179",
"license": "bsd-3-clause",
"hash": -8676651381093628000,
"line_mean": 45.8106060606,
"line_max": 80,
"alpha_frac": 0.5766305227,
"autogenerated": false,
"ratio": 3.8739811912225703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.495061171392257,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import datashape
from datashape import discover
from ..append import append
from ..convert import convert, ooc_types
from ..chunks import chunks
from ..resource import resource
from ..utils import filter_kwargs
@discover.register(pd.HDFStore)
def discover_hdfstore(f):
d = dict()
for key in f.keys():
d2 = d
key2 = key.lstrip('/')
while '/' in key2:
group, key2 = key2.split('/', 1)
if group not in d2:
d2[group] = dict()
d2 = d2[group]
d2[key2] = f.get_storer(key)
return discover(d)
@discover.register(pd.io.pytables.Fixed)
def discover_hdfstore_storer(storer):
f = storer.parent
n = storer.shape
if isinstance(n, list):
n = n[0]
measure = discover(f.select(storer.pathname, start=0, stop=10)).measure
return n * measure
@convert.register(chunks(pd.DataFrame), pd.io.pytables.AppendableFrameTable)
def hdfstore_to_chunks_dataframes(data, chunksize=100000, **kwargs):
if (isinstance(chunksize, (float, np.floating)) and
not chunksize.is_integer()):
raise TypeError('chunksize argument must be an integer, got %s' %
chunksize)
chunksize = int(chunksize)
def f():
k = min(chunksize, 100)
yield data.parent.select(data.pathname, start=0, stop=k)
for chunk in data.parent.select(data.pathname, chunksize=chunksize,
start=k):
yield chunk
return chunks(pd.DataFrame)(f)
@convert.register(pd.DataFrame, (pd.io.pytables.AppendableFrameTable,
pd.io.pytables.FrameFixed))
def hdfstore_to_dataframe(data, **kwargs):
return data.read()
pytables_h5py_explanation = """
You've run in to a conflict between the two HDF5 libraries in Python,
H5Py and PyTables. You're trying to do something that requires PyTables but
H5Py was loaded first and the two libraries don't share well.
To resolve this you'll have to restart your Python process and ensure that you
import tables
before you import projects like odo or into or blaze."""
from collections import namedtuple
EmptyHDFStoreDataset = namedtuple('EmptyHDFStoreDataset', 'parent,pathname,dshape')
@resource.register('hdfstore://.+', priority=11)
def resource_hdfstore(uri, datapath=None, dshape=None, **kwargs):
# TODO:
# 1. Support nested datashapes (e.g. groups)
# 2. Try translating unicode to ascii? (PyTables fails here)
fn = uri.split('://')[1]
try:
f = pd.HDFStore(fn, **filter_kwargs(pd.HDFStore, kwargs))
except RuntimeError as e:
raise type(e)(pytables_h5py_explanation)
if dshape is None:
return f.get_storer(datapath) if datapath else f
dshape = datashape.dshape(dshape)
# Already exists, return it
if datapath in f:
return f.get_storer(datapath)
# Need to create new dataset.
# HDFStore doesn't support empty datasets, so we use a proxy object.
return EmptyHDFStoreDataset(f, datapath, dshape)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), pd.DataFrame)
def append_dataframe_to_hdfstore(store, df, **kwargs):
store.parent.append(store.pathname, df, append=True)
return store.parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset),
chunks(pd.DataFrame))
def append_chunks_dataframe_to_hdfstore(store, c, **kwargs):
parent = store.parent
for chunk in c:
parent.append(store.pathname, chunk)
return parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), object)
def append_object_to_hdfstore(store, o, **kwargs):
return append(store, convert(chunks(pd.DataFrame), o, **kwargs), **kwargs)
ooc_types.add(pd.io.pytables.AppendableFrameTable)
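# --- Illustrative sketch (not part of the module) ----------------------------
# A round trip through the pieces defined above, calling them directly rather
# than through odo's top-level API.  The file name, datapath and data are made
# up, and a small HDF5 file is written to disk.
def _example_hdfstore_roundtrip(fname='example.h5'):
    df = pd.DataFrame({'id': [1, 2], 'name': ['Alice', 'Bob']})
    target = resource('hdfstore://' + fname, datapath='/data',
                      dshape='var * {id: int64, name: string}')
    storer = append(target, df)              # creates/extends the table
    return convert(pd.DataFrame, storer)     # read it back as a DataFrame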
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/hdfstore.py",
"copies": "9",
"size": "3965",
"license": "bsd-3-clause",
"hash": -7413218275420359000,
"line_mean": 31.2357723577,
"line_max": 83,
"alpha_frac": 0.6756620429,
"autogenerated": false,
"ratio": 3.58499095840868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004949947458658259,
"num_lines": 123
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import xarray as xr
from . import parameterized, randn, requires_dask
nx = 3000
long_nx = 30000000
ny = 2000
nt = 1000
window = 20
randn_xy = randn((nx, ny), frac_nan=0.1)
randn_xt = randn((nx, nt))
randn_t = randn((nt, ))
randn_long = randn((long_nx, ), frac_nan=0.1)
class Rolling:
def setup(self, *args, **kwargs):
self.ds = xr.Dataset(
{'var1': (('x', 'y'), randn_xy),
'var2': (('x', 't'), randn_xt),
'var3': (('t', ), randn_t)},
coords={'x': np.arange(nx),
'y': np.linspace(0, 1, ny),
't': pd.date_range('1970-01-01', periods=nt, freq='D'),
'x_coords': ('x', np.linspace(1.1, 2.1, nx))})
self.da_long = xr.DataArray(randn_long, dims='x',
coords={'x': np.arange(long_nx) * 0.1})
@parameterized(['func', 'center'],
(['mean', 'count'], [True, False]))
def time_rolling(self, func, center):
getattr(self.ds.rolling(x=window, center=center), func)().load()
@parameterized(['func', 'pandas'],
(['mean', 'count'], [True, False]))
def time_rolling_long(self, func, pandas):
if pandas:
se = self.da_long.to_series()
getattr(se.rolling(window=window), func)()
else:
getattr(self.da_long.rolling(x=window), func)().load()
@parameterized(['window_', 'min_periods'],
([20, 40], [5, None]))
def time_rolling_np(self, window_, min_periods):
self.ds.rolling(x=window_, center=False,
min_periods=min_periods).reduce(
getattr(np, 'nanmean')).load()
@parameterized(['center', 'stride'],
([True, False], [1, 200]))
def time_rolling_construct(self, center, stride):
self.ds.rolling(x=window, center=center).construct(
'window_dim', stride=stride).mean(dim='window_dim').load()
class RollingDask(Rolling):
def setup(self, *args, **kwargs):
requires_dask()
super(RollingDask, self).setup(**kwargs)
self.ds = self.ds.chunk({'x': 100, 'y': 50, 't': 50})
self.da_long = self.da_long.chunk({'x': 10000})
| {
"repo_name": "shoyer/xray",
"path": "asv_bench/benchmarks/rolling.py",
"copies": "1",
"size": "2349",
"license": "apache-2.0",
"hash": 7576704411820002000,
"line_mean": 33.5441176471,
"line_max": 75,
"alpha_frac": 0.5291613453,
"autogenerated": false,
"ratio": 3.341394025604552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4370555370904552,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import xarray as xr
from . import randint, randn, requires_dask
nx = 3000
ny = 2000
nt = 1000
basic_indexes = {
'1slice': {'x': slice(0, 3)},
'1slice-1scalar': {'x': 0, 'y': slice(None, None, 3)},
'2slicess-1scalar': {'x': slice(3, -3, 3), 'y': 1, 't': slice(None, -3, 3)}
}
basic_assignment_values = {
'1slice': xr.DataArray(randn((3, ny), frac_nan=0.1), dims=['x', 'y']),
'1slice-1scalar': xr.DataArray(randn(int(ny / 3) + 1, frac_nan=0.1),
dims=['y']),
'2slicess-1scalar': xr.DataArray(randn(int((nx - 6) / 3), frac_nan=0.1),
dims=['x'])
}
outer_indexes = {
'1d': {'x': randint(0, nx, 400)},
'2d': {'x': randint(0, nx, 500), 'y': randint(0, ny, 400)},
'2d-1scalar': {'x': randint(0, nx, 100), 'y': 1, 't': randint(0, nt, 400)}
}
outer_assignment_values = {
'1d': xr.DataArray(randn((400, ny), frac_nan=0.1), dims=['x', 'y']),
'2d': xr.DataArray(randn((500, 400), frac_nan=0.1), dims=['x', 'y']),
'2d-1scalar': xr.DataArray(randn(100, frac_nan=0.1), dims=['x'])
}
vectorized_indexes = {
'1-1d': {'x': xr.DataArray(randint(0, nx, 400), dims='a')},
'2-1d': {'x': xr.DataArray(randint(0, nx, 400), dims='a'),
'y': xr.DataArray(randint(0, ny, 400), dims='a')},
'3-2d': {'x': xr.DataArray(randint(0, nx, 400).reshape(4, 100),
dims=['a', 'b']),
'y': xr.DataArray(randint(0, ny, 400).reshape(4, 100),
dims=['a', 'b']),
't': xr.DataArray(randint(0, nt, 400).reshape(4, 100),
dims=['a', 'b'])},
}
vectorized_assignment_values = {
'1-1d': xr.DataArray(randn((400, 2000)), dims=['a', 'y'],
coords={'a': randn(400)}),
'2-1d': xr.DataArray(randn(400), dims=['a', ], coords={'a': randn(400)}),
'3-2d': xr.DataArray(randn((4, 100)), dims=['a', 'b'],
coords={'a': randn(4), 'b': randn(100)})
}
class Base(object):
def setup(self, key):
self.ds = xr.Dataset(
{'var1': (('x', 'y'), randn((nx, ny), frac_nan=0.1)),
'var2': (('x', 't'), randn((nx, nt))),
'var3': (('t', ), randn(nt))},
coords={'x': np.arange(nx),
'y': np.linspace(0, 1, ny),
't': pd.date_range('1970-01-01', periods=nt, freq='D'),
'x_coords': ('x', np.linspace(1.1, 2.1, nx))})
class Indexing(Base):
def time_indexing_basic(self, key):
self.ds.isel(**basic_indexes[key]).load()
time_indexing_basic.param_names = ['key']
time_indexing_basic.params = [list(basic_indexes.keys())]
def time_indexing_outer(self, key):
self.ds.isel(**outer_indexes[key]).load()
time_indexing_outer.param_names = ['key']
time_indexing_outer.params = [list(outer_indexes.keys())]
def time_indexing_vectorized(self, key):
self.ds.isel(**vectorized_indexes[key]).load()
time_indexing_vectorized.param_names = ['key']
time_indexing_vectorized.params = [list(vectorized_indexes.keys())]
class Assignment(Base):
def time_assignment_basic(self, key):
ind = basic_indexes[key]
val = basic_assignment_values[key]
self.ds['var1'][ind.get('x', slice(None)),
ind.get('y', slice(None))] = val
time_assignment_basic.param_names = ['key']
time_assignment_basic.params = [list(basic_indexes.keys())]
def time_assignment_outer(self, key):
ind = outer_indexes[key]
val = outer_assignment_values[key]
self.ds['var1'][ind.get('x', slice(None)),
ind.get('y', slice(None))] = val
time_assignment_outer.param_names = ['key']
time_assignment_outer.params = [list(outer_indexes.keys())]
def time_assignment_vectorized(self, key):
ind = vectorized_indexes[key]
val = vectorized_assignment_values[key]
self.ds['var1'][ind.get('x', slice(None)),
ind.get('y', slice(None))] = val
time_assignment_vectorized.param_names = ['key']
time_assignment_vectorized.params = [list(vectorized_indexes.keys())]
class IndexingDask(Indexing):
def setup(self, key):
requires_dask()
super(IndexingDask, self).setup(key)
self.ds = self.ds.chunk({'x': 100, 'y': 50, 't': 50})
| {
"repo_name": "chunweiyuan/xarray",
"path": "asv_bench/benchmarks/indexing.py",
"copies": "2",
"size": "4503",
"license": "apache-2.0",
"hash": -8966021481000969000,
"line_mean": 34.7380952381,
"line_max": 79,
"alpha_frac": 0.5356429047,
"autogenerated": false,
"ratio": 3.0282447881640886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4563887692864088,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from blaze.compute.core import compute, compute_up
from blaze.expr import Symbol, union, by, exp
from datashape import discover
t = Symbol('t', 'var * {id: int, name: string, amount: int}')
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])
def eq(a, b):
return (a == b).all()
def test_symbol():
assert eq(compute(t, x), x)
def test_eq():
assert eq(compute(t['amount'] == 100, x),
x['amount'] == 100)
def test_selection():
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
def test_arithmetic():
assert eq(compute(t['amount'] + t['id'], x),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], x),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], x),
x['amount'] % x['id'])
def test_UnaryOp():
assert eq(compute(exp(t['amount']), x),
np.exp(x['amount']))
def test_Neg():
assert eq(compute(-t['amount'], x),
-x['amount'])
def test_invert_not():
assert eq(compute(~(t.amount > 0), x),
~(x['amount'] > 0))
def test_union_1d():
t = Symbol('t', 'var * int')
x = np.array([1, 2, 3])
assert eq(compute(union(t, t), x), np.array([1, 2, 3, 1, 2, 3]))
def test_union():
result = compute(union(t, t), x)
assert result.shape == (x.shape[0] * 2,)
assert eq(result[:5], x)
assert eq(result[5:], x)
result = compute(union(t.id, t.id), x)
assert eq(result, np.array([1, 2, 3, 4, 5, 1, 2, 3, 4, 5]))
def test_Reductions():
assert compute(t['amount'].mean(), x) == x['amount'].mean()
assert compute(t['amount'].count(), x) == len(x['amount'])
assert compute(t['amount'].sum(), x) == x['amount'].sum()
assert compute(t['amount'].min(), x) == x['amount'].min()
assert compute(t['amount'].max(), x) == x['amount'].max()
assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
assert compute(t['amount'].var(), x) == x['amount'].var()
assert compute(t['amount'].std(), x) == x['amount'].std()
assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
assert compute((t['amount'] > 150).any(), x) == True
assert compute((t['amount'] > 250).all(), x) == False
def test_count_nan():
t = Symbol('t', '3 * ?real')
x = np.array([1.0, np.nan, 2.0])
assert compute(t.count(), x) == 2
def test_Distinct():
x = np.array([('Alice', 100),
('Alice', -200),
('Bob', 100),
('Bob', 100)],
dtype=[('name', 'S5'), ('amount', 'i8')])
t = Symbol('t', 'var * {name: string, amount: int64}')
assert eq(compute(t['name'].distinct(), x),
np.unique(x['name']))
assert eq(compute(t.distinct(), x),
np.unique(x))
def test_sort():
assert eq(compute(t.sort('amount'), x),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), x),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), x),
np.sort(x, order=['amount', 'id']))
def test_head():
assert eq(compute(t.head(2), x),
x[:2])
def test_label():
expected = x['amount'] * 10
expected = np.array(expected, dtype=[('foo', 'i8')])
assert eq(compute((t['amount'] * 10).label('foo'), x),
expected)
def test_relabel():
expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)
assert result.dtype.names == expected.dtype.names
assert eq(result, expected)
def test_by():
from blaze.api.into import into
expr = by(t.amount > 0, t.id.count())
result = compute(expr, x)
assert set(map(tuple, into([], result))) == set([(False, 2), (True, 3)])
def test_compute_up_field():
assert eq(compute(t['name'], x), x['name'])
def test_compute_up_projection():
assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
def test_slice():
for s in [0, slice(2), slice(1, 3), slice(None, None, 2)]:
assert (compute(t[s], x) == x[s]).all()
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = Symbol('a', discover(ax))
def test_array_reductions():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
def test_array_reductions_with_keepdims():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
ax.sum(axis=axis, keepdims=True))
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/tests/test_numpy_compute.py",
"copies": "1",
"size": "5057",
"license": "bsd-3-clause",
"hash": -4378522455388237300,
"line_mean": 27.4101123596,
"line_max": 82,
"alpha_frac": 0.5291674906,
"autogenerated": false,
"ratio": 3.029958058717795,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9051678351091303,
"avg_score": 0.0014894396452983696,
"num_lines": 178
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
import re
from addie.databases.oncat.oncat import pyoncatGetTemplate
try:
import pyoncat
ONCAT_ENABLED = True
except ImportError:
print('pyoncat module not found. Functionality disabled')
ONCAT_ENABLED = False
class OncatTemplateRetriever:
"""Retrieves the up to date ONCat template of the given instrument. The needed information
are then retrieved from the template such as column name and path to oncat database. The
output of this class is a dictionary with those informations."""
template_information = {} # {'0': {'title': 'Run #', 'path': 'indexed.run_number'},
# '1': ....
# }
_oncat_default_template = {}
def __init__(self, parent=None):
self.parent = parent
if self.parent.oncat:
try:
self.retrieve_template()
self.isolate_relevant_information()
except pyoncat.InvalidRefreshTokenError:
self.template_information = {}
return
else:
return None
def retrieve_template(self):
instrument = self.parent.instrument['short_name']
facility = self.parent.facility
list_templates = pyoncatGetTemplate(oncat=self.parent.oncat,
instrument=instrument,
facility=facility)
for template in list_templates:
if hasattr(template, "default"):
if template.default:
self._oncat_default_template = template["columns"]
return
_default_template = list_templates[0]["columns"]
self._oncat_default_template = _default_template
def isolate_relevant_information(self):
"""from all the information provided by the ONCat template, we are only interested by the following infos
[name, path and units]. We isolate those into the template_information dictionary"""
def get_formula(oncat_formula):
"""will need to go from something like
"${value/10e11}`"
to something more pythonic
"{value/10e11}"""
regular_expression = r'\$(?P<formula>.+)\`'
m = re.search(regular_expression, oncat_formula)
if m:
return m.group('formula')
else:
return ""
template_information = {}
for _index, _element in enumerate(self._oncat_default_template):
_title = _element["name"]
_path = _element["path"]
if "units" in _element:
_units = _element["units"]
else:
_units = ""
if "transform" in _element:
_formula = get_formula(_element["transform"])
else:
_formula = ""
template_information[_index] = {'title': _title,
'path': _path,
'units': _units,
'formula': _formula}
self.template_information = template_information
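    # For illustration (hypothetical values, not from a real ONCat template):
    # applying get_formula's regex to the transform string from its docstring,
    #   re.search(r'\$(?P<formula>.+)\`', "${value/10e11}`").group('formula')
    # yields "{value/10e11}", which is what ends up in the 'formula' field of
    # the template_information dictionary built above.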
def get_template_information(self):
return self.template_information
@staticmethod
def create_oncat_projection_from_template(with_location=False,
template={}):
"""Using the ONCat template to create projection used by oncat
to return full information"""
projection = []
if with_location:
projection = ['location']
nbr_columns = len(template)
for _col in np.arange(nbr_columns):
projection.append(template[_col]['path'])
return projection
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/oncat_template_retriever.py",
"copies": "1",
"size": "3873",
"license": "mit",
"hash": -6953795083008404000,
"line_mean": 34.8611111111,
"line_max": 113,
"alpha_frac": 0.5450555125,
"autogenerated": false,
"ratio": 4.883984867591425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5929040380091426,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import struct
from six.moves import range
class Alphabet(object):
def __init__(self, config_file):
self._config_file = config_file
self._label_to_str = {}
self._str_to_label = {}
self._size = 0
if config_file:
with open(config_file, 'r', encoding='utf-8') as fin:
for line in fin:
if line[0:2] == '\\#':
line = '#\n'
elif line[0] == '#':
continue
self._label_to_str[self._size] = line[:-1] # remove the line ending
self._str_to_label[line[:-1]] = self._size
self._size += 1
def _string_from_label(self, label):
return self._label_to_str[label]
def _label_from_string(self, string):
try:
return self._str_to_label[string]
except KeyError as e:
raise KeyError(
'ERROR: Your transcripts contain characters (e.g. \'{}\') which do not occur in \'{}\'! Use ' \
'util/check_characters.py to see what characters are in your [train,dev,test].csv transcripts, and ' \
'then add all these to \'{}\'.'.format(string, self._config_file, self._config_file)
).with_traceback(e.__traceback__)
def has_char(self, char):
return char in self._str_to_label
def encode(self, string):
res = []
for char in string:
if char == '-':
res.append(self._size)
else:
res.append(self._label_from_string(char))
return res
def decode(self, labels):
res = ''
for label in labels:
res += self._string_from_label(label)
return res
def serialize(self):
# Serialization format is a sequence of (key, value) pairs, where key is
# a uint16_t and value is a uint16_t length followed by `length` UTF-8
# encoded bytes with the label.
res = bytearray()
# We start by writing the number of pairs in the buffer as uint16_t.
res += struct.pack('<H', self._size)
for key, value in self._label_to_str.items():
value = value.encode('utf-8')
# struct.pack only takes fixed length strings/buffers, so we have to
# construct the correct format string with the length of the encoded
# label.
res += struct.pack('<HH{}s'.format(len(value)), key, len(value), value)
return bytes(res)
def size(self):
return self._size
def config_file(self):
return self._config_file
class UTF8Alphabet(object):
@staticmethod
def _string_from_label(_):
assert False
@staticmethod
def _label_from_string(_):
assert False
@staticmethod
def encode(string):
# 0 never happens in the data, so we can shift values by one, use 255 for
# the CTC blank, and keep the alphabet size = 256
return np.frombuffer(string.encode('utf-8'), np.uint8).astype(np.int32) - 1
@staticmethod
def decode(labels):
# And here we need to shift back up
return bytes(np.asarray(labels, np.uint8) + 1).decode('utf-8', errors='replace')
@staticmethod
def size():
return 255
@staticmethod
def serialize():
res = bytearray()
res += struct.pack('<h', 255)
for i in range(255):
# Note that we also shift back up in the mapping constructed here
# so that the native client sees the correct byte values when decoding.
res += struct.pack('<hh1s', i, 1, bytes([i+1]))
return bytes(res)
@staticmethod
def deserialize(buf):
size = struct.unpack('<I', buf)[0]
assert size == 255
return UTF8Alphabet()
@staticmethod
def config_file():
return ''
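# For illustration (a minimal sketch, not exercised by this module itself):
# UTF8Alphabet shifts byte values down by one on encode and back up on decode,
#   UTF8Alphabet.encode('AB')      # -> array([64, 65], dtype=int32)
#   UTF8Alphabet.decode([64, 65])  # -> 'AB'
# so labels stay in the range 0..254 and 255 remains free for the CTC blank.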
def text_to_char_array(transcript, alphabet, context=''):
r"""
Given a transcript string, map characters to
integers and return a numpy array representing the processed string.
Use a string in `context` for adding text to raised exceptions.
"""
try:
transcript = alphabet.encode(transcript)
if len(transcript) == 0:
raise ValueError('While processing {}: Found an empty transcript! '
'You must include a transcript for all training data.'
.format(context))
return transcript
except KeyError as e:
# Provide the row context (especially wav_filename) for alphabet errors
raise ValueError('While processing: {}\n{}'.format(context, e))
# The following code is from: http://hetland.org/coding/python/levenshtein.py
# This is a straightforward implementation of a well-known algorithm, and thus
# probably shouldn't be covered by copyright to begin with. But in case it is,
# the author (Magnus Lie Hetland) has, to the extent possible under law,
# dedicated all copyright and related and neighboring rights to this software
# to the public domain worldwide, by distributing it under the CC0 license,
# version 1.0. This software is distributed without any warranty. For more
# information, see <http://creativecommons.org/publicdomain/zero/1.0>
def levenshtein(a, b):
"Calculates the Levenshtein distance between a and b."
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n+1))
for i in range(1, m+1):
previous, current = current, [i]+[0]*n
for j in range(1, n+1):
add, delete = previous[j]+1, current[j-1]+1
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
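# Hedged usage sketch (illustrative only, runs when this module is executed directly):
if __name__ == '__main__':
    # classic worked example: 'kitten' -> 'sitten' -> 'sittin' -> 'sitting'
    assert levenshtein('kitten', 'sitting') == 3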
| {
"repo_name": "googleinterns/deepspeech-reconstruction",
"path": "src/deepspeech_training/util/text.py",
"copies": "1",
"size": "5957",
"license": "apache-2.0",
"hash": -8143314163359580000,
"line_mean": 34.0411764706,
"line_max": 118,
"alpha_frac": 0.5820043646,
"autogenerated": false,
"ratio": 3.989953114534494,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012767409217803044,
"num_lines": 170
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
#from tensorflow.contrib.data.python.ops.dataset_ops import Dataset
from niftynet.engine.image_window import ImageWindow
from niftynet.layer.base_layer import Layer
from niftynet.layer.grid_warper import AffineGridWarperLayer
from niftynet.layer.resampler import ResamplerLayer
from niftynet.layer.linear_resize import LinearResizeLayer as Resize
#from niftynet.layer.approximated_smoothing import SmoothingLayer as Smooth
class PairwiseUniformSampler(Layer):
def __init__(self,
reader_0,
reader_1,
data_param,
batch_size=1):
Layer.__init__(self, name='pairwise_sampler_uniform')
# reader for the fixed images
self.reader_0 = reader_0
# reader for the moving images
self.reader_1 = reader_1
# TODO:
# 0) check the readers should have the same length file list
# 1) detect window shape mismatches or defaulting
# windows to the fixed image reader properties
# 2) reshape images to (supporting multi-modal data)
# [batch, x, y, channel] or [batch, x, y, z, channels]
# 3) infer spatial rank
# 4) make ``label`` optional
self.batch_size = batch_size
self.spatial_rank = 3
self.window = ImageWindow.from_data_reader_properties(
self.reader_0.input_sources,
self.reader_0.shapes,
self.reader_0.tf_dtypes,
data_param)
if self.window.has_dynamic_shapes:
tf.logging.fatal('Dynamic shapes not supported.\nPlease specify '
'all spatial dims of the input data, for the '
'spatial_window_size parameter.')
raise NotImplementedError
# TODO: check spatial dims the same across input modalities
self.image_shape = \
self.reader_0.shapes['fixed_image'][:self.spatial_rank]
self.moving_image_shape = \
self.reader_1.shapes['moving_image'][:self.spatial_rank]
self.window_size = self.window.shapes['fixed_image'][1:]
# initialise a dataset prefetching pairs of image and label volumes
n_subjects = len(self.reader_0.output_list)
rand_ints = np.random.randint(n_subjects, size=[n_subjects])
image_dataset = tf.data.Dataset.from_tensor_slices(rand_ints)
# mapping random integer id to 4 volumes moving/fixed x image/label
# tf.py_func wrapper of ``get_pairwise_inputs``
image_dataset = image_dataset.map(
lambda image_id: tuple(tf.py_func(
self.get_pairwise_inputs, [image_id],
[tf.int64, tf.float32, tf.float32, tf.int32, tf.int32])),
num_parallel_calls=4) # supported by tf 1.4?
image_dataset = image_dataset.repeat() # num_epochs can be param
image_dataset = image_dataset.shuffle(
buffer_size=self.batch_size * 20)
image_dataset = image_dataset.batch(self.batch_size)
self.iterator = image_dataset.make_initializable_iterator()
def get_pairwise_inputs(self, image_id):
# fetch fixed image
fixed_inputs = []
fixed_inputs.append(self._get_image('fixed_image', image_id)[0])
fixed_inputs.append(self._get_image('fixed_label', image_id)[0])
fixed_inputs = np.concatenate(fixed_inputs, axis=-1)
fixed_shape = np.asarray(fixed_inputs.shape).T.astype(np.int32)
# fetch moving image
moving_inputs = []
moving_inputs.append(self._get_image('moving_image', image_id)[0])
moving_inputs.append(self._get_image('moving_label', image_id)[0])
moving_inputs = np.concatenate(moving_inputs, axis=-1)
moving_shape = np.asarray(moving_inputs.shape).T.astype(np.int32)
return image_id, fixed_inputs, moving_inputs, fixed_shape, moving_shape
def _get_image(self, image_source_type, image_id):
# returns a random image from either the list of fixed images
# or the list of moving images
try:
image_source_type = image_source_type.decode()
except AttributeError:
pass
if image_source_type.startswith('fixed'):
_, data, _ = self.reader_0(idx=image_id)
else: # image_source_type.startswith('moving'):
_, data, _ = self.reader_1(idx=image_id)
image = np.asarray(data[image_source_type]).astype(np.float32)
image_shape = list(image.shape)
image = np.reshape(image, image_shape[:self.spatial_rank] + [-1])
image_shape = np.asarray(image.shape).astype(np.int32)
return image, image_shape
def layer_op(self):
"""
This function concatenate image and label volumes at the last dim
and randomly cropping the volumes (also the cropping margins)
"""
image_id, fixed_inputs, moving_inputs, fixed_shape, moving_shape = \
self.iterator.get_next()
# TODO preprocessing layer modifying
# image shapes will not be supported
# assuming the same shape across modalities, using the first
image_id.set_shape((self.batch_size,))
image_id = tf.to_float(image_id)
fixed_inputs.set_shape(
(self.batch_size,) + (None,) * self.spatial_rank + (2,))
# last dim is 1 image + 1 label
moving_inputs.set_shape(
(self.batch_size,) + self.moving_image_shape + (2,))
fixed_shape.set_shape((self.batch_size, self.spatial_rank + 1))
moving_shape.set_shape((self.batch_size, self.spatial_rank + 1))
# resizing the moving_inputs to match the target
# assumes the same shape across the batch
target_spatial_shape = \
tf.unstack(fixed_shape[0], axis=0)[:self.spatial_rank]
moving_inputs = Resize(new_size=target_spatial_shape)(moving_inputs)
combined_volume = tf.concat([fixed_inputs, moving_inputs], axis=-1)
# smoothing_layer = Smoothing(
# sigma=1, truncate=3.0, type_str='gaussian')
# combined_volume = tf.unstack(combined_volume, axis=-1)
# combined_volume[0] = tf.expand_dims(combined_volume[0], axis=-1)
# combined_volume[1] = smoothing_layer(
# tf.expand_dims(combined_volume[1]), axis=-1)
# combined_volume[2] = tf.expand_dims(combined_volume[2], axis=-1)
# combined_volume[3] = smoothing_layer(
# tf.expand_dims(combined_volume[3]), axis=-1)
# combined_volume = tf.stack(combined_volume, axis=-1)
# TODO affine data augmentation here
if self.spatial_rank == 3:
window_channels = np.prod(self.window_size[self.spatial_rank:]) * 4
# TODO if no affine augmentation:
img_spatial_shape = target_spatial_shape
win_spatial_shape = [tf.constant(dim) for dim in
self.window_size[:self.spatial_rank]]
# when img==win make sure shift => 0.0
# otherwise interpolation is out of bound
batch_shift = [
tf.random_uniform(
shape=(self.batch_size, 1),
minval=0,
maxval=tf.maximum(tf.to_float(img - win - 1), 0.01))
for (win, img) in zip(win_spatial_shape, img_spatial_shape)]
batch_shift = tf.concat(batch_shift, axis=1)
affine_constraints = ((1.0, 0.0, 0.0, None),
(0.0, 1.0, 0.0, None),
(0.0, 0.0, 1.0, None))
computed_grid = AffineGridWarperLayer(
source_shape=(None, None, None),
output_shape=self.window_size[:self.spatial_rank],
constraints=affine_constraints)(batch_shift)
computed_grid.set_shape((self.batch_size,) +
self.window_size[:self.spatial_rank] +
(self.spatial_rank,))
resampler = ResamplerLayer(
interpolation='linear', boundary='replicate')
windows = resampler(combined_volume, computed_grid)
out_shape = [self.batch_size] + \
list(self.window_size[:self.spatial_rank]) + \
[window_channels]
windows.set_shape(out_shape)
image_id = tf.reshape(image_id, (self.batch_size, 1))
start_location = tf.zeros((self.batch_size, self.spatial_rank))
locations = tf.concat([
image_id, start_location, batch_shift], axis=1)
return windows, locations
# return windows, [tf.reduce_max(computed_grid), batch_shift]
# overriding input buffers
def run_threads(self, session, *args, **argvs):
"""
To be called at the beginning of running graph variables
"""
session.run(self.iterator.initializer)
return
def close_all(self):
# do nothing
pass
| {
"repo_name": "NifTK/NiftyNet",
"path": "niftynet/contrib/sampler_pairwise/sampler_pairwise_uniform.py",
"copies": "2",
"size": "9106",
"license": "apache-2.0",
"hash": -1156011925598209800,
"line_mean": 45.4591836735,
"line_max": 79,
"alpha_frac": 0.5974083022,
"autogenerated": false,
"ratio": 3.855207451312447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5452615753512446,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import fc_layer as fc, conv_layer as conv
from util.empty_safe_conv import empty_safe_1x1_conv as _1x1_conv
from util.empty_safe_conv import empty_safe_conv as _conv
class Modules:
def __init__(self, image_feat_grid, word_vecs, num_choices):
self.image_feat_grid = image_feat_grid
self.word_vecs = word_vecs
self.num_choices = num_choices
# Capture the variable scope for creating all variables
with tf.variable_scope('module_variables') as module_variable_scope:
self.module_variable_scope = module_variable_scope
# Flatten word vecs for efficient slicing
# word_vecs has shape [T_decoder, N, D]
word_vecs_shape = tf.shape(word_vecs)
T_full = word_vecs_shape[0]
self.N_full = word_vecs_shape[1]
D_word = word_vecs.get_shape().as_list()[-1]
self.word_vecs_flat = tf.reshape(
word_vecs, to_T([T_full*self.N_full, D_word]))
# create each dummy modules here so that weights won't get initialized again
att_shape = image_feat_grid.get_shape().as_list()[:-1] + [1]
self.att_shape = att_shape
input_att = tf.placeholder(tf.float32, att_shape)
time_idx = tf.placeholder(tf.int32, [None])
batch_idx = tf.placeholder(tf.int32, [None])
self.SceneModule(time_idx, batch_idx, reuse=False)
self.FindModule(time_idx, batch_idx, reuse=False)
self.FindSamePropertyModule(input_att, time_idx, batch_idx, reuse=False)
self.TransformModule(input_att, time_idx, batch_idx, reuse=False)
self.AndModule(input_att, input_att, time_idx, batch_idx, reuse=False)
self.FilterModule(input_att, time_idx, batch_idx, reuse=False)
self.OrModule(input_att, input_att, time_idx, batch_idx, reuse=False)
self.ExistModule(input_att, time_idx, batch_idx, reuse=False)
self.CountModule(input_att, time_idx, batch_idx, reuse=False)
self.EqualNumModule(input_att, input_att, time_idx, batch_idx, reuse=False)
self.MoreNumModule(input_att, input_att, time_idx, batch_idx, reuse=False)
self.LessNumModule(input_att, input_att, time_idx, batch_idx, reuse=False)
self.SamePropertyModule(input_att, input_att, time_idx, batch_idx, reuse=False)
self.DescribeModule(input_att, time_idx, batch_idx, reuse=False)
def _slice_image_feat_grid(self, batch_idx):
# In TF Fold, batch_idx is a [N_batch, 1] tensor
return tf.gather(self.image_feat_grid, batch_idx)
def _slice_word_vecs(self, time_idx, batch_idx):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# time is highest dim in word_vecs
joint_index = time_idx*self.N_full + batch_idx
return tf.gather(self.word_vecs_flat, joint_index)
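    # For illustration (hypothetical sizes): with T_decoder=3 and N_full=2,
    # word_vecs is flattened row-major to shape [6, D_word], so the vector for
    # time_idx=2, batch_idx=1 sits at flat row 2*2 + 1 = 5, which is exactly
    # the joint_index gathered above.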
# All the layers are wrapped with td.ScopedLayer
def SceneModule(self, time_idx, batch_idx, pos_val=3, scope='SceneModule',
reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: None -> att_grid
# Output:
# att_grid: [N, H, W, 1]
#
# Implementation:
# 1. Just output a positive attention everywhere
N = tf.shape(time_idx)[0]
att_grid = pos_val*tf.ones(to_T([N]+self.att_shape[1:]))
return att_grid
def FindModule(self, time_idx, batch_idx, map_dim=250, scope='FindModule',
reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
image_feat_grid = self._slice_image_feat_grid(batch_idx)
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: image_feat_grid x text_param -> att_grid
# Input:
# image_feat_grid: [N, H, W, D_im]
# text_param: [N, D_txt]
# Output:
# att_grid: [N, H, W, 1]
#
# Implementation:
# 1. Elementwise multiplication between image_feat_grid and text_param
# 2. L2-normalization
# 3. Linear classification
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
image_shape = tf.shape(image_feat_grid)
N = tf.shape(time_idx)[0]
H = image_shape[1]
W = image_shape[2]
D_im = image_feat_grid.get_shape().as_list()[-1]
D_txt = text_param.get_shape().as_list()[-1]
# image_feat_mapped has shape [N, H, W, map_dim]
image_feat_mapped = _1x1_conv('conv_image', image_feat_grid,
output_dim=map_dim)
text_param_mapped = fc('fc_text', text_param, output_dim=map_dim)
text_param_mapped = tf.reshape(text_param_mapped, to_T([N, 1, 1, map_dim]))
eltwise_mult = tf.nn.l2_normalize(image_feat_mapped * text_param_mapped, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
att_grid.set_shape(self.att_shape)
return att_grid
def FilterModule(self, input_0, time_idx, batch_idx, map_dim=250,
scope='FilterModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
image_feat_grid = self._slice_image_feat_grid(batch_idx)
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: att_grid x image_feat_grid x text_param -> att_grid
# Input:
# input_0: [N, H, W, 1]
# image_feat_grid: [N, H, W, D_im]
# text_param: [N, D_txt]
# Output:
# att_grid: [N, H, W, 1]
#
# Implementation:
# This is just Find + And
find_result = self.FindModule(time_idx, batch_idx, reuse=True)
att_grid = self.AndModule(input_0, find_result, None, None, reuse=True)
att_grid.set_shape(input_0.get_shape())
return att_grid
def FindSamePropertyModule(self, input_0, time_idx, batch_idx, map_dim=250,
scope='FindSamePropertyModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
image_feat_grid = self._slice_image_feat_grid(batch_idx)
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: att_grid x image_feat_grid x text_param -> att_grid
# Input:
# input_0: [N, H, W, 1]
# image_feat_grid: [N, H, W, D_im]
# text_param: [N, D_txt]
# Output:
# att_grid: [N, H, W, 1]
#
# Implementation:
# 1. Extract visual features using the input attention map, and
# linear transform to map_dim
# 2. linear transform language features to map_dim
# 3. Convolve image features to map_dim
# 4. Element-wise multiplication of the three, l2_normalize, linear transform.
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
image_shape = tf.shape(image_feat_grid)
N = tf.shape(time_idx)[0]
H = image_shape[1]
W = image_shape[2]
D_im = image_feat_grid.get_shape().as_list()[-1]
D_txt = text_param.get_shape().as_list()[-1]
# image_feat_mapped has shape [N, H, W, map_dim]
image_feat_mapped = _1x1_conv('conv_image', image_feat_grid,
output_dim=map_dim)
text_param_mapped = fc('fc_text', text_param, output_dim=map_dim)
text_param_mapped = tf.reshape(text_param_mapped, to_T([N, 1, 1, map_dim]))
att_softmax = tf.reshape(
tf.nn.softmax(tf.reshape(input_0, to_T([N, H*W]))),
to_T([N, H, W, 1]))
# att_feat has shape [N, D_vis]
att_feat = tf.reduce_sum(image_feat_grid * att_softmax, axis=[1, 2])
att_feat_mapped = tf.reshape(
fc('fc_att', att_feat, output_dim=map_dim), to_T([N, 1, 1, map_dim]))
eltwise_mult = tf.nn.l2_normalize(
image_feat_mapped * text_param_mapped * att_feat_mapped, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
att_grid.set_shape(self.att_shape)
return att_grid
def TransformModule(self, input_0, time_idx, batch_idx, kernel_size=5,
map_dim=250, scope='TransformModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: att_grid x text_param -> att_grid
# Input:
# input_0: [N, H, W, 1]
# text_param: [N, D_txt]
# Output:
# att_grid: [N, H, W, 1]
#
# Implementation:
# Convolutional layer that also involve text_param
# A 'soft' convolutional kernel that is modulated by text_param
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
att_shape = tf.shape(input_0)
N = att_shape[0]
H = att_shape[1]
W = att_shape[2]
att_maps = _conv('conv_maps', input_0, kernel_size=kernel_size,
stride=1, output_dim=map_dim)
text_param_mapped = fc('text_fc', text_param, output_dim=map_dim)
text_param_mapped = tf.reshape(text_param_mapped, to_T([N, 1, 1, map_dim]))
eltwise_mult = tf.nn.l2_normalize(att_maps * text_param_mapped, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
att_grid.set_shape(self.att_shape)
return att_grid
def AndModule(self, input_0, input_1, time_idx, batch_idx,
scope='AndModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: att_grid x att_grid -> att_grid
# Input:
# input_0: [N, H, W, 1]
# input_1: [N, H, W, 1]
# Output:
# att_grid: [N, H, W, 1]
#
# Implementation:
# Take the elementwise-min
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
att_grid = tf.minimum(input_0, input_1)
att_grid.set_shape(self.att_shape)
return att_grid
def OrModule(self, input_0, input_1, time_idx, batch_idx,
scope='OrModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: att_grid x att_grid -> att_grid
# Input:
# input_0: [N, H, W, 1]
# input_1: [N, H, W, 1]
# Output:
# att_grid: [N, H, W, 1]
#
# Implementation:
# Take the elementwise-max
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
att_grid = tf.maximum(input_0, input_1)
att_grid.set_shape(self.att_shape)
return att_grid
def ExistModule(self, input_0, time_idx, batch_idx,
scope='ExistModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: att_grid -> answer probs
# Input:
# att_grid: [N, H, W, 1]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
        # 1. Min/avg/max-pool over att_grid
# 2. a linear mapping layer (without ReLU)
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
att_min = tf.reduce_min(input_0, axis=[1, 2])
att_avg = tf.reduce_mean(input_0, axis=[1, 2])
att_max = tf.reduce_max(input_0, axis=[1, 2])
# att_reduced has shape [N, 3]
att_reduced = tf.concat([att_min, att_avg, att_max], axis=1)
scores = fc('fc_scores', att_reduced, output_dim=self.num_choices)
return scores
def CountModule(self, input_0, time_idx, batch_idx,
scope='CountModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: att_grid -> answer probs
# Input:
# input_0: [N, H, W, 1]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
# 1. linear transform of the attention map (also including max and min)
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
H, W = self.att_shape[1:3]
att_all = tf.reshape(input_0, to_T([-1, H*W]))
att_min = tf.reduce_min(input_0, axis=[1, 2])
att_max = tf.reduce_max(input_0, axis=[1, 2])
                # att_concat has shape [N, H*W + 2]
att_concat = tf.concat([att_all, att_min, att_max], axis=1)
scores = fc('fc_scores', att_concat, output_dim=self.num_choices)
return scores
def EqualNumModule(self, input_0, input_1, time_idx, batch_idx,
scope='EqualNumModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: att_grid x att_grid -> answer probs
# Input:
# input_0: [N, H, W, 1]
# input_1: [N, H, W, 1]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
# 1. linear transform of the attention map (also including max and min)
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
att_shape = tf.shape(input_0)
H, W = self.att_shape[1:3]
att_all_0 = tf.reshape(input_0, to_T([-1, H*W]))
att_min_0 = tf.reduce_min(input_0, axis=[1, 2])
att_max_0 = tf.reduce_max(input_0, axis=[1, 2])
att_all_1 = tf.reshape(input_1, to_T([-1, H*W]))
att_min_1 = tf.reduce_min(input_1, axis=[1, 2])
att_max_1 = tf.reduce_max(input_1, axis=[1, 2])
                # att_concat has shape [N, 2*(H*W + 2)]
att_concat = tf.concat([att_all_0, att_min_0, att_max_0,
att_all_1, att_min_1, att_max_1],
axis=1)
scores = fc('fc_scores', att_concat, output_dim=self.num_choices)
return scores
def MoreNumModule(self, input_0, input_1, time_idx, batch_idx,
scope='MoreNumModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: att_grid x att_grid -> answer probs
# Input:
# input_0: [N, H, W, 1]
# input_1: [N, H, W, 1]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
# 1. linear transform of the attention map (also including max and min)
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
att_shape = tf.shape(input_0)
H, W = self.att_shape[1:3]
att_all_0 = tf.reshape(input_0, to_T([-1, H*W]))
att_min_0 = tf.reduce_min(input_0, axis=[1, 2])
att_max_0 = tf.reduce_max(input_0, axis=[1, 2])
att_all_1 = tf.reshape(input_1, to_T([-1, H*W]))
att_min_1 = tf.reduce_min(input_1, axis=[1, 2])
att_max_1 = tf.reduce_max(input_1, axis=[1, 2])
                # att_concat has shape [N, 2*(H*W + 2)]
att_concat = tf.concat([att_all_0, att_min_0, att_max_0,
att_all_1, att_min_1, att_max_1],
axis=1)
scores = fc('fc_scores', att_concat, output_dim=self.num_choices)
return scores
def LessNumModule(self, input_0, input_1, time_idx, batch_idx,
scope='LessNumModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# Mapping: att_grid x att_grid -> answer probs
# Input:
# input_0: [N, H, W, 1]
# input_1: [N, H, W, 1]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
# 1. linear transform of the attention map (also including max and min)
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
att_shape = tf.shape(input_0)
H, W = self.att_shape[1:3]
att_all_0 = tf.reshape(input_0, to_T([-1, H*W]))
att_min_0 = tf.reduce_min(input_0, axis=[1, 2])
att_max_0 = tf.reduce_max(input_0, axis=[1, 2])
att_all_1 = tf.reshape(input_1, to_T([-1, H*W]))
att_min_1 = tf.reduce_min(input_1, axis=[1, 2])
att_max_1 = tf.reduce_max(input_1, axis=[1, 2])
                # att_concat has shape [N, 2*(H*W + 2)]
att_concat = tf.concat([att_all_0, att_min_0, att_max_0,
att_all_1, att_min_1, att_max_1],
axis=1)
scores = fc('fc_scores', att_concat, output_dim=self.num_choices)
return scores
def SamePropertyModule(self, input_0, input_1, time_idx, batch_idx,
map_dim=250, scope='SamePropertyModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
image_feat_grid = self._slice_image_feat_grid(batch_idx)
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: att_grid x att_grid -> answer probs
# Input:
# input_0: [N, H, W, 1]
# input_1: [N, H, W, 1]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
# 1. Extract visual features using the input attention map, and
# linear transform to map_dim
# 2. linear transform language features to map_dim
# 3. Convolve image features to map_dim
# 4. Element-wise multiplication of the three, l2_normalize, linear transform.
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
image_shape = tf.shape(image_feat_grid)
N = tf.shape(time_idx)[0]
H = image_shape[1]
W = image_shape[2]
D_im = image_feat_grid.get_shape().as_list()[-1]
D_txt = text_param.get_shape().as_list()[-1]
text_param_mapped = fc('fc_text', text_param, output_dim=map_dim)
att_softmax_0 = tf.reshape(
tf.nn.softmax(tf.reshape(input_0, to_T([N, H*W]))),
to_T([N, H, W, 1]))
att_softmax_1 = tf.reshape(
tf.nn.softmax(tf.reshape(input_1, to_T([N, H*W]))),
to_T([N, H, W, 1]))
# att_feat_0, att_feat_1 has shape [N, D_vis]
att_feat_0 = tf.reduce_sum(image_feat_grid * att_softmax_0, axis=[1, 2])
att_feat_1 = tf.reduce_sum(image_feat_grid * att_softmax_1, axis=[1, 2])
att_feat_mapped_0 = tf.reshape(
fc('fc_att_0', att_feat_0, output_dim=map_dim),
to_T([N, map_dim]))
att_feat_mapped_1 = tf.reshape(
fc('fc_att_1', att_feat_1, output_dim=map_dim),
to_T([N, map_dim]))
eltwise_mult = tf.nn.l2_normalize(
att_feat_mapped_0 * text_param_mapped * att_feat_mapped_1, 1)
scores = fc('fc_eltwise', eltwise_mult, output_dim=self.num_choices)
return scores
def DescribeModule(self, input_0, time_idx, batch_idx,
map_dim=250, scope='DescribeModule', reuse=True):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
image_feat_grid = self._slice_image_feat_grid(batch_idx)
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: att_grid -> answer probs
# Input:
# input_0: [N, H, W, 1]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
# 1. Extract visual features using the input attention map, and
# linear transform to map_dim
# 2. linear transform language features to map_dim
# 3. Element-wise multiplication of the two, l2_normalize, linear transform.
with tf.variable_scope(self.module_variable_scope):
with tf.variable_scope(scope, reuse=reuse):
image_shape = tf.shape(image_feat_grid)
N = tf.shape(time_idx)[0]
H = image_shape[1]
W = image_shape[2]
D_im = image_feat_grid.get_shape().as_list()[-1]
D_txt = text_param.get_shape().as_list()[-1]
text_param_mapped = fc('fc_text', text_param, output_dim=map_dim)
att_softmax = tf.reshape(
tf.nn.softmax(tf.reshape(input_0, to_T([N, H*W]))),
to_T([N, H, W, 1]))
                # att_feat has shape [N, D_vis]
att_feat = tf.reduce_sum(image_feat_grid * att_softmax, axis=[1, 2])
att_feat_mapped = tf.reshape(
fc('fc_att', att_feat, output_dim=map_dim),
to_T([N, map_dim]))
eltwise_mult = tf.nn.l2_normalize(text_param_mapped * att_feat_mapped, 1)
scores = fc('fc_eltwise', eltwise_mult, output_dim=self.num_choices)
return scores
| {
"repo_name": "ronghanghu/n2nmn",
"path": "models_clevr/nmn3_modules.py",
"copies": "1",
"size": "22175",
"license": "bsd-2-clause",
"hash": -8720192153346534000,
"line_mean": 43.797979798,
"line_max": 91,
"alpha_frac": 0.5446674183,
"autogenerated": false,
"ratio": 3.338100255908475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4382767674208475,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.bijectors.exp import Exp
from tensorflow_probability.python.distributions import (
LogNormal, TransformedDistribution, Uniform)
from tensorflow_probability.python.internal import dtype_util
__all__ = [
"LogUniform",
]
class LogUniform(TransformedDistribution):
"""The log-uniform distribution (i.e. the logarithm of the
samples from this distribution are Uniform) """
def __init__(self,
low=0.,
high=1.,
validate_args=False,
allow_nan_stats=True,
name="LogUniform"):
"""Construct a log-normal distribution.
The LogNormal distribution models positive-valued random variables
whose logarithm is normally distributed with mean `loc` and
standard deviation `scale`. It is constructed as the exponential
transformation of a Normal distribution.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([low, high], tf.float32)
super(LogUniform, self).__init__(distribution=Uniform(
low=tf.convert_to_tensor(value=low, name="low", dtype=dtype),
high=tf.convert_to_tensor(value=high, name="high", dtype=dtype),
allow_nan_stats=allow_nan_stats),
bijector=Exp(),
validate_args=validate_args,
parameters=parameters,
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("low", "high"),
([tf.convert_to_tensor(value=sample_shape, dtype=tf.int32)] * 2)))
@classmethod
def _params_event_ndims(cls):
return dict(low=0, high=0)
@property
def low(self):
"""Lower boundary of the output interval."""
return self.distribution.low
@property
def high(self):
"""Upper boundary of the output interval."""
return self.distribution.high
def range(self, name="range"):
"""`high - low`."""
with self._name_scope(name):
return self.high - self.low
def _entropy(self):
raise NotImplementedError
def _mean(self):
raise NotImplementedError
def _variance(self):
raise NotImplementedError
def _stddev(self):
raise NotImplementedError
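# Hedged usage sketch (illustrative only, assuming an eager TensorFlow /
# tensorflow_probability environment):
if __name__ == "__main__":
  dist = LogUniform(low=0., high=1.)
  samples = dist.sample(5)  # values lie in [exp(0), exp(1)] = [1, e]
  print(samples)  # log(samples) would be Uniform(0, 1) distributed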
| {
"repo_name": "imito/odin",
"path": "odin/bay/distributions/logarizmed.py",
"copies": "1",
"size": "3226",
"license": "mit",
"hash": 7329559124094396000,
"line_mean": 33.688172043,
"line_max": 78,
"alpha_frac": 0.6429014259,
"autogenerated": false,
"ratio": 4.389115646258503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5532017072158503,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import confusion_matrix as tf_cm
from odin.backend.maths import to_llr
from odin.backend.tensor import nonzeros, transpose
from odin.utils import as_tuple, is_number
# ===========================================================================
# Losses
# ===========================================================================
def binary_accuracy(y_true,
y_pred,
threshold=0.5,
reduction=tf.reduce_mean,
name=None):
""" Non-differentiable """
with tf.name_scope(name, "binary_accuracy", [y_pred, y_true, threshold]):
if y_pred.shape.ndims > 1:
y_pred = tf.reshape(y_pred, (-1,))
if y_true.shape.ndims > 1:
y_true = tf.reshape(y_true, (-1,))
y_pred = tf.greater_equal(y_pred, threshold)
match_values = tf.cast(tf.equal(tf.cast(y_pred, 'int32'),
tf.cast(y_true, 'int32')),
dtype='int32')
return reduction(match_values)
def categorical_accuracy(y_true,
y_pred,
top_k=1,
reduction=tf.reduce_mean,
name=None):
""" Non-differentiable """
with tf.name_scope(name, "categorical_accuracy", [y_true, y_pred]):
if y_true.shape.ndims == y_pred.shape.ndims:
y_true = tf.argmax(y_true, axis=-1)
elif y_true.shape.ndims != y_pred.shape.ndims - 1:
raise TypeError('rank mismatch between y_true and y_pred')
if top_k == 1:
# standard categorical accuracy
top = tf.argmax(y_pred, axis=-1)
y_true = tf.cast(y_true, top.dtype.base_dtype)
match_values = tf.equal(top, y_true)
else:
match_values = tf.nn.in_top_k(y_pred, tf.cast(y_true, 'int32'), k=top_k)
match_values = tf.cast(match_values, dtype='float32')
return reduction(match_values)
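# For illustration (hypothetical scores): with
#   y_pred = [[0.1, 0.2, 0.7],
#             [0.5, 0.3, 0.2]]   and   y_true = [2, 1],
# the top-1 predictions are [2, 0], so categorical_accuracy gives 0.5,
# while with top_k=2 both labels fall inside their top-2 candidate sets
# ({2, 1} and {0, 1}), so the accuracy becomes 1.0.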
def confusion_matrix(y_true, y_pred, labels=None, normalize=False, name=None):
"""
Computes the confusion matrix of given vectors containing
actual observations and predicted observations.
Parameters
----------
y_true : 1-d or 2-d tensor variable
true values
y_pred : 1-d or 2-d tensor variable
prediction values
normalize : bool
if True, normalize each row to [0., 1.]
labels : array, shape = [nb_classes], int (nb_classes)
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Note
----
if you want to calculate: Precision, Recall, F1 scores from the
confusion matrix, set `normalize=False`
"""
with tf.name_scope(name, 'confusion_matrix', [y_true, y_pred]):
nb_classes = None
if y_true.shape.ndims == 2:
nb_classes = y_true.shape.as_list()[-1]
y_true = tf.argmax(y_true, -1)
elif y_true.shape.ndims != 1:
      raise ValueError('y_true must be 1-d or 2-d tensor variable')
if y_pred.shape.ndims == 2:
nb_classes = y_pred.shape.as_list()[-1]
y_pred = tf.argmax(y_pred, -1)
elif y_pred.shape.ndims != 1:
      raise ValueError('y_pred must be 1-d or 2-d tensor variable')
# check valid labels
if labels is None:
if nb_classes is None:
raise RuntimeError(
"Cannot infer the number of classes for confusion matrix")
labels = int(nb_classes)
elif is_number(labels):
labels = int(labels)
elif hasattr(labels, '__len__'):
labels = len(labels)
# transpose to match the format of sklearn
cm = tf_cm(labels=y_true, predictions=y_pred, num_classes=labels)
if normalize:
cm = tf.cast(cm, dtype='float32')
cm = cm / tf.reduce_sum(cm, axis=1, keep_dims=True)
return cm
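# For illustration (hypothetical counts): an un-normalized confusion matrix
#   [[3, 1],
#    [2, 4]]
# becomes, with normalize=True (each row divided by its own sum),
#   [[0.75, 0.25],
#    [0.3333..., 0.6667...]]
# which is why Precision/Recall/F1 should be derived from the un-normalized form.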
def detection_matrix(y_true, y_pred):
# TODO
pass
# ===========================================================================
# Speech task metrics
# ===========================================================================
def compute_Cavg(y_llr,
y_true,
cluster_idx=None,
Ptrue=0.5,
Cfa=1.,
Cmiss=1.,
probability_input=False):
  ''' Fast calculation of Cavg (for only 1 cluster)
Parameters
----------
y_llr: (nb_samples, nb_classes)
log likelihood ratio: llr = log (P(data|target) / P(data|non-target))
y_true: numpy array of shape (nb_samples,)
Class labels.
cluster_idx: list,
Each element is a list that represents a particular language
cluster and contains all class labels that belong to the cluster.
  Ptrue: float, optional
Probability of a target trial.
Cfa: float, optional
Cost for False Acceptance error.
Cmiss: float, optional
Cost for False Rejection error.
probability_input: boolean
if True, `y_llr` is the output probability from softmax and perform
llr transform for `y_llr`
Returns
-------
cluster_cost: numpy array of shape (n_clusters,)
It contains average percentage costs for each cluster as defined by
NIST LRE-15 language detection task. See
http://www.nist.gov/itl/iad/mig/upload/LRE15_EvalPlan_v22-3.pdf
total_cost: float
An average percentage cost over all clusters.
'''
if probability_input:
y_llr = to_llr(y_llr)
thresh = np.log(Cfa / Cmiss) - np.log(Ptrue / (1 - Ptrue))
nb_classes = y_llr.shape[1].value
if isinstance(y_true, (list, tuple)):
y_true = np.asarray(y_true)
if y_true.shape.ndims == 1:
y_true = tf.one_hot(y_true, depth=nb_classes, axis=-1)
y_true = tf.cast(y_true, y_llr.dtype.base_dtype)
# ====== statistics ====== #
# invert of y_true, False Negative mask
y_false = 1. - y_true
y_positive = tf.cast(tf.greater_equal(y_llr, thresh), y_llr.dtype.base_dtype)
# invert of y_positive
y_negative = tf.cast(tf.less(y_llr, thresh), y_llr.dtype.base_dtype)
distribution = tf.clip_by_value(tf.reduce_sum(y_true, axis=0), 10e-8,
10e8) # no zero values
# ====== Pmiss ====== #
miss = tf.reduce_sum(y_true * y_negative, axis=0)
Pmiss = 100 * (Cmiss * Ptrue * miss) / distribution
  # ====== Pfa ====== # This calculation gives different results
fa = tf.reduce_sum(y_false * y_positive, axis=0)
Pfa = 100 * (Cfa * (1 - Ptrue) * fa) / distribution
Cavg = tf.reduce_mean(Pmiss) + tf.reduce_mean(Pfa) / (nb_classes - 1)
return Cavg
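# For illustration: with the default costs Cfa = Cmiss = 1, the decision
# threshold above is thresh = log(1) - log(Ptrue / (1 - Ptrue)); for
# Ptrue = 0.5 this is 0, and for Ptrue = 0.1 it is -log(1/9) ~= 2.197, i.e.
# a rarer target class requires a larger LLR before a trial is accepted.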
def compute_Cnorm(y_true,
y_score,
Ptrue=[0.1, 0.5],
Cfa=1.,
Cmiss=1.,
probability_input=False):
""" Computes normalized detection cost function (DCF) given
the costs for false accepts and false rejects as well as a priori
probability for target speakers.
* This is the actual cost, different from the min cost (minDCF)
(By convention, the more positive the score,
the more likely is the target hypothesis.)
Parameter
---------
y_true: {array [n_samples], or list of array}
each array is labels of binary or multi-classes
detection tasks, each array can be an array of
classes indices, or one-hot-encoded matrix.
If multiple array are given, calculating `equalized cost`
of all partitions, an example of 2 partitions are:
VAST and MLSR14 files
y_score: {array [n_samples, n_classes], or list of array}
      the output scores, either probability values or log-likelihood
      values; log-likelihood is assumed by default (set
      `probability_input=True` when passing probabilities instead).
Ptrue: float [0.,1.], or list of float
hypothesized prior probabilities of positive class,
      you can give multiple values by providing an array
Cfa: float
weight for False Alarm - False Positive error
Cmiss: float
weight for Miss - False Negative error
Return
------
  C_norm: array [len(Ptrue)]
      normalized detection cost for each given value of `Ptrue`.
  C_norm_array: array [len(Ptrue), n_classes]
      normalized detection cost for each class, for each given
      value of `Ptrue`
"""
y_true = as_tuple(y_true, t=np.ndarray)
y_score = as_tuple(y_score, t=np.ndarray)
if len(y_true) != len(y_score):
raise ValueError("There are %d partitions for `y_true`, but %d "
"partitions for `y_score`." % (len(y_true), len(y_score)))
if len(set(i.shape[1] for i in y_score)) != 1:
raise ValueError(
"The number of classes among scores array is inconsistent.")
nb_partitions = len(y_true)
# ====== preprocessing ====== #
y_true = [np.argmax(i, axis=-1) if i.ndim >= 2 else i for i in y_true]
nb_classes = y_score[0].shape[1]
# threshold
Ptrue = np.asarray(as_tuple(Ptrue), dtype=float)
nb_threshold = len(Ptrue)
# log(beta) is threshold, i.e.
# if Ptrue=0.5 => beta=1. => threshold=0.
beta = (Cfa / Cmiss) * ((1 - Ptrue) / Ptrue)
beta = np.clip(beta, a_min=np.finfo(float).eps, a_max=np.inf)
# ====== Cavg ====== #
global_cm_array = np.zeros(shape=(nb_threshold, nb_classes, nb_classes))
# Apply threshold on the scores and compute the confusion matrix
for scores, labels in zip(y_score, y_true):
actual_TP_per_class = np.lib.arraysetops.unique(ar=labels,
return_counts=True)[1]
if probability_input: # special case input is probability values
scores = to_llr(scores)
for theta_ix, theta in enumerate(np.log(beta)):
thresholded_scores = (scores > theta).astype(int)
# compute confusion matrix, this is different from
# general implementation of confusion matrix above
cm = np.zeros(shape=(nb_classes, nb_classes), dtype=np.int64)
for i, (trial, target) in enumerate(zip(thresholded_scores, labels)):
cm[target, :] += trial
# miss and fa
predic_TP_per_class = cm.diagonal()
# Compute the number of miss per class
nb_miss_per_class = actual_TP_per_class - predic_TP_per_class
cm_miss_fa = cm
cm_miss_fa[np.diag_indices_from(cm)] = nb_miss_per_class
cm_probabilities = cm_miss_fa / actual_TP_per_class[:, None]
# update global
global_cm_array[theta_ix] += cm_probabilities
# normalize by partitions
global_cm_array /= nb_partitions
# Extract probabilities of false negatives from confusion matrix
p_miss_arr = global_cm_array.diagonal(0, 1, 2)
p_miss = p_miss_arr.mean(1)
# Extract probabilities of false positives from confusion matrix
p_false_alarm_arr = (global_cm_array.sum(1) - p_miss_arr) / (nb_classes - 1)
p_false_alarm = p_false_alarm_arr.mean(1)
# Compute costs per languages
C_Norm_arr = p_miss_arr + beta[:, None] * p_false_alarm_arr
# Compute overall cost
C_Norm = p_miss + beta * p_false_alarm
return C_Norm, C_Norm_arr
def compute_minDCF(Pfa, Pmiss, Cmiss=1, Cfa=1, Ptrue=0.5):
""" Estimating the min value of the detection
cost function (DCF)
Parameters
----------
Pfa: array, [n_samples]
false alarm rate or false positive rate
Pmiss: array, [n_samples]
miss rate or false negative rate
  Cmiss: scalar
      weight for miss (false negative) errors
  Cfa: scalar
      weight for false alarm (false positive) errors
Ptrue: scalar [0., 1.]
prior probability of positive cases.
Return
------
min_DCF: scalar
minimum value of the detection cost function for
a given detection error trade-off curve
  Pfa_optimum: scalar
      the corresponding false alarm probability at the minimum
  Pmiss_optimum: scalar
      the corresponding miss probability at the minimum
"""
assert Pmiss.shape == Pfa.shape
Pfalse = 1 - Ptrue
# detection cost function vector
DCF_vector = (Cmiss * Pmiss * Ptrue) + \
(Cfa * Pfa * Pfalse)
# get the optimal value and corresponding index
min_idx = np.argmin(DCF_vector)
min_val = DCF_vector[min_idx]
return min_val, Pfa[min_idx], Pmiss[min_idx]
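# For illustration (hypothetical error rates): with
#   Pfa   = np.array([0.5, 0.2, 0.05])
#   Pmiss = np.array([0.05, 0.1, 0.4])
# and the defaults Cmiss = Cfa = 1, Ptrue = 0.5, the DCF vector is
# 0.5 * (Pmiss + Pfa) = [0.275, 0.15, 0.225], so compute_minDCF returns
# (0.15, 0.2, 0.1) -- the minimum cost and its operating point.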
def compute_EER(Pfa, Pmiss):
""" computes the equal error rate (EER) given
Pmiss or False Negative Rate
and
Pfa or False Positive Rate
calculated for a range of operating points on the DET curve
@Author: "Timothee Kheyrkhah, Omid Sadjadi"
"""
fpr, fnr = Pfa, Pmiss
diff_pm_fa = fnr - fpr
x1 = np.flatnonzero(diff_pm_fa >= 0)[0]
x2 = np.flatnonzero(diff_pm_fa < 0)[-1]
a = (fnr[x1] - fpr[x1]) / (fpr[x2] - fpr[x1] - (fnr[x2] - fnr[x1]))
return fnr[x1] + a * (fnr[x2] - fnr[x1])
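# For illustration (hypothetical DET sweep, Pfa decreasing / Pmiss increasing):
#   Pfa   = np.array([1.0, 0.5, 0.2, 0.0])
#   Pmiss = np.array([0.0, 0.1, 0.4, 1.0])
# the curves cross between the 2nd and 3rd points; the linear interpolation
# above gives compute_EER(Pfa, Pmiss) == 0.3, where both error rates equal 0.3.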
def compute_AUC(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`. For an alternative
way to summarize a precision-recall curve, see
:func:`average_precision_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
"""
from sklearn.metrics import auc
return auc(x, y, reorder)
def roc_curve(y_true,
y_score,
pos_label=None,
sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
@copy from sklearn for convenience
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
from sklearn.metrics import roc_curve
return roc_curve(y_true, y_score, pos_label, sample_weight, drop_intermediate)
def prc_curve(y_true, y_probas, pos_label=None, sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
y_probas : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int or str, default=None
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
from sklearn.metrics import precision_recall_curve
return precision_recall_curve(y_true, y_probas, pos_label, sample_weight)
def det_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Detection Error Tradeoff
Compute error rates for different probability thresholds
  @Original implementation from NIST
  The function is adapted to take the same input format as the
  NIST original code and `sklearn.metrics`
Note: this implementation is restricted to the binary classification task.
(By convention, the more positive the score,
the more likely is the target hypothesis.)
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
y_score : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
with `n_samples = n_true_samples + n_false_samples`
P_fa: array, shape = [n_samples]
fpr - False Positive rate, or false alarm probabilities
P_miss : array, shape = [n_samples]
fnr - False Negative rate, or miss probabilities
References
----------
.. [1] `Wikipedia entry for Detection error tradeoff
<https://en.wikipedia.org/wiki/Detection_error_tradeoff>`_
.. [2] `The DET Curve in Assessment of Detection Task Performance
<http://www.itl.nist.gov/iad/mig/publications/storage_paper/det.pdf>`_
.. [3] `2008 NIST Speaker Recognition Evaluation Results
<http://www.itl.nist.gov/iad/mig/tests/sre/2008/official_results/>`_
.. [4] `DET-Curve Plotting software for use with MATLAB
<http://www.itl.nist.gov/iad/mig/tools/DETware_v2.1.targz.htm>`_
Examples
--------
>>> import numpy as np
>>> from odin import backend as K
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
  >>> fpr, fnr = K.metrics.det_curve(y_true, y_scores)
  >>> print(fpr)
  array([ 0.5, 0.5, 0. ])
  >>> print(fnr)
  array([ 0. , 0.5, 0.5])
"""
# ====== ravel everything in cased of multi-classes ====== #
y_score = y_score.ravel()
y_true = np.array(y_true)
if y_true.ndim >= 2:
y_true = np.argmax(y_true, axis=-1)
nb_classes = len(np.lib.arraysetops.unique(y_true))
# multi-classes
if nb_classes > 2:
total_samples = nb_classes * len(y_true)
indices = np.arange(0, total_samples, nb_classes) + y_true
y_true = np.zeros(total_samples, dtype=np.int)
y_true[indices] = 1
# ====== check weights ====== #
if sample_weight is not None:
if len(sample_weight) != len(y_score):
raise ValueError("Provided `sample_weight` for %d samples, but got "
"scores for %d samples." %
(len(sample_weight), len(y_score)))
else:
sample_weight = np.ones(shape=(len(y_score),), dtype=y_score.dtype)
# ====== processing ====== #
if pos_label is not None:
y_true = (y_true == pos_label).astype(np.int)
# ====== start ====== #
sorted_ndx = np.argsort(y_score, kind='mergesort')
y_true = y_true[sorted_ndx]
# sort the weights also, dont forget this
sample_weight = sample_weight[sorted_ndx]
tgt_weights = sample_weight * y_true
imp_weights = sample_weight * (1 - y_true)
# FNR
Pmiss = np.cumsum(tgt_weights) / np.sum(tgt_weights)
# FPR
Pfa = 1 - np.cumsum(imp_weights) / np.sum(imp_weights)
return Pfa, Pmiss
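# --- Hypothetical end-to-end sketch (not part of the original module) ---
# Ties det_curve, compute_EER and compute_minDCF together on synthetic
# binary scores; the labels and scores are random and serve only as an
# illustration of the calling convention.
def _demo_det_pipeline():
  rng = np.random.RandomState(8)
  y_true = rng.randint(0, 2, size=1000)
  # positives get slightly higher scores than negatives
  y_score = rng.randn(1000) + 1.5 * y_true
  Pfa, Pmiss = det_curve(y_true, y_score)
  eer = compute_EER(Pfa, Pmiss)
  min_dcf, pfa_opt, pmiss_opt = compute_minDCF(Pfa, Pmiss, Ptrue=0.5)
  return eer, min_dcf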
| {
"repo_name": "imito/odin",
"path": "odin/backend/metrics.py",
"copies": "1",
"size": "22119",
"license": "mit",
"hash": 2755289892301110000,
"line_mean": 35.6208609272,
"line_max": 80,
"alpha_frac": 0.6299561463,
"autogenerated": false,
"ratio": 3.4615023474178406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9553876409324839,
"avg_score": 0.0075164168786003875,
"num_lines": 604
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow import convert_to_tensor as to_T
from models_clevr.nmn3_netgen_att import AttentionSeq2Seq
from models_clevr.nmn3_modules import Modules
from models_clevr.nmn3_assembler import INVALID_EXPR
from util.cnn import fc_layer as fc, conv_layer as conv
class NMN3Model:
def __init__(self, image_feat_grid, text_seq_batch, seq_length_batch,
T_decoder, num_vocab_txt, embed_dim_txt, num_vocab_nmn,
embed_dim_nmn, lstm_dim, num_layers, assembler,
encoder_dropout, decoder_dropout, decoder_sampling,
num_choices, use_gt_layout=None, gt_layout_batch=None,
scope='neural_module_network', reuse=None):
with tf.variable_scope(scope, reuse=reuse):
# Part 0: Visual feature from CNN
self.image_feat_grid = image_feat_grid
            # Part 1: Seq2seq RNN to generate module layout tokens
with tf.variable_scope('layout_generation'):
att_seq2seq = AttentionSeq2Seq(text_seq_batch,
seq_length_batch, T_decoder, num_vocab_txt,
embed_dim_txt, num_vocab_nmn, embed_dim_nmn, lstm_dim,
num_layers, assembler, encoder_dropout, decoder_dropout,
decoder_sampling, use_gt_layout, gt_layout_batch)
self.att_seq2seq = att_seq2seq
predicted_tokens = att_seq2seq.predicted_tokens
token_probs = att_seq2seq.token_probs
word_vecs = att_seq2seq.word_vecs
neg_entropy = att_seq2seq.neg_entropy
self.atts = att_seq2seq.atts
self.predicted_tokens = predicted_tokens
self.token_probs = token_probs
self.word_vecs = word_vecs
self.neg_entropy = neg_entropy
# log probability of each generated sequence
self.log_seq_prob = tf.reduce_sum(tf.log(token_probs), axis=0)
# Part 2: Neural Module Network
with tf.variable_scope('layout_execution'):
modules = Modules(image_feat_grid, word_vecs, num_choices)
self.modules = modules
# Recursion of modules
att_shape = image_feat_grid.get_shape().as_list()[1:-1] + [1]
# Forward declaration of module recursion
att_expr_decl = td.ForwardDeclaration(td.PyObjectType(), td.TensorType(att_shape))
# _Scene
case_scene = td.Record([('time_idx', td.Scalar(dtype='int32')),
('batch_idx', td.Scalar(dtype='int32'))])
case_scene = case_scene >> td.Function(modules.SceneModule)
# _Find
case_find = td.Record([('time_idx', td.Scalar(dtype='int32')),
('batch_idx', td.Scalar(dtype='int32'))])
case_find = case_find >> td.Function(modules.FindModule)
# _Filter
case_filter = td.Record([('input_0', att_expr_decl()),
('time_idx', td.Scalar(dtype='int32')),
('batch_idx', td.Scalar(dtype='int32'))])
case_filter = case_filter >> td.Function(modules.FilterModule)
# _FindSameProperty
case_find_same_property = td.Record([('input_0', att_expr_decl()),
('time_idx', td.Scalar(dtype='int32')),
('batch_idx', td.Scalar(dtype='int32'))])
case_find_same_property = case_find_same_property >> \
td.Function(modules.FindSamePropertyModule)
# _Transform
case_transform = td.Record([('input_0', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_transform = case_transform >> td.Function(modules.TransformModule)
# _And
case_and = td.Record([('input_0', att_expr_decl()),
('input_1', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_and = case_and >> td.Function(modules.AndModule)
# _Or
case_or = td.Record([('input_0', att_expr_decl()),
('input_1', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_or = case_or >> td.Function(modules.OrModule)
# _Exist
case_exist = td.Record([('input_0', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_exist = case_exist >> td.Function(modules.ExistModule)
# _Count
case_count = td.Record([('input_0', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_count = case_count >> td.Function(modules.CountModule)
# _EqualNum
case_equal_num = td.Record([('input_0', att_expr_decl()),
('input_1', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_equal_num = case_equal_num >> td.Function(modules.EqualNumModule)
# _MoreNum
case_more_num = td.Record([('input_0', att_expr_decl()),
('input_1', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_more_num = case_more_num >> td.Function(modules.MoreNumModule)
# _LessNum
case_less_num = td.Record([('input_0', att_expr_decl()),
('input_1', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_less_num = case_less_num >> td.Function(modules.LessNumModule)
# _SameProperty
case_same_property = td.Record([('input_0', att_expr_decl()),
('input_1', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_same_property = case_same_property >> \
td.Function(modules.SamePropertyModule)
# _Describe
case_describe = td.Record([('input_0', att_expr_decl()),
('time_idx', td.Scalar('int32')),
('batch_idx', td.Scalar('int32'))])
case_describe = case_describe >> \
td.Function(modules.DescribeModule)
recursion_cases = td.OneOf(td.GetItem('module'), {
'_Scene': case_scene,
'_Find': case_find,
'_Filter': case_filter,
'_FindSameProperty': case_find_same_property,
'_Transform': case_transform,
'_And': case_and,
'_Or': case_or})
att_expr_decl.resolve_to(recursion_cases)
# For invalid expressions, define a dummy answer
# so that all answers have the same form
dummy_scores = td.Void() >> td.FromTensor(np.zeros(num_choices, np.float32))
output_scores = td.OneOf(td.GetItem('module'), {
'_Exist': case_exist,
'_Count': case_count,
'_EqualNum': case_equal_num,
'_MoreNum': case_more_num,
'_LessNum': case_less_num,
'_SameProperty': case_same_property,
'_Describe': case_describe,
INVALID_EXPR: dummy_scores})
# compile and get the output scores
self.compiler = td.Compiler.create(output_scores)
self.scores = self.compiler.output_tensors[0]
# Regularization: Entropy + L2
self.entropy_reg = tf.reduce_mean(neg_entropy)
module_weights = [v for v in tf.trainable_variables()
if (scope in v.op.name and
v.op.name.endswith('weights'))]
self.l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in module_weights])
| {
"repo_name": "ronghanghu/n2nmn",
"path": "models_clevr/nmn3_model.py",
"copies": "1",
"size": "9336",
"license": "bsd-2-clause",
"hash": -6509452412698723000,
"line_mean": 55.2409638554,
"line_max": 98,
"alpha_frac": 0.469151671,
"autogenerated": false,
"ratio": 4.414184397163121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.538333606816312,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow import convert_to_tensor as to_T
# the number of attention input to each module
_module_input_num = {'_Find': 0,
'_Transform': 1,
'_And': 2,
'_Filter': 1,
'_Answer': 1}
_module_output_type = {'_Find': 'att',
'_Transform': 'att',
'_And': 'att',
'_Filter': 'att',
'_Answer': 'ans'}
INVALID_EXPR = 'INVALID_EXPR'
class Assembler:
def __init__(self, module_vocab_file):
# read the module list, and record the index of each module and <eos>
with open(module_vocab_file) as f:
self.module_names = [s.strip() for s in f.readlines()]
# find the index of <eos>
for n_s in range(len(self.module_names)):
if self.module_names[n_s] == '<eos>':
self.EOS_idx = n_s
break
# build a dictionary from module name to token index
self.name2idx_dict = {name: n_s for n_s, name in enumerate(self.module_names)}
def module_list2tokens(self, module_list, T=None):
layout_tokens = [self.name2idx_dict[name] for name in module_list]
if T is not None:
if len(module_list) >= T:
raise ValueError('Not enough time steps to add <eos>')
layout_tokens += [self.EOS_idx]*(T-len(module_list))
return layout_tokens
def _layout_tokens2str(self, layout_tokens):
return ' '.join([self.module_names[idx] for idx in layout_tokens])
def _invalid_expr(self, layout_tokens, error_str):
return {'module': INVALID_EXPR,
'expr_str': self._layout_tokens2str(layout_tokens),
'error': error_str}
def _assemble_layout_tokens(self, layout_tokens, batch_idx):
# All modules takes a time_idx as the index from LSTM hidden states
# (even if it doesn't need it, like _And), and different arity of
# attention inputs. The output type can be either attention or answer
#
# The final assembled expression for each instance is as follows:
# expr_type :=
# {'module': '_Find', 'output_type': 'att', 'time_idx': idx}
# | {'module': '_Transform', 'output_type': 'att', 'time_idx': idx,
# 'inputs_0': <expr_type>}
# | {'module': '_And', 'output_type': 'att', 'time_idx': idx,
# 'inputs_0': <expr_type>, 'inputs_1': <expr_type>)}
# | {'module': '_Answer', 'output_type': 'ans', 'time_idx': idx,
# 'inputs_0': <expr_type>}
# | {'module': INVALID_EXPR, 'expr_str': '...', 'error': '...',
# 'assembly_loss': <float32>} (for invalid expressions)
#
# A valid layout must contain <eos>. Assembly fails if it doesn't.
if not np.any(layout_tokens == self.EOS_idx):
return self._invalid_expr(layout_tokens, 'cannot find <eos>')
# Decoding Reverse Polish Notation with a stack
decoding_stack = []
for t in range(len(layout_tokens)):
# decode a module/operation
module_idx = layout_tokens[t]
if module_idx == self.EOS_idx:
break
module_name = self.module_names[module_idx]
expr = {'module': module_name,
'output_type': _module_output_type[module_name],
'time_idx': t, 'batch_idx': batch_idx}
input_num = _module_input_num[module_name]
# Check if there are enough input in the stack
if len(decoding_stack) < input_num:
# Invalid expression. Not enough input.
return self._invalid_expr(layout_tokens, 'not enough input for ' + module_name)
# Get the input from stack
for n_input in range(input_num-1, -1, -1):
stack_top = decoding_stack.pop()
if stack_top['output_type'] != 'att':
# Invalid expression. Input must be attention
return self._invalid_expr(layout_tokens, 'input incompatible for ' + module_name)
expr['input_%d' % n_input] = stack_top
decoding_stack.append(expr)
# After decoding the reverse polish expression, there should be exactly
# one expression in the stack
if len(decoding_stack) != 1:
return self._invalid_expr(layout_tokens, 'final stack size not equal to 1 (%d remains)' % len(decoding_stack))
result = decoding_stack[0]
# The result type should be answer, not attention
if result['output_type'] != 'ans':
return self._invalid_expr(layout_tokens, 'result type must be ans, not att')
return result
def assemble(self, layout_tokens_batch):
# layout_tokens_batch is a numpy array with shape [T, N],
# containing module tokens and <eos>, in Reverse Polish Notation.
_, N = layout_tokens_batch.shape
expr_list = [self._assemble_layout_tokens(layout_tokens_batch[:, n], n)
for n in range(N)]
expr_validity = np.array([expr['module'] != INVALID_EXPR
for expr in expr_list], np.bool)
return expr_list, expr_validity
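# --- Hypothetical usage sketch (not part of the original module) ---
# Demonstrates the Reverse Polish Notation decoding: the vocabulary file
# below is written on the fly, and the layout "_Find _Find _And _Answer"
# assembles into a single answer-typed expression tree.
def _demo_assembler():
    import tempfile
    vocab = ['_Find', '_Transform', '_And', '_Answer', '<eos>']
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('\n'.join(vocab))
        vocab_file = f.name
    assembler = Assembler(vocab_file)
    tokens = assembler.module_list2tokens(
        ['_Find', '_Find', '_And', '_Answer'], T=6)
    # assemble() expects a [T, N] batch of token indices
    batch = np.array(tokens, dtype=np.int64)[:, None]
    expr_list, expr_validity = assembler.assemble(batch)
    # expr_validity[0] is True and expr_list[0]['module'] == '_Answer'
    return expr_list, expr_validity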
| {
"repo_name": "ronghanghu/n2nmn",
"path": "models_shapes/nmn3_assembler.py",
"copies": "1",
"size": "5457",
"license": "bsd-2-clause",
"hash": -3211805029516441600,
"line_mean": 44.475,
"line_max": 122,
"alpha_frac": 0.557265897,
"autogenerated": false,
"ratio": 3.9146341463414633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9967320693054896,
"avg_score": 0.0009158700573134569,
"num_lines": 120
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import torch
from scipy.special import logsumexp
from odin.backend import tensor as ts
from odin.backend.tensor import _normalize_axis
# ===========================================================================
# Linear Algebra
# ===========================================================================
def matmul(x, y):
""" Matrix product of two tensors
This function support broadcasting.
Example:
(2, 3).(4, 3, 5) => (4, 2, 5)
(2, 3, 4).(4, 5) => (2, 3, 5)
(5, 3, 4).(5, 4, 6) => (5, 3, 6)
"""
if tf.is_tensor(x) or tf.is_tensor(y):
return tf.matmul(x, y)
if torch.is_tensor(x) or torch.is_tensor(y):
return torch.matmul(x, y)
return np.matmul(x, y)
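# --- Hypothetical sketch (not part of the original module) ---
# Checks the broadcasting behaviour documented above with plain numpy
# inputs; the same output shapes hold for the tf / torch branches.
def _demo_matmul_broadcasting():
  a = np.ones((2, 3))
  b = np.ones((4, 3, 5))
  c = matmul(a, b)  # (2, 3) . (4, 3, 5) -> (4, 2, 5)
  d = matmul(np.ones((5, 3, 4)), np.ones((5, 4, 6)))  # -> (5, 3, 6)
  assert c.shape == (4, 2, 5) and d.shape == (5, 3, 6)
  return c, d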
# ===========================================================================
# Normalization
# ===========================================================================
def length_norm(x, axis=-1, epsilon=1e-12, ord=2):
""" L2-normalization (or vector unit length normalization)
Parameters
----------
x : array
axis : int
ord : int
order of norm (1 for L1-norm, 2 for Frobenius or Euclidean)
"""
ord = int(ord)
if ord not in (1, 2):
raise ValueError(
"only support `ord`: 1 for L1-norm; 2 for Frobenius or Euclidean")
if ord == 2:
x_norm = tf.sqrt(
tf.maximum(tf.reduce_sum(x**2, axis=axis, keepdims=True), epsilon))
else:
x_norm = tf.maximum(tf.reduce_sum(tf.abs(x), axis=axis, keepdims=True),
epsilon)
return x / x_norm
def calc_white_mat(X):
""" calculates the whitening transformation for cov matrix X
"""
return tf.linalg.cholesky(tf.linalg.inv(X))
def log_norm(x, axis=1, scale_factor=10000, eps=1e-8):
""" Seurat log-normalize
  y = log(1 + X / (sum(X, axis) + epsilon) * scale_factor)
where `log` is natural logarithm
"""
eps = tf.cast(eps, x.dtype)
return tf.math.log1p(x / (tf.reduce_sum(x, axis=axis, keepdims=True) + eps) *
scale_factor)
def delog_norm(x, x_sum=1, scale_factor=10000):
""" This perform de-log normalization of `log_norm` values
if `x_sum` is not given (i.e. default value 1), then all the
"""
return (tf.exp(x) - 1) / scale_factor * (x_sum + EPS)
# ===========================================================================
# Activation function
# ===========================================================================
def softmax(x, axis=-1):
""" `f(x) = exp(x_i) / sum_j(exp(x_j))` """
if tf.is_tensor(x):
return tf.nn.softmax(x, axis=axis)
if torch.is_tensor(x):
return torch.nn.functional.softmax(x, dim=axis)
return tf.nn.softmax(x, axis=axis).numpy()
def softmin(x, axis=None):
""" `f(x) = exp(-x_i) / sum_j(exp(-x_j))` """
if torch.is_tensor(x):
return torch.softmin(x, dim=axis)
return softmax(-x, axis=axis)
def relu(x):
""" `f(x) = max(x, 0)` """
if tf.is_tensor(x):
return tf.nn.relu(x)
if torch.is_tensor(x):
return torch.relu(x)
  return np.maximum(x, 0)
def selu(x):
""" `f(x) = scale * [max(0, x) + min(0, alpha*(exp(x)-1))]`
where:
scale = ~1.0507
alpha = ~1.6733
chose by solving Eq.(4) and Eq.(5), with the fixed point
`(mean, variance) = (0, 1)`, which is typical for activation normalization.
Reference
---------
[1] Klambauer, G., Unterthiner, T., Mayr, A., Hochreiter, S., 2017.
Self-Normalizing Neural Networks. arXiv:1706.02515 [cs, stat].
"""
if tf.is_tensor(x):
return tf.nn.selu(x)
if torch.is_tensor(x):
return torch.nn.functional.selu(x)
scale = 1.0507009873554804934193349852946
alpha = 1.6732632423543772848170429916717
return scale * (np.maximum(x, 0) + np.minimum(alpha * (np.exp(x) - 1), 0))
def tanh(x):
""" `f(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))` """
if tf.is_tensor(x):
return tf.math.tanh(x)
if torch.is_tensor(x):
return torch.tanh(x)
return np.tanh(x)
def softsign(x):
""" `f(x) = x / (1 + |x|)`"""
if tf.is_tensor(x):
return tf.math.softsign(x)
if torch.is_tensor(x):
return torch.nn.functional.softsign(x)
return x / (1 + np.abs(x))
def softplus(x, beta=1, threshold=20):
""" `f(x) = 1/beta * log(exp(beta * x) + 1)`
threshold : values above this revert to a linear function
"""
if tf.is_tensor(x):
mask = (x * beta) > threshold
beta = tf.cast(beta, dtype=x.dtype)
return tf.where(mask, x, 1 / beta * tf.nn.softplus(x * beta))
if torch.is_tensor(x):
return torch.nn.functional.softplus(x, beta=beta, threshold=threshold)
return torch.nn.functional.softplus(torch.from_numpy(x),
beta=beta,
threshold=threshold).numpy()
def sigmoid(x):
if tf.is_tensor(x):
return tf.math.sigmoid(x)
if torch.is_tensor(x):
return torch.sigmoid(x)
return 1 / (1 + np.exp(-x))
def mish(x, beta=1, threshold=20):
""" Mish: A Self Regularized Non-Monotonic Neural Activation Function
`f(x) = x * tanh(softplus(x))`
Reference
---------
[1] Misra, D., 2019. Mish: A Self Regularized Non-Monotonic Neural Activation
Function. arXiv:1908.08681 [cs, stat].
"""
return x * tanh(softplus(x, beta=beta, threshold=threshold))
def swish(x):
""" Swish: smooth, non-monotonic function
`f(x) = x * sigmoid(x)`
"""
return x * sigmoid(x)
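# --- Hypothetical sketch (not part of the original module) ---
# Evaluates a few of the activations above on the same numpy input to
# show they can be called backend-agnostically (numpy in, numpy out).
def _demo_activations():
  x = np.linspace(-3., 3., 7).astype('float32')
  return {'relu': relu(x),
          'selu': selu(x),
          'sigmoid': sigmoid(x),
          'swish': swish(x)}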
# ===========================================================================
# Math
# ===========================================================================
def sqrt(x):
if tf.is_tensor(x):
return tf.math.sqrt(x)
if torch.is_tensor(x):
return torch.sqrt(x)
return np.sqrt(x)
def power(x, exponent):
if tf.is_tensor(x):
return tf.math.pow(x, exponent)
if torch.is_tensor(x):
return x.pow(exponent)
return np.power(x, exponent)
def square(x):
if tf.is_tensor(x):
return tf.math.square(x)
if torch.is_tensor(x):
return torch.mul(x, x)
return np.square(x)
def renorm_rms(X, axis=1, target_rms=1.0):
""" Scales the data such that RMS of the features dimension is 1.0
scale = sqrt(x^t x / (D * target_rms^2)).
NOTE
----
by defaults, assume the features dimension is `1`
"""
D = sqrt(X.shape[axis])
D = ts.cast(D, X.dtype)
l2norm = sqrt(ts.reduce_sum(X**2, axis=axis, keepdims=True))
X_rms = l2norm / D
X_rms = ts.where(ts.equal(X_rms, 0.),
x=ts.ones_like(X_rms, dtype=X_rms.dtype),
y=X_rms)
return target_rms * X / X_rms
# ===========================================================================
# Statistics and reduction
# ===========================================================================
def _torch_axis(x, axis):
if axis is None:
axis = list(range(x.ndim))
return axis
def moments(x, axis=None, keepdims=False):
""" Calculates the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
"""
if tf.is_tensor(x):
mean, variance = tf.nn.moments(x, axes=axis, keepdims=keepdims)
elif torch.is_tensor(x):
mean = reduce_mean(x, axis=axis, keepdims=True)
devs_squared = (x - mean)**2
variance = reduce_mean(devs_squared, axis=axis, keepdims=keepdims)
if not keepdims:
mean = mean.squeeze(axis)
else:
mean = np.mean(x, axis=axis, keepdims=keepdims)
variance = np.var(x, axis=axis, keepdims=keepdims)
return mean, variance
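# --- Hypothetical sketch (not part of the original module) ---
# moments() mirrors tf.nn.moments for every backend; here the numpy
# branch is checked against np.mean / np.var directly.
def _demo_moments():
  x = np.random.RandomState(1234).randn(8, 5).astype('float32')
  mean, var = moments(x, axis=0)
  assert np.allclose(mean, x.mean(axis=0))
  assert np.allclose(var, x.var(axis=0))
  return mean, var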
def reduce_var(x, axis=None, keepdims=False, mean=None):
""" Calculate the variance of `x` along given `axis`
if `mean` is given,
"""
if isinstance(x, np.ndarray):
return np.var(x, axis=axis, keepdims=keepdims)
ndim = x.ndim
axis = _normalize_axis(axis, ndim)
m = reduce_mean(x, axis=axis, keepdims=True) if mean is None else mean
devs_squared = (x - m)**2
return reduce_mean(devs_squared, axis=axis, keepdims=keepdims)
def reduce_std(x, axis=None, keepdims=False):
return sqrt(reduce_var(x, axis=axis, keepdims=keepdims))
def reduce_min(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_min(x, axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.min(dim=_torch_axis(x, axis), keepdim=keepdims)[0]
return np.min(x, axis=axis, keepdims=keepdims)
def reduce_max(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.max(dim=_torch_axis(x, axis), keepdim=keepdims)[0]
return np.max(x, axis=axis, keepdims=keepdims)
def reduce_mean(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_mean(x, axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.mean(dim=_torch_axis(x, axis), keepdim=keepdims)
return np.mean(x, axis=axis, keepdims=keepdims)
def reduce_sum(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_sum(x, axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.sum(dim=_torch_axis(x, axis), keepdim=keepdims)
return np.sum(x, axis=axis, keepdims=keepdims)
def reduce_prod(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_prod(x, axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.prod(dim=_torch_axis(x, axis), keepdim=keepdims)
return np.prod(x, axis=axis, keepdims=keepdims)
def reduce_all(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_all(tf.cast(x, tf.bool), axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.bool().all(dim=_torch_axis(x, axis), keepdim=keepdims)
return np.all(x, axis=axis, keepdims=keepdims)
def reduce_any(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_any(tf.cast(x, tf.bool), axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.bool().any(dim=_torch_axis(x, axis), keepdim=keepdims)
return np.any(x, axis=axis, keepdims=keepdims)
def reduce_logsumexp(x, axis=None, keepdims=False):
if tf.is_tensor(x):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
if torch.is_tensor(x):
return x.logsumexp(dim=_torch_axis(x, axis), keepdim=keepdims)
return logsumexp(x, axis=axis, keepdims=keepdims)
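# --- Hypothetical sketch (not part of the original module) ---
# reduce_logsumexp avoids the overflow a naive log(sum(exp(x))) hits for
# large inputs; both computations are shown on the numpy backend.
def _demo_logsumexp_stability():
  x = np.array([1000., 1000., 1000.])
  naive = np.log(np.sum(np.exp(x)))  # overflows to inf
  stable = reduce_logsumexp(x)  # ~ 1000 + log(3)
  return naive, stable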
def reduce_logexp(x, reduction_function=tf.reduce_mean, axis=None, name=None):
""" log-reduction-exp over axis to avoid overflow and underflow
Parameters
----------
`x` : [nb_sample, feat_dim]
`axis` should be features dimension
"""
with tf.name_scope(name, "logreduceexp"):
x_max = tf.reduce_max(x, axis=axis, keepdims=True)
y = tf.log(reduction_function(tf.exp(x - x_max), axis=axis,
keepdims=True)) + x_max
return tf.squeeze(y)
def cumsum(x, axis):
if tf.is_tensor(x):
return tf.math.cumsum(x, axis=axis)
if torch.is_tensor(x):
return torch.cumsum(x, dim=_torch_axis(x, axis))
return np.cumsum(x, axis=axis)
# ===========================================================================
# Conversion
# ===========================================================================
def to_llh(x, name=None):
''' Convert a matrix of probabilities into log-likelihood
:math:`LLH = log(prob(data|target))`
'''
with tf.name_scope(name, "log_likelihood", [x]):
x /= tf.reduce_sum(x, axis=-1, keepdims=True)
x = tf.clip_by_value(x, EPS, 1 - EPS)
return tf.log(x)
def to_llr(x, name=None):
''' Convert a matrix of probabilities into log-likelihood ratio
:math:`LLR = log(\\frac{prob(data|target)}{prob(data|non-target)})`
'''
with tf.name_scope(name, "log_likelihood_ratio", [x]):
nb_classes = x.shape.as_list()[-1]
new_arr = []
for j in range(nb_classes):
scores_copy = tf.transpose(
tf.gather(tf.transpose(x), [i for i in range(nb_classes) if i != j]))
scores_copy -= tf.expand_dims(x[:, j], axis=-1)
new_arr.append(-logsumexp(scores_copy, 1))
return tf.concat(new_arr, axis=-1) + np.log(13)
def to_sample_weights(indices, weights, name=None):
""" Convert indices or one-hot matrix and
give weights to sample weights for training """
with tf.name_scope(name, "to_sample_weights", [indices]):
# ====== preprocess indices ====== #
ndim = len(indices.shape)
if ndim <= 1: # indices vector
indices = tf.cast(indices, dtype=tf.int64)
else:
indices = tf.argmax(indices, axis=-1)
# ====== prior weights ====== #
if isinstance(weights, (tuple, list, np.ndarray)):
prior_weights = tf.constant(weights, dtype=floatX, name="prior_weights")
# ====== sample weights ====== #
weights = tf.gather(prior_weights, indices)
return weights
| {
"repo_name": "imito/odin",
"path": "odin/backend/maths.py",
"copies": "1",
"size": "12730",
"license": "mit",
"hash": 1417662115188327700,
"line_mean": 29.5275779376,
"line_max": 79,
"alpha_frac": 0.589316575,
"autogenerated": false,
"ratio": 3.0696889317578973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.910690229690122,
"avg_score": 0.010420641971335276,
"num_lines": 417
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
# components
from tensorflow.python.ops.nn import dropout as drop
from util.cnn import conv_layer as conv
from util.cnn import conv_relu_layer as conv_relu
from util.cnn import pooling_layer as pool
from util.cnn import fc_layer as fc
from util.cnn import fc_relu_layer as fc_relu
channel_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
def vgg_pool5(input_batch, name, reuse=None):
with tf.variable_scope(name, reuse=reuse):
# layer 1
conv1_1 = conv_relu('conv1_1', input_batch,
kernel_size=3, stride=1, output_dim=64)
conv1_2 = conv_relu('conv1_2', conv1_1,
kernel_size=3, stride=1, output_dim=64)
pool1 = pool('pool1', conv1_2, kernel_size=2, stride=2)
# layer 2
conv2_1 = conv_relu('conv2_1', pool1,
kernel_size=3, stride=1, output_dim=128)
conv2_2 = conv_relu('conv2_2', conv2_1,
kernel_size=3, stride=1, output_dim=128)
pool2 = pool('pool2', conv2_2, kernel_size=2, stride=2)
# layer 3
conv3_1 = conv_relu('conv3_1', pool2,
kernel_size=3, stride=1, output_dim=256)
conv3_2 = conv_relu('conv3_2', conv3_1,
kernel_size=3, stride=1, output_dim=256)
conv3_3 = conv_relu('conv3_3', conv3_2,
kernel_size=3, stride=1, output_dim=256)
pool3 = pool('pool3', conv3_3, kernel_size=2, stride=2)
# layer 4
conv4_1 = conv_relu('conv4_1', pool3,
kernel_size=3, stride=1, output_dim=512)
conv4_2 = conv_relu('conv4_2', conv4_1,
kernel_size=3, stride=1, output_dim=512)
conv4_3 = conv_relu('conv4_3', conv4_2,
kernel_size=3, stride=1, output_dim=512)
pool4 = pool('pool4', conv4_3, kernel_size=2, stride=2)
# layer 5
conv5_1 = conv_relu('conv5_1', pool4,
kernel_size=3, stride=1, output_dim=512)
conv5_2 = conv_relu('conv5_2', conv5_1,
kernel_size=3, stride=1, output_dim=512)
conv5_3 = conv_relu('conv5_3', conv5_2,
kernel_size=3, stride=1, output_dim=512)
pool5 = pool('pool5', conv5_3, kernel_size=2, stride=2)
return pool5
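# --- Hypothetical usage sketch (not part of the original module) ---
# Builds the pool5 feature extractor on a placeholder batch of
# mean-subtracted RGB images (TF1-style graph construction).
def _demo_vgg_pool5():
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    pool5 = vgg_pool5(images - channel_mean, 'vgg_net')
    # With 224x224 inputs the spatial resolution is reduced 32x,
    # so pool5 has shape [batch, 7, 7, 512].
    return images, pool5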
| {
"repo_name": "ronghanghu/n2nmn",
"path": "models_clevr/vgg_net.py",
"copies": "1",
"size": "2502",
"license": "bsd-2-clause",
"hash": -3425599510829359000,
"line_mean": 45.3333333333,
"line_max": 69,
"alpha_frac": 0.553157474,
"autogenerated": false,
"ratio": 3.171102661596958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4224260135596958,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import exp as exp_bijector
from tensorflow_probability.python.distributions import (
NegativeBinomial, Normal, QuantizedDistribution, TransformedDistribution,
Uniform)
from tensorflow_probability.python.internal import dtype_util
__all__ = ["qUniform", "qNormal"]
class qNormal(QuantizedDistribution):
def __init__(self,
loc=0.,
scale=1.,
min_value=None,
max_value=None,
validate_args=False,
allow_nan_stats=True,
name="qNormal"):
super(qNormal,
self).__init__(distribution=Normal(loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats),
low=min_value,
high=max_value,
name=name)
class qUniform(QuantizedDistribution):
def __init__(self,
low=0.,
high=1.,
min_value=None,
max_value=None,
validate_args=False,
allow_nan_stats=True,
name="qUniform"):
super(qUniform,
self).__init__(distribution=Uniform(low=low,
high=high,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats),
low=min_value,
high=max_value,
name=name)
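# --- Hypothetical usage sketch (not part of the original module) ---
# qNormal quantizes a Normal to integer support, optionally clipped to
# [min_value, max_value]; useful as an observation model for counts.
def _demo_qnormal():
  dist = qNormal(loc=3., scale=2., min_value=0., max_value=10.)
  samples = dist.sample(5)  # integer-valued float samples in [0, 10]
  log_prob = dist.log_prob(samples)
  return samples, log_prob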
| {
"repo_name": "imito/odin",
"path": "odin/bay/distributions/quantized.py",
"copies": "1",
"size": "1797",
"license": "mit",
"hash": 6858980939219588000,
"line_mean": 34.2352941176,
"line_max": 79,
"alpha_frac": 0.4830272677,
"autogenerated": false,
"ratio": 4.923287671232877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5906314938932877,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import theano
import theano.tensor as T
class Optimizer(object):
def __init__(self, lr_init=1e-3):
self.lr = theano.shared(
np.asarray(lr_init, dtype=theano.config.floatX), borrow=True)
def set_learning_rate(self, lr):
self.lr.set_value(np.asarray(lr, dtype=theano.config.floatX))
def mult_learning_rate(self, factor=0.5):
new_lr = self.lr.get_value() * factor
self.lr.set_value(np.asarray(new_lr, dtype=theano.config.floatX))
print(' * change learning rate to %.2e' % (new_lr))
def get_updates_cost(self, cost, params, scheme='nadam'):
if scheme == 'adagrad':
updates = self.get_updates_adagrad(cost, params)
elif scheme == 'adadelta':
updates = self.get_updates_adadelta(cost, params)
elif scheme == 'rmsprop':
updates = self.get_updates_rmsprop(cost, params)
elif scheme == 'adam':
updates = self.get_updates_adam(cost, params)
elif scheme == 'nadam':
updates = self.get_updates_nadam(cost, params)
elif scheme == 'sgd':
# updates = self.get_updates_sgd_momentum(cost, params)
updates = self.get_updates_sgd_momentum(
cost, params, grad_clip=0.01)
else:
raise ValueError(
'Select the proper scheme: '
'adagrad / adadelta / rmsprop / adam / nadam / sgd')
return updates
def get_updates_adagrad(self, cost, params, eps=1e-8):
lr = self.lr
print(' - Adagrad: lr = %.2e' % (lr.get_value(borrow=True)))
grads = T.grad(cost, params)
updates = []
for p, g in zip(params, grads):
value = p.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
accu_new = accu + g ** 2
new_p = p - (lr * g / T.sqrt(accu_new + eps))
updates.append((accu, accu_new))
updates.append((p, new_p))
return updates
def get_updates_adadelta(self, cost, params, rho=0.95, eps=1e-6):
lr = self.lr
print(' - Adadelta: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
grads = T.grad(cost, params)
updates = []
for p, g in zip(params, grads):
value = p.get_value(borrow=True)
# accu: accumulate gradient magnitudes
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
# delta_accu: accumulate update magnitudes (recursively!)
delta_accu = theano.shared(
np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
# update accu (as in rmsprop)
accu_new = rho * accu + (one - rho) * g ** 2
updates.append((accu, accu_new))
# compute parameter update, using the 'old' delta_accu
update = (g * T.sqrt(delta_accu + eps) /
T.sqrt(accu_new + eps))
new_param = p - lr * update
updates.append((p, new_param))
# update delta_accu (as accu, but accumulating updates)
delta_accu_new = rho * delta_accu + (one - rho) * update ** 2
updates.append((delta_accu, delta_accu_new))
return updates
def get_updates_rmsprop(self, cost, params, rho=0.9, eps=1e-8):
lr = self.lr
print(' - RMSprop: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
value = p.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
accu_new = rho * accu + (one - rho) * g ** 2
gradient_scaling = T.sqrt(accu_new + eps)
g = g / gradient_scaling
updates.append((accu, accu_new))
updates.append((p, p - lr * g))
return updates
def get_updates_adam(self, cost, params,
beta1=0.9, beta2=0.999, epsilon=1e-8):
"""
Adam optimizer.
Parameters
----------
lr: float >= 0. Learning rate.
beta1/beta2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0.
References
----------
[1] Adam - A Method for Stochastic Optimization
[2] Lasage:
https://github.com/Lasagne/Lasagne/blob/master/lasagne/updates.py
"""
lr = self.lr
print(' - Adam: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
self.iterations = theano.shared(
np.asarray(0., dtype=theano.config.floatX), borrow=True)
grads = T.grad(cost, params)
updates = [(self.iterations, self.iterations + 1)]
t = self.iterations + 1.
lr_t = lr * (T.sqrt(one - beta2 ** t) / (one - beta1 ** t))
for p, g in zip(params, grads):
p_val = p.get_value(borrow=True)
m = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
broadcastable=p.broadcastable)
v = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
broadcastable=p.broadcastable)
m_t = (beta1 * m) + (one - beta1) * g
v_t = (beta2 * v) + (one - beta2) * g ** 2
p_t = p - lr_t * m_t / (T.sqrt(v_t) + epsilon)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
return updates
def get_updates_nadam(self, cost, params,
beta1=0.9, beta2=0.999,
epsilon=1e-8, schedule_decay=0.004):
"""
Nesterov Adam.
Keras implementation.
Much like Adam is essentially RMSprop with momentum,
        Nadam is Adam with Nesterov momentum.
Parameters
----------
lr: float >= 0. Learning rate.
beta1/beta2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0.
References
----------
[1] Nadam report - http://cs229.stanford.edu/proj2015/054_report.pdf
[2] On the importance of initialization and momentum in deep learning -
http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
"""
lr = self.lr
print(' - Nesterov Adam: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
self.iterations = theano.shared(
np.asarray(0., dtype=theano.config.floatX), borrow=True)
self.m_schedule = theano.shared(
np.asarray(1., dtype=theano.config.floatX), borrow=True)
self.beta1 = theano.shared(
np.asarray(beta1, dtype=theano.config.floatX), borrow=True)
self.beta2 = theano.shared(
np.asarray(beta2, dtype=theano.config.floatX), borrow=True)
self.schedule_decay = schedule_decay
grads = T.grad(cost, params)
updates = [(self.iterations, self.iterations + 1)]
t = self.iterations + 1.
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta1 * (
one - 0.5 * (T.pow(0.96, t * self.schedule_decay)))
momentum_cache_t_1 = self.beta1 * (
one - 0.5 * (T.pow(0.96, (t + 1.) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = (self.m_schedule * momentum_cache_t *
momentum_cache_t_1)
updates.append((self.m_schedule, m_schedule_new))
for p, g in zip(params, grads):
p_val = p.get_value(borrow=True)
m = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
broadcastable=p.broadcastable)
v = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
broadcastable=p.broadcastable)
# the following equations given in [1]
g_prime = g / (one - m_schedule_new)
m_t = self.beta1 * m + (one - self.beta1) * g
m_t_prime = m_t / (one - m_schedule_next)
v_t = self.beta2 * v + (one - self.beta2) * g ** 2
v_t_prime = v_t / (one - T.pow(self.beta2, t))
m_t_bar = ((one - momentum_cache_t) * g_prime +
momentum_cache_t_1 * m_t_prime)
updates.append((m, m_t))
updates.append((v, v_t))
p_t = p - self.lr * m_t_bar / (T.sqrt(v_t_prime) + epsilon)
updates.append((p, p_t))
return updates
def get_updates_sgd_momentum(self, cost, params,
decay_mode=None, decay=0.,
momentum=0.9, nesterov=False,
grad_clip=None, constant_clip=True):
print(' - SGD: lr = %.2e' % (self.lr.get_value(borrow=True)), end='')
print(', decay = %.2f' % (decay), end='')
print(', momentum = %.2f' % (momentum), end='')
print(', nesterov =', nesterov, end='')
print(', grad_clip =', grad_clip)
self.grad_clip = grad_clip
self.constant_clip = constant_clip
self.iterations = theano.shared(
np.asarray(0., dtype=theano.config.floatX), borrow=True)
# lr = self.lr_float
lr = self.lr * (1.0 / (1.0 + decay * self.iterations))
# lr = self.lr * (decay ** T.floor(self.iterations / decay_step))
updates = [(self.iterations, self.iterations + 1.)]
# Get gradients and apply clipping
if self.grad_clip is None:
grads = T.grad(cost, params)
else:
assert self.grad_clip > 0
if self.constant_clip:
# Constant clipping using theano.gradient.grad_clip
clip = self.grad_clip
grads = T.grad(
theano.gradient.grad_clip(cost, -clip, clip),
params)
else:
# Adaptive clipping
clip = self.grad_clip / lr
grads_ = T.grad(cost, params)
grads = [T.clip(g, -clip, clip) for g in grads_]
for p, g in zip(params, grads):
# v_prev = theano.shared(p.get_value(borrow=True) * 0.)
p_val = p.get_value(borrow=True)
v_prev = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
broadcastable=p.broadcastable)
v = momentum * v_prev - lr * g
updates.append((v_prev, v))
if nesterov:
new_p = p + momentum * v - lr * g
else:
new_p = p + v
updates.append((p, new_p))
return updates
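# --- Hypothetical usage sketch (not part of the original module) ---
# Minimizes a toy quadratic with the Nadam updates; the names below
# (w, cost, train_fn) are invented purely for illustration.
def _demo_optimizer():
    w = theano.shared(np.asarray([3., -2.], dtype=theano.config.floatX))
    cost = T.sum((w - 1.0) ** 2)
    opt = Optimizer(lr_init=1e-1)
    updates = opt.get_updates_cost(cost, [w], scheme='nadam')
    train_fn = theano.function([], cost, updates=updates)
    for _ in range(200):
        train_fn()
    return w.get_value()  # close to [1., 1.]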
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/optimizer.py",
"copies": "1",
"size": "11117",
"license": "mit",
"hash": 8556638613956893000,
"line_mean": 37.4671280277,
"line_max": 79,
"alpha_frac": 0.5225330575,
"autogenerated": false,
"ratio": 3.5483562081072453,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45708892656072453,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import time
from numpy.core.umath_tests import inner1d
from distributed import Client
from distributed.utils_test import cluster, loop
from dask_patternsearch import search
def sphere(x):
"""Minimum at 0"""
return x.dot(x)
def sphere_p1(x):
"""Minimum at 0.1"""
x = x - 0.1
return x.dot(x)
def sphere_vectorized(Xs):
"""Vecterized version of sphere"""
return inner1d(Xs, Xs)
def test_convergence_2d_simple(loop):
with cluster() as (s, [a, b]):
with Client(s['address'], loop=loop) as client:
x0 = np.array([10., 15])
stepsize = np.array([1., 1])
stopratio = 1e-2
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere_p1, x0, stepsize, client=client, stopratio=stopratio)
assert (np.abs(best.point - 0.1) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
max_queue_size=20)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
max_queue_size=1)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
min_new_submit=4)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
max_tasks=10)
assert len(results) == 10
assert best.result == min(x.result for x in results)
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
max_stencil_size=4)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
max_stencil_size=4, min_new_submit=4)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
batchsize=5)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
assert len(results) % 5 == 0
best, results = search(sphere_vectorized, x0, stepsize, client=client, stopratio=stopratio,
batchsize=5, vectorize=True)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
assert len(results) % 5 == 0
best, results = search(sphere_vectorized, x0, stepsize, client=client, stopratio=stopratio,
batchsize=5, max_tasks=2)
assert best.result == min(x.result for x in results)
assert len(results) == 10
def test_convergence_2d_integers(loop):
with cluster() as (s, [a, b]):
with Client(s['address'], loop=loop) as client:
x0 = np.array([10., 15])
stepsize = np.array([1., 1])
stopratio = 1e-2
best, results = search(sphere, x0, stepsize, client=client, stopratio=stopratio,
integer_dimensions=[0])
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere_p1, x0, stepsize, client=client, stopratio=stopratio,
integer_dimensions=[0])
assert (np.abs(best.point - np.array([0, 0.1])) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere_p1, x0, stepsize, client=client, stopratio=stopratio,
integer_dimensions=[0, 1])
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
def test_convergence_2d_serial():
x0 = np.array([10., 15])
stepsize = np.array([1., 1])
stopratio = 1e-2
best, results = search(sphere, x0, stepsize, stopratio=stopratio)
assert (np.abs(best.point) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
best, results = search(sphere_p1, x0, stepsize, stopratio=stopratio)
assert (np.abs(best.point - 0.1) < 2*stopratio).all()
assert best.result == min(x.result for x in results)
| {
"repo_name": "eriknw/dask-patternsearch",
"path": "dask_patternsearch/tests/test_search.py",
"copies": "1",
"size": "5313",
"license": "bsd-3-clause",
"hash": -630019021505689900,
"line_mean": 41.504,
"line_max": 103,
"alpha_frac": 0.5727460945,
"autogenerated": false,
"ratio": 3.6793628808864267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9705724949714767,
"avg_score": 0.009276805134332015,
"num_lines": 125
} |
from __future__ import absolute_import, division, print_function
import numpy as np
__all__ = ['points_inside_poly', 'polygon_line_intersections']
def points_inside_poly(x, y, vx, vy):
from matplotlib.path import Path
p = Path(np.column_stack((vx, vy)))
keep = ((x >= np.min(vx)) &
(x <= np.max(vx)) &
(y >= np.min(vy)) &
(y <= np.max(vy)))
inside = np.zeros(len(x), bool)
x = x[keep]
y = y[keep]
coords = np.column_stack((x, y))
inside[keep] = p.contains_points(coords).astype(bool)
return inside
def polygon_line_intersections(px, py, xval=None, yval=None):
"""
Find all the segments of intersection between a polygon and an infinite
horizontal/vertical line.
The polygon is assumed to be closed. Due to numerical precision, the
behavior at the edges of polygons is not always predictable, i.e. a point
on the edge of a polygon may be considered inside or outside the polygon.
Parameters
----------
px, py : `~numpy.ndarray`
The vertices of the polygon
xval : float, optional
The x coordinate of the line (for vertical lines). This should only be
specified if yval is not specified.
yval : float, optional
The y coordinate of the line (for horizontal lines). This should only be
specified if xval is not specified.
Returns
-------
segments : list
A list of segments given as tuples of coordinates along the line.
"""
if xval is not None and yval is not None:
raise ValueError("Only one of xval or yval should be specified")
elif xval is None and yval is None:
raise ValueError("xval or yval should be specified")
if yval is not None:
return polygon_line_intersections(py, px, xval=yval)
px = np.asarray(px, dtype=float)
py = np.asarray(py, dtype=float)
# Make sure that the polygon is closed
if px[0] != px[-1] or py[0] != py[-1]:
px = np.hstack([px, px[0]])
py = np.hstack([py, py[0]])
# For convenience
x1, x2 = px[:-1], px[1:]
y1, y2 = py[:-1], py[1:]
# Vertices that intersect
keep1 = (px == xval)
points1 = py[keep1]
# Segments (excluding vertices) that intersect
keep2 = ((x1 < xval) & (x2 > xval)) | ((x2 < xval) & (x1 > xval))
points2 = (y1 + (y2 - y1) * (xval - x1) / (x2 - x1))[keep2]
# Make unique and sort
points = np.array(np.sort(np.unique(np.hstack([points1, points2]))))
# Because of various corner cases, we don't actually know which pairs of
# points are inside the polygon, so we check this using the mid-points
ymid = 0.5 * (points[:-1] + points[1:])
xmid = np.repeat(xval, len(ymid))
keep = points_inside_poly(xmid, ymid, px, py)
segments = list(zip(points[:-1][keep], points[1:][keep]))
return segments
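# --- Hypothetical usage sketch (not part of the original module) ---
# Intersects the unit square with the vertical line x = 0.5; a single
# segment spanning y in [0, 1] is expected.
def _demo_polygon_line_intersections():
    px = np.array([0., 1., 1., 0.])
    py = np.array([0., 0., 1., 1.])
    segments = polygon_line_intersections(px, py, xval=0.5)
    # segments ~ [(0.0, 1.0)]
    return segments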
| {
"repo_name": "saimn/glue",
"path": "glue/utils/geometry.py",
"copies": "1",
"size": "2873",
"license": "bsd-3-clause",
"hash": -2218235461582738000,
"line_mean": 29.5638297872,
"line_max": 80,
"alpha_frac": 0.6132962061,
"autogenerated": false,
"ratio": 3.4448441247002397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.455814033080024,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
# (comments based on t_tide)
# Coefficients of the formulas in the Explan. Suppl.
_sc = np.array([270.434164, 13.1763965268, -0.0000850, 0.000000039])
_hc = np.array([279.696678, 0.9856473354, 0.00002267, 0.000000000])
_pc = np.array([334.329556, 0.1114040803, -0.0007739, -0.00000026])
_npc = np.array([-259.183275, 0.0529539222, -0.0001557, -0.000000050])
# First coeff was 281.220833 in Foreman but Expl. Suppl. has 44.
_ppc = np.array([281.220844, 0.0000470684, 0.0000339, 0.000000070])
_coefs = np.vstack((_sc, _hc, _pc, _npc, _ppc))
def ut_astron(jd):
"""
Compute the astronomical variables and their time derivatives.
Parameters
----------
jd : float, scalar or sequence
Time (UTC) in days starting with 1 on 1 Jan. of the year 1
in the proleptic Gregorian calendar as in
`datetime.date.toordinal`.
Returns
-------
astro : array, (6, nt)
rows are tau, s, h, p, np, pp (cycles)
ader : array, (6, nt)
time derivatives of the above (cycles/day)
Notes
-----
2-D arrays are always returned.
Variables are:
=== ====================================================
tau lunar time
s mean longitude of the moon
h mean longitude of the sun
p mean longitude of the lunar perigee
np negative of the longitude of the mean ascending node
pp mean longitude of the perihelion (solar perigee)
=== ====================================================
Based on UTide v1p0 9/2011 d.codiga@gso.uri.edu, which
in turn came from t_tide's t_astron.m, Pawlowicz et al 2002
For more background information from t_tide, see the t_tide_doc
string variable in this module.
"""
jd = np.atleast_1d(jd).flatten()
# Shift epoch to 1899-12-31 at noon:
# daten = 693961.500000000 Matlab datenum version
daten = 693595.5 # Python epoch is 366 days later than Matlab's
d = jd - daten
D = d / 10000
args = np.vstack((np.ones(jd.shape), d, D*D, D**3))
astro = np.fmod((np.dot(_coefs, args) / 360), 1)
# lunar time: fractional part of solar day
# plus hour angle to longitude of sun
# minus longitude of moon
tau = jd % 1 + astro[1, :] - astro[0, :]
astro = np.vstack((tau, astro))
# derivatives (polynomial)
dargs = np.vstack((np.zeros(jd.shape), np.ones(jd.shape),
2.0e-4*D, 3.0e-4*D*D))
ader = np.dot(_coefs, dargs)/360.0
dtau = 1.0 + ader[1, :] - ader[0, :]
ader = np.vstack((dtau, ader))
return astro, ader
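# --- Hypothetical usage sketch (not part of the original module) ---
# Evaluates the ephemerides at 2000-01-01 00:00 UTC; the proleptic
# Gregorian ordinal is obtained from datetime, as the docstring assumes.
def _demo_ut_astron():
    from datetime import datetime
    jd = datetime(2000, 1, 1).toordinal()  # days, 1 = 0001-01-01
    astro, ader = ut_astron(jd)
    # astro rows are (tau, s, h, p, np, pp) in cycles; ader holds the
    # corresponding rates in cycles/day.
    return astro, ader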
t_tide_doc = """
The following is taken verbatim from the t_tide t_astron.m file, with
permission.
The formulae for calculating these ephemerides (other than tau)
were taken from pages 98 and 107 of the Explanatory Supplement to
the Astronomical Ephemeris and the American Ephemeris and Nautical
Almanac (1961). They require EPHEMERIS TIME (ET), now TERRESTRIAL
TIME (TT) and are based on observations made in the 1700/1800s.
In a bizarre twist, the current definition of time is derived
by reducing observations of planetary motions using these formulas.
The current world master clock is INTERNATIONAL ATOMIC TIME (TAI).
The length of the second is based on inverting the actual
locations of the planets over the period 1956-65 into "time"
using these formulas, and an offset added to keep the scale
continuous with previous defns. Thus
TT = TAI + 32.184 seconds.
Universal Time UT is a time scale that is 00:00 at midnight (i.e.,
based on the earth's rotation rather than on planetary motions).
Coordinated Universal Time (UTC) is kept by atomic clocks, the
length of the second is the same as for TAI but leap seconds are
inserted at intervals so that it provides UT to within 1 second.
This is necessary because the period of the earth's rotation is
slowly increasing (the day was exactly 86400 seconds around 1820,
it is now about 2 ms longer). 22 leap seconds have been added in
the last 27 years.
As of 1/1/99, TAI = UTC + 32 seconds.
Thus, TT = UTC + 62.184 seconds
GPS time was synchronized with UTC 6/1/1980 ( = TAI - 19 secs),
but is NOT adjusted for leap seconds. Your receiver might do this
automatically...or it might not.
Does any of this matter? The moon longitude is the fastest changing
parameter at 13 deg/day. A time error of one minute implies a
position error of less than 0.01 deg. This would almost always be
unimportant for tidal work.
The lunar time (tau) calculation requires UT as a base. UTC is
close enough - an error of 1 second, the biggest difference that
can occur between UT and UTC, implies a Greenwich phase error of
0.01 deg. In Doodson's definition (Proc R. Soc. A, vol 100,
reprinted in International Hydrographic Review, Appendix to
Circular Letter 4-H, 1954) mean lunar time is taken to begin at
"lunar midnight".
B. Beardsley 12/29/98, 1/11/98
R. Pawlowicz 9/1/01
Version 1.0
"""
| {
"repo_name": "efiring/UTide",
"path": "utide/astronomy.py",
"copies": "1",
"size": "5044",
"license": "mit",
"hash": 6271385915165698000,
"line_mean": 34.2727272727,
"line_max": 70,
"alpha_frac": 0.672482157,
"autogenerated": false,
"ratio": 3.2250639386189257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43975460956189255,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from .astronomy import ut_astron
from . import ut_constants
from . import constit_index_dict
from .utilities import Bunch
def ut_cnstitsel(tref, minres, incnstit, infer):
"""
UT_CNSTITSEL()
carry out constituent selection
inputs
tref = reference time (datenum UTC)
minres = freq separation (cph) used in decision tree
incnstit = 'cnstit' input to ut_solv
infer = 'opt.infer' input to ut_solv
outputs
nNR,nR,nI = number non-reference, reference, inferred constituents
cnstit.NR.name = cellstr of 4-char names of NR constits
cnstit.NR.frq = frequencies (cph) of NR constits
cnstit.NR.lind = list indices (in ut_constants.mat) of NR constits
cnstit.R = empty if no inference; otherwise, for each (i'th) R constit:
cnstit.R{i}.name, .frq, .lind = as above, but for R constits
cnstit.R{i}.I{j}.name, .frq, .lind = as above for j'th I constit
coef.name = cellstr of names of all constituents (NR, R, and I)
coef.aux.frq = frequencies (cph) of all constituents
coef.aux.lind = list indices of all constituents
coef.aux.reftime = tref
UTide v1p0 9/2011 d.codiga@gso.uri.edu
"""
shallow = ut_constants.shallow
const = ut_constants.const
cnstit = Bunch()
coef = Bunch()
astro, ader = ut_astron(tref)
ii = np.isfinite(const.ishallow)
const.freq[~ii] = np.dot(const.doodson[~ii, :], ader[:, 0]) / 24
for k in ii.nonzero()[0]:
ik = const.ishallow[k]+np.arange(const.nshallow[k])
ik = ik.astype(int)-1
const.freq[k] = np.sum(const.freq[shallow.iname[ik] - 1] *
shallow.coef[ik])
# cnstit.NR
cnstit['NR'] = Bunch()
# if incnstit.lower() == 'auto':
if incnstit == 'auto':
cnstit['NR']['lind'] = np.where(const.df >= minres)[0]
else:
ilist = [constit_index_dict[n] for n in incnstit]
cnstit['NR']['lind'] = np.array(ilist, dtype=int)
# if ordercnstit == 'frq':
# seq = const.freq[cnstit['NR']['lind']].argsort()
# tmp = cnstit['NR']['lind'][seq].astype(int).
# cnstit['NR']['lind'] = tmp.flatten()
    # Some of the original code is skipped here because it involves infer,
    # which is not supported yet (see cnstit['R'] below).
cnstit['NR']['frq'] = const.freq[cnstit['NR']['lind']]
cnstit['NR']['name'] = const.name[cnstit['NR']['lind']]
nNR = len(cnstit['NR']['frq'])
# cnstit.R
nR = 0
nI = 0
cnstit['R'] = [] # Empty because inference is not supported yet.
coef['name'] = cnstit['NR']['name']
coef['aux'] = Bunch(frq=cnstit.NR.frq,
lind=cnstit.NR.lind,
reftime=tref)
return nNR, nR, nI, cnstit, coef
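# A minimal usage sketch, not part of the original UTide source: automatic
# constituent selection for an assumed reference time. The date and the
# frequency-resolution threshold (about one cycle per year, in cph) are
# illustrative values only.
if __name__ == "__main__":  # pragma: no cover
    from datetime import datetime
    tref = np.array(datetime(2017, 1, 1, 12, 0).toordinal() + 0.5)
    minres = 1.0 / (24 * 365.25)
    nNR, nR, nI, cnstit, coef = ut_cnstitsel(tref, minres, 'auto', infer=None)
    print(nNR, "non-reference constituents selected")
    print(coef.name[:5], coef.aux.frq[:5])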
| {
"repo_name": "efiring/UTide",
"path": "utide/constituent_selection.py",
"copies": "1",
"size": "2801",
"license": "mit",
"hash": -6971785203903852000,
"line_mean": 32.7469879518,
"line_max": 77,
"alpha_frac": 0.5958586219,
"autogenerated": false,
"ratio": 3.0612021857923497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.915706080769235,
"avg_score": 0,
"num_lines": 83
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from ._ciso import _zslice
def zslice(q, p, p0):
"""
Returns a 2D slice of the variable `q` from a 3D field defined by `p`,
along an iso-surface at `p0` using a linear interpolation.
    The result `q_iso` is `q` interpolated onto the iso-surface `p == p0`,
    evaluated along the first non-singleton dimension.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ciso import zslice
>>> z = np.linspace(-100, 0, 30)[:, None, None] * np.ones((50, 70))
>>> x, y = np.mgrid[0:20:50j, 0:20:70j]
>>> s = np.sin(x) + z
>>> s50 = zslice(s, z, -50)
>>> plt.pcolormesh(s50)
"""
if q.shape != p.shape:
msg = "Arrays q {} and p {} must be of the same shape.".format
raise ValueError(msg(q.shape, p.shape))
if np.array(p0).squeeze().ndim != 0:
msg = "p0 must be a float number or 0-dim array. Got {!r}.".format
raise ValueError(msg(p0))
if p0 < p.min() or p.max() < p0:
msg = "p0 {} is outise p bounds ({}, {}).".format
raise ValueError(msg(p0, p.min(), p.max()))
q = np.asfarray(q)
p = np.asfarray(p)
if q.ndim == 3:
K, J, I = q.shape
iso = _zslice(q.reshape(K, -1), p.reshape(K, -1), p0)
return iso.reshape(J, I)
elif q.ndim == 2:
return _zslice(q, p, p0)
else:
msg = "Expected 2D (UGRID) or 3D (S/RGRID) arrays. Got {}D.".format
raise ValueError(msg(q.ndim))
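# A minimal shape-check sketch (not from the ciso source), mirroring the
# docstring example above without the plotting step.
if __name__ == "__main__":  # pragma: no cover
    z = np.linspace(-100, 0, 30)[:, None, None] * np.ones((50, 70))
    x, y = np.mgrid[0:20:50j, 0:20:70j]
    s = np.sin(x) + z
    s50 = zslice(s, z, -50)
    assert s50.shape == (50, 70)
    print(s50.min(), s50.max())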
| {
"repo_name": "hetland/ciso",
"path": "ciso/ciso.py",
"copies": "1",
"size": "1556",
"license": "bsd-2-clause",
"hash": 5685608459427760000,
"line_mean": 29.5098039216,
"line_max": 76,
"alpha_frac": 0.5604113111,
"autogenerated": false,
"ratio": 3.0331384015594542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9089778973594598,
"avg_score": 0.0007541478129713424,
"num_lines": 51
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1
class Eindot(Benchmark):
def setup(self):
self.a = np.arange(60000.0).reshape(150, 400)
self.ac = self.a.copy()
self.at = self.a.T
self.atc = self.a.T.copy()
self.b = np.arange(240000.0).reshape(400, 600)
self.c = np.arange(600)
self.d = np.arange(400)
self.a3 = np.arange(480000.).reshape(60, 80, 100)
self.b3 = np.arange(192000.).reshape(80, 60, 40)
def time_dot_a_b(self):
np.dot(self.a, self.b)
def time_dot_d_dot_b_c(self):
np.dot(self.d, np.dot(self.b, self.c))
def time_dot_trans_a_at(self):
np.dot(self.a, self.at)
def time_dot_trans_a_atc(self):
np.dot(self.a, self.atc)
def time_dot_trans_at_a(self):
np.dot(self.at, self.a)
def time_dot_trans_atc_a(self):
np.dot(self.atc, self.a)
def time_einsum_i_ij_j(self):
np.einsum('i,ij,j', self.d, self.b, self.c)
def time_einsum_ij_jk_a_b(self):
np.einsum('ij,jk', self.a, self.b)
def time_einsum_ijk_jil_kl(self):
np.einsum('ijk,jil->kl', self.a3, self.b3)
def time_inner_trans_a_a(self):
np.inner(self.a, self.a)
def time_inner_trans_a_ac(self):
np.inner(self.a, self.ac)
def time_matmul_a_b(self):
np.matmul(self.a, self.b)
def time_matmul_d_matmul_b_c(self):
np.matmul(self.d, np.matmul(self.b, self.c))
def time_matmul_trans_a_at(self):
np.matmul(self.a, self.at)
def time_matmul_trans_a_atc(self):
np.matmul(self.a, self.atc)
def time_matmul_trans_at_a(self):
np.matmul(self.at, self.a)
def time_matmul_trans_atc_a(self):
np.matmul(self.atc, self.a)
def time_tensordot_a_b_axes_1_0_0_1(self):
np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1]))
class Linalg(Benchmark):
params = [['svd', 'pinv', 'det', 'norm'],
TYPES1]
param_names = ['op', 'type']
def setup(self, op, typename):
np.seterr(all='ignore')
self.func = getattr(np.linalg, op)
if op == 'cholesky':
# we need a positive definite
self.a = np.dot(get_squares_()[typename],
get_squares_()[typename].T)
else:
self.a = get_squares_()[typename]
# check that dtype is supported at all
try:
self.func(self.a[:2, :2])
except TypeError:
raise NotImplementedError()
def time_op(self, op, typename):
self.func(self.a)
class Lstsq(Benchmark):
def setup(self):
self.a = get_squares_()['float64']
self.b = get_indexes_rand()[:100].astype(np.float64)
def time_numpy_linalg_lstsq_a__b_float64(self):
np.linalg.lstsq(self.a, self.b)
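# A minimal sketch, not part of the benchmark suite: these classes follow the
# setup()/time_*() convention used by airspeed velocity (asv), but a single
# benchmark can also be exercised by hand, for example:
if __name__ == "__main__":  # pragma: no cover
    import timeit
    bench = Eindot()
    bench.setup()
    print(timeit.timeit(bench.time_dot_a_b, number=10), "seconds for 10 calls")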
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_linalg.py",
"copies": "1",
"size": "2930",
"license": "mit",
"hash": -8949765669061906000,
"line_mean": 25.880733945,
"line_max": 69,
"alpha_frac": 0.5675767918,
"autogenerated": false,
"ratio": 2.8641251221896384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39317019139896386,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .common import Benchmark, get_squares
class Copy(Benchmark):
params = ["int8", "int16", "float32", "float64",
"complex64", "complex128"]
param_names = ['type']
def setup(self, typename):
dtype = np.dtype(typename)
self.d = np.arange((50 * 500), dtype=dtype).reshape((500, 50))
self.e = np.arange((50 * 500), dtype=dtype).reshape((50, 500))
self.e_d = self.e.reshape(self.d.shape)
self.dflat = np.arange((50 * 500), dtype=dtype)
def time_memcpy(self, typename):
self.d[...] = self.e_d
def time_cont_assign(self, typename):
self.d[...] = 1
def time_strided_copy(self, typename):
self.d[...] = self.e.T
def time_strided_assign(self, typename):
self.dflat[::2] = 2
class CopyTo(Benchmark):
def setup(self):
self.d = np.ones(50000)
self.e = self.d.copy()
self.m = (self.d == 1)
self.im = (~ self.m)
self.m8 = self.m.copy()
self.m8[::8] = (~ self.m[::8])
self.im8 = (~ self.m8)
def time_copyto(self):
np.copyto(self.d, self.e)
def time_copyto_sparse(self):
np.copyto(self.d, self.e, where=self.m)
def time_copyto_dense(self):
np.copyto(self.d, self.e, where=self.im)
def time_copyto_8_sparse(self):
np.copyto(self.d, self.e, where=self.m8)
def time_copyto_8_dense(self):
np.copyto(self.d, self.e, where=self.im8)
class Savez(Benchmark):
def setup(self):
self.squares = get_squares()
def time_vb_savez_squares(self):
np.savez('tmp.npz', self.squares)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_io.py",
"copies": "1",
"size": "1710",
"license": "mit",
"hash": 5155000040640725000,
"line_mean": 25.71875,
"line_max": 70,
"alpha_frac": 0.5760233918,
"autogenerated": false,
"ratio": 3.0052724077328645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40812957995328647,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .common import Benchmark, get_squares_
ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin',
'arcsinh', 'arctan', 'arctan2', 'arctanh', 'bitwise_and',
'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil',
'conj', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad',
'degrees', 'divide', 'equal', 'exp', 'exp2', 'expm1',
'fabs', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod',
'frexp', 'greater', 'greater_equal', 'hypot', 'invert',
'isfinite', 'isinf', 'isnan', 'ldexp', 'left_shift', 'less',
'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
'logical_xor', 'maximum', 'minimum', 'mod', 'modf',
'multiply', 'negative', 'nextafter', 'not_equal', 'power',
'rad2deg', 'radians', 'reciprocal', 'remainder',
'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh',
'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh',
'true_divide', 'trunc']
for name in dir(np):
if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs:
print("Missing ufunc %r" % (name,))
class Broadcast(Benchmark):
def setup(self):
self.d = np.ones((50000, 100), dtype=np.float64)
self.e = np.ones((100,), dtype=np.float64)
def time_broadcast(self):
self.d - self.e
class UFunc(Benchmark):
params = [ufuncs]
param_names = ['ufunc']
timeout = 10
def setup(self, ufuncname):
np.seterr(all='ignore')
try:
self.f = getattr(np, ufuncname)
except AttributeError:
raise NotImplementedError()
self.args = []
for t, a in get_squares_().items():
arg = (a,) * self.f.nin
try:
self.f(*arg)
except TypeError:
continue
self.args.append(arg)
def time_ufunc_types(self, ufuncname):
[self.f(*arg) for arg in self.args]
class Custom(Benchmark):
def setup(self):
self.b = np.ones(20000, dtype=np.bool)
def time_nonzero(self):
np.nonzero(self.b)
def time_not_bool(self):
(~self.b)
def time_and_bool(self):
(self.b & self.b)
def time_or_bool(self):
(self.b | self.b)
class CustomInplace(Benchmark):
def setup(self):
self.c = np.ones(500000, dtype=np.int8)
self.i = np.ones(150000, dtype=np.int32)
self.f = np.zeros(150000, dtype=np.float32)
self.d = np.zeros(75000, dtype=np.float64)
        # touch the float arrays so their memory is faulted in before timing
self.f *= 1.
self.d *= 1.
def time_char_or(self):
np.bitwise_or(self.c, 0, out=self.c)
np.bitwise_or(0, self.c, out=self.c)
def time_char_or_temp(self):
0 | self.c | 0
def time_int_or(self):
np.bitwise_or(self.i, 0, out=self.i)
np.bitwise_or(0, self.i, out=self.i)
def time_int_or_temp(self):
0 | self.i | 0
def time_float_add(self):
np.add(self.f, 1., out=self.f)
np.add(1., self.f, out=self.f)
def time_float_add_temp(self):
1. + self.f + 1.
def time_double_add(self):
np.add(self.d, 1., out=self.d)
np.add(1., self.d, out=self.d)
def time_double_add_temp(self):
1. + self.d + 1.
class CustomScalar(Benchmark):
params = [np.float32, np.float64]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_add_scalar2(self, dtype):
np.add(self.d, 1)
def time_divide_scalar2(self, dtype):
np.divide(self.d, 1)
def time_divide_scalar2_inplace(self, dtype):
np.divide(self.d, 1, out=self.d)
def time_less_than_scalar2(self, dtype):
(self.d < 1)
class Scalar(Benchmark):
def setup(self):
self.x = np.asarray(1.0)
self.y = np.asarray((1.0 + 1j))
self.z = complex(1.0, 1.0)
def time_add_scalar(self):
(self.x + self.x)
def time_add_scalar_conv(self):
(self.x + 1.0)
def time_add_scalar_conv_complex(self):
(self.y + self.z)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_ufunc.py",
"copies": "1",
"size": "4274",
"license": "mit",
"hash": -6777944010876848000,
"line_mean": 27.3046357616,
"line_max": 76,
"alpha_frac": 0.5486663547,
"autogenerated": false,
"ratio": 3.0726096333572968,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9121275988057297,
"avg_score": 0,
"num_lines": 151
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .common import Benchmark
class Bincount(Benchmark):
def setup(self):
self.d = np.arange(80000, dtype=np.intp)
self.e = self.d.astype(np.float64)
def time_bincount(self):
np.bincount(self.d)
def time_weights(self):
np.bincount(self.d, weights=self.e)
class Median(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_even(self):
np.median(self.e)
def time_odd(self):
np.median(self.o)
def time_even_inplace(self):
np.median(self.e, overwrite_input=True)
def time_odd_inplace(self):
np.median(self.o, overwrite_input=True)
def time_even_small(self):
np.median(self.e[:500], overwrite_input=True)
def time_odd_small(self):
np.median(self.o[:500], overwrite_input=True)
class Percentile(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_quartile(self):
np.percentile(self.e, [25, 75])
def time_percentile(self):
np.percentile(self.e, [25, 35, 55, 65, 75])
class Select(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = [(self.d > 4), (self.d < 2)]
self.cond_large = [(self.d > 4), (self.d < 2)] * 10
def time_select(self):
np.select(self.cond, [self.d, self.e])
def time_select_larger(self):
np.select(self.cond_large, ([self.d, self.e] * 10))
class Sort(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
np.random.seed(25)
np.random.shuffle(self.o)
# quicksort implementations can have issues with equal elements
self.equal = np.ones(10000)
self.many_equal = np.sort(np.arange(10000) % 10)
# quicksort median of 3 worst case
self.worst = np.arange(1000000)
x = self.worst
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
def time_sort(self):
np.sort(self.e)
def time_sort_random(self):
np.sort(self.o)
def time_sort_inplace(self):
self.e.sort()
def time_sort_equal(self):
self.equal.sort()
def time_sort_many_equal(self):
self.many_equal.sort()
def time_sort_worst(self):
np.sort(self.worst)
def time_argsort(self):
self.e.argsort()
def time_argsort_random(self):
self.o.argsort()
class Where(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = (self.d > 5000)
def time_1(self):
np.where(self.cond)
def time_2(self):
np.where(self.cond, self.d, self.e)
def time_2_broadcast(self):
np.where(self.cond, self.d, 0)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_function_base.py",
"copies": "1",
"size": "3086",
"license": "mit",
"hash": -6118017979300846000,
"line_mean": 23.4920634921,
"line_max": 71,
"alpha_frac": 0.5836033701,
"autogenerated": false,
"ratio": 3.1266464032421477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42102497733421473,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .common import Benchmark
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.l = [np.arange(1000), np.arange(1000)]
self.l10x10 = np.ones((10, 10))
def time_array_1(self):
np.array(1)
def time_array_empty(self):
np.array([])
def time_array_l1(self):
np.array([1])
def time_array_l100(self):
np.array(self.l100)
def time_array_l(self):
np.array(self.l)
def time_vstack_l(self):
np.vstack(self.l)
def time_hstack_l(self):
np.hstack(self.l)
def time_dstack_l(self):
np.dstack(self.l)
def time_arange_100(self):
np.arange(100)
def time_zeros_100(self):
np.zeros(100)
def time_ones_100(self):
np.ones(100)
def time_empty_100(self):
np.empty(100)
def time_eye_100(self):
np.eye(100)
def time_identity_100(self):
np.identity(100)
def time_eye_3000(self):
np.eye(3000)
def time_identity_3000(self):
np.identity(3000)
def time_diag_l100(self):
np.diag(self.l100)
def time_diagflat_l100(self):
np.diagflat(self.l100)
def time_diagflat_l50_l50(self):
np.diagflat([self.l50, self.l50])
def time_triu_l10x10(self):
np.triu(self.l10x10)
def time_tril_l10x10(self):
np.tril(self.l10x10)
class MA(Benchmark):
def setup(self):
self.l100 = range(100)
self.t100 = ([True] * 100)
def time_masked_array(self):
np.ma.masked_array()
def time_masked_array_l100(self):
np.ma.masked_array(self.l100)
def time_masked_array_l100_t100(self):
np.ma.masked_array(self.l100, self.t100)
class CorrConv(Benchmark):
params = [[50, 1000, 1e5],
[10, 100, 1000, 1e4],
['valid', 'same', 'full']]
param_names = ['size1', 'size2', 'mode']
def setup(self, size1, size2, mode):
self.x1 = np.linspace(0, 1, num=size1)
self.x2 = np.cos(np.linspace(0, 2 * np.pi, num=size2))
def time_correlate(self, size1, size2, mode):
np.correlate(self.x1, self.x2, mode=mode)
def time_convolve(self, size1, size2, mode):
np.convolve(self.x1, self.x2, mode=mode)
class CountNonzero(Benchmark):
param_names = ['numaxes', 'size', 'dtype']
params = [
[1, 2, 3],
[100, 10000, 1000000],
[bool, int, str, object]
]
def setup(self, numaxes, size, dtype):
self.x = np.empty(shape=(
numaxes, size), dtype=dtype)
def time_count_nonzero(self, numaxes, size, dtype):
np.count_nonzero(self.x)
def time_count_nonzero_axis(self, numaxes, size, dtype):
np.count_nonzero(self.x, axis=self.x.ndim - 1)
def time_count_nonzero_multi_axis(self, numaxes, size, dtype):
if self.x.ndim >= 2:
np.count_nonzero(self.x, axis=(
self.x.ndim - 1, self.x.ndim - 2))
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_core.py",
"copies": "1",
"size": "3090",
"license": "mit",
"hash": 4030334099422983700,
"line_mean": 22.4090909091,
"line_max": 66,
"alpha_frac": 0.574433657,
"autogenerated": false,
"ratio": 2.9740134744947064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9047718693266269,
"avg_score": 0.0001456876456876457,
"num_lines": 132
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .common import Benchmark, TYPES1, get_squares
class AddReduce(Benchmark):
def setup(self):
self.squares = get_squares().values()
def time_axis_0(self):
[np.add.reduce(a, axis=0) for a in self.squares]
def time_axis_1(self):
[np.add.reduce(a, axis=1) for a in self.squares]
class AddReduceSeparate(Benchmark):
params = [[0, 1], TYPES1]
param_names = ['axis', 'type']
def setup(self, axis, typename):
self.a = get_squares()[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
class AnyAll(Benchmark):
def setup(self):
self.zeros = np.zeros(100000, np.bool)
self.ones = np.ones(100000, np.bool)
def time_all_fast(self):
self.zeros.all()
def time_all_slow(self):
self.ones.all()
def time_any_fast(self):
self.ones.any()
def time_any_slow(self):
self.zeros.any()
class MinMax(Benchmark):
params = [np.float32, np.float64, np.intp]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_min(self, dtype):
np.min(self.d)
def time_max(self, dtype):
np.max(self.d)
class SmallReduction(Benchmark):
def setup(self):
self.d = np.ones(100, dtype=np.float32)
def time_small(self):
np.sum(self.d)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_reduce.py",
"copies": "1",
"size": "1469",
"license": "mit",
"hash": 4681237434933942000,
"line_mean": 20.9253731343,
"line_max": 64,
"alpha_frac": 0.6099387338,
"autogenerated": false,
"ratio": 3.2074235807860263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9317362314586026,
"avg_score": 0,
"num_lines": 67
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from ..core.client import Client
from ..core import message as msg
from ..core.data import Data, CategoricalComponent
from ..core.subset import RangeSubsetState, CategoricalRoiSubsetState
from ..core.exceptions import IncompatibleDataException, IncompatibleAttribute
from ..core.edit_subset_mode import EditSubsetMode
from .layer_artist import HistogramLayerArtist, LayerArtistContainer
from .util import update_ticks, visible_limits
from ..core.callback_property import CallbackProperty, add_callback
from ..utils import lookup_class
class UpdateProperty(CallbackProperty):
"""Descriptor that calls client's sync_all() method when changed"""
def __init__(self, default, relim=False):
super(UpdateProperty, self).__init__(default)
self.relim = relim
def __set__(self, instance, value):
changed = value != self.__get__(instance)
super(UpdateProperty, self).__set__(instance, value)
if not changed:
return
instance.sync_all()
if self.relim:
instance._relim()
def update_on_true(func):
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if result:
args[0].sync_all()
return result
return wrapper
class HistogramClient(Client):
"""
A client class to display histograms
"""
normed = UpdateProperty(False)
cumulative = UpdateProperty(False)
autoscale = UpdateProperty(True)
nbins = UpdateProperty(30)
xlog = UpdateProperty(False, relim=True)
ylog = UpdateProperty(False)
def __init__(self, data, figure, artist_container=None):
super(HistogramClient, self).__init__(data)
self._artists = artist_container or LayerArtistContainer()
self._axes = figure.add_subplot(111)
self._component = None
self._saved_nbins = None
self._xlim = {}
try:
self._axes.figure.set_tight_layout(True)
except AttributeError: # pragma: nocover (matplotlib < 1.1)
pass
@property
def bins(self):
""" An array of bin edges for the histogram.
Returns None if no histogram has been computed yet.
"""
for art in self._artists:
if not isinstance(art, HistogramLayerArtist):
continue
return art.x
@property
def axes(self):
return self._axes
@property
def xlimits(self):
try:
return self._xlim[self.component]
except KeyError:
pass
lo, hi = self._default_limits()
self._xlim[self.component] = lo, hi
return lo, hi
def _default_limits(self):
if self.component is None:
return 0, 1
lo, hi = np.inf, -np.inf
for a in self._artists:
try:
data = a.layer[self.component]
except IncompatibleAttribute:
continue
if data.size == 0:
continue
lo = min(lo, np.nanmin(data))
hi = max(hi, np.nanmax(data))
return lo, hi
@xlimits.setter
@update_on_true
def xlimits(self, value):
lo, hi = value
old = self.xlimits
if lo is None:
lo = old[0]
if hi is None:
hi = old[1]
self._xlim[self.component] = min(lo, hi), max(lo, hi)
self._relim()
return True
def layer_present(self, layer):
return layer in self._artists
def add_layer(self, layer):
if layer.data not in self.data:
raise IncompatibleDataException("Layer not in data collection")
self._ensure_layer_data_present(layer)
if self.layer_present(layer):
return self._artists[layer][0]
art = HistogramLayerArtist(layer, self._axes)
self._artists.append(art)
self._ensure_subsets_present(layer)
self._sync_layer(layer)
self._redraw()
return art
def _redraw(self):
self._axes.figure.canvas.draw()
def _ensure_layer_data_present(self, layer):
if layer.data is layer:
return
if not self.layer_present(layer.data):
self.add_layer(layer.data)
def _ensure_subsets_present(self, layer):
for subset in layer.data.subsets:
self.add_layer(subset)
@update_on_true
def remove_layer(self, layer):
if not self.layer_present(layer):
return
for a in self._artists.pop(layer):
a.clear()
if isinstance(layer, Data):
for subset in layer.subsets:
self.remove_layer(subset)
return True
@update_on_true
def set_layer_visible(self, layer, state):
if not self.layer_present(layer):
return
for a in self._artists[layer]:
a.visible = state
return True
def is_layer_visible(self, layer):
if not self.layer_present(layer):
return False
return any(a.visible for a in self._artists[layer])
def _update_axis_labels(self):
xlabel = self.component.label if self.component is not None else ''
if self.xlog:
xlabel = "Log %s" % xlabel
ylabel = 'N'
self._axes.set_xlabel(xlabel)
self._axes.set_ylabel(ylabel)
components = list(self._get_data_components('x'))
if components:
bins = update_ticks(self.axes, 'x',
components, False)
        else:
            return
if bins is not None:
prev_bins = self.nbins
auto_bins = self._auto_nbin(calculate_only=True)
if prev_bins == auto_bins:
# try to assign a bin to each category,
# but only if self.nbins hasn't been overridden
# from auto_nbin
self.nbins = min(bins, 100)
self.xlimits = (-0.5, bins - 0.5)
def _get_data_components(self, coord):
""" Returns the components for each dataset for x and y axes.
"""
if coord == 'x':
attribute = self.component
else:
raise TypeError('coord must be x')
for data in self._data:
try:
yield data.get_component(attribute)
except IncompatibleAttribute:
pass
def _auto_nbin(self, calculate_only=False):
data = set(a.layer.data for a in self._artists)
if len(data) == 0:
return
dx = np.mean([d.size for d in data])
val = min(max(5, (dx / 1000) ** (1. / 3.) * 30), 100)
c = list(self._get_data_components('x'))
if c:
c = c[0]
if isinstance(c, CategoricalComponent):
val = min(c._categories.size, 100)
if not calculate_only:
self.xlimits = (-0.5, c._categories.size - 0.5)
if not calculate_only:
self.nbins = val
return val
def _sync_layer(self, layer, force=False):
for a in self._artists[layer]:
a.lo, a.hi = self.xlimits
a.nbins = self.nbins
a.xlog = self.xlog
a.ylog = self.ylog
a.cumulative = self.cumulative
a.normed = self.normed
a.att = self._component
a.update() if not force else a.force_update()
def sync_all(self, force=False):
layers = set(a.layer for a in self._artists)
for l in layers:
self._sync_layer(l, force=force)
self._update_axis_labels()
if self.autoscale:
lim = visible_limits(self._artists, 1)
if lim is not None:
lo = 1e-5 if self.ylog else 0
hi = lim[1]
# pad the top
if self.ylog:
hi = lo * (hi / lo) ** 1.03
else:
hi *= 1.03
self._axes.set_ylim(lo, hi)
yscl = 'log' if self.ylog else 'linear'
self._axes.set_yscale(yscl)
self._redraw()
@property
def component(self):
return self._component
@component.setter
def component(self, value):
self.set_component(value)
def set_component(self, component):
"""
Redefine which component gets plotted
Parameters
----------
component: ComponentID
The new component to plot
"""
if self._component is component:
return
iscat = lambda x: isinstance(x, CategoricalComponent)
def comp_obj():
# the current Component (not ComponentID) object
x = list(self._get_data_components('x'))
if x:
x = x[0]
return x
prev = comp_obj()
old = self.nbins
first_add = self._component is None
self._component = component
cur = comp_obj()
if first_add or iscat(cur):
self._auto_nbin()
# save old bins if switch from non-category to category
if prev and not iscat(prev) and iscat(cur):
self._saved_nbins = old
# restore old bins if switch from category to non-category
if iscat(prev) and cur and not iscat(cur) and self._saved_nbins is not None:
self.nbins = self._saved_nbins
self._saved_nbins = None
self.sync_all()
self._relim()
def _relim(self):
lim = self.xlimits
if self.xlog:
lim = list(np.log10(lim))
if not np.isfinite(lim[0]):
lim[0] = 1e-5
if not np.isfinite(lim[1]):
lim[1] = 1
self._axes.set_xlim(lim)
self._redraw()
def _numerical_data_changed(self, message):
data = message.sender
self.sync_all(force=True)
def _on_component_replaced(self, msg):
if self.component is msg.old:
self.set_component(msg.new)
def _update_data(self, message):
self.sync_all()
def _update_subset(self, message):
self._sync_layer(message.subset)
self._redraw()
def _add_subset(self, message):
self.add_layer(message.sender)
assert self.layer_present(message.sender)
assert self.is_layer_visible(message.sender)
def _remove_data(self, message):
self.remove_layer(message.data)
def _remove_subset(self, message):
self.remove_layer(message.subset)
def apply_roi(self, roi):
x, _ = roi.to_polygon()
lo = min(x)
hi = max(x)
# expand roi to match bin edges
bins = self.bins
if lo >= bins.min():
lo = bins[bins <= lo].max()
if hi <= bins.max():
hi = bins[bins >= hi].min()
if self.xlog:
lo = 10 ** lo
hi = 10 ** hi
comp = list(self._get_data_components('x'))
if comp:
comp = comp[0]
if comp.categorical:
state = CategoricalRoiSubsetState.from_range(comp, self.component,
lo, hi)
else:
state = RangeSubsetState(lo, hi)
state.att = self.component
mode = EditSubsetMode()
visible = [d for d in self.data if self.is_layer_visible(d)]
focus = visible[0] if len(visible) > 0 else None
mode.update(self.data, state, focus_data=focus)
def register_to_hub(self, hub):
dfilter = lambda x: x.sender.data in self._artists
dcfilter = lambda x: x.data in self._artists
subfilter = lambda x: x.subset in self._artists
hub.subscribe(self,
msg.SubsetCreateMessage,
handler=self._add_subset,
filter=dfilter)
hub.subscribe(self,
msg.SubsetUpdateMessage,
handler=self._update_subset,
filter=subfilter)
hub.subscribe(self,
msg.SubsetDeleteMessage,
handler=self._remove_subset)
hub.subscribe(self,
msg.DataUpdateMessage,
handler=self._update_data,
filter=dfilter)
hub.subscribe(self,
msg.DataCollectionDeleteMessage,
handler=self._remove_data)
hub.subscribe(self,
msg.NumericalDataChangedMessage,
handler=self._numerical_data_changed)
hub.subscribe(self,
msg.ComponentReplacedMessage,
handler=self._on_component_replaced)
def restore_layers(self, layers, context):
for layer in layers:
lcls = lookup_class(layer.pop('_type'))
if lcls != HistogramLayerArtist:
raise ValueError("Cannot restore layers of type %s" % lcls)
data_or_subset = context.object(layer.pop('layer'))
result = self.add_layer(data_or_subset)
result.properties = layer
| {
"repo_name": "JudoWill/glue",
"path": "glue/clients/histogram_client.py",
"copies": "1",
"size": "13239",
"license": "bsd-3-clause",
"hash": -3576257610285740500,
"line_mean": 29.5750577367,
"line_max": 84,
"alpha_frac": 0.5449807387,
"autogenerated": false,
"ratio": 4.09242658423493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.513740732293493,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .core import compute, optimize
from ..expr import Expr, Arithmetic, Math, Map, UnaryOp
from ..expr.broadcast import broadcast_collect, Broadcast
from toolz import memoize
import datashape
import numba
from .pyfunc import funcstr
Broadcastable = Arithmetic, Math, Map, UnaryOp
def optimize_ndarray(expr, *data, **kwargs):
return broadcast_collect(expr, Broadcastable=Broadcastable,
WantToBroadcast=Broadcastable)
for i in range(1, 11):
optimize.register(Expr, *([np.ndarray] * i))(optimize_ndarray)
def get_numba_type(dshape):
"""Get the ``numba`` type corresponding to the ``datashape.Mono`` instance
`dshape`
Parameters
----------
dshape : datashape.Mono
Returns
-------
restype : numba.types.Type
Examples
--------
>>> import datashape
>>> import numba
>>> get_numba_type(datashape.bool_)
bool
>>> get_numba_type(datashape.date_)
datetime64(D)
>>> get_numba_type(datashape.datetime_)
datetime64(us)
>>> get_numba_type(datashape.timedelta_) # default unit is microseconds
timedelta64(us)
>>> get_numba_type(datashape.TimeDelta('D'))
timedelta64(D)
>>> get_numba_type(datashape.int64)
int64
>>> get_numba_type(datashape.String(7, "A"))
[char x 7]
>>> get_numba_type(datashape.String(None, "A"))
str
>>> get_numba_type(datashape.String(7))
[unichr x 7]
>>> get_numba_type(datashape.string)
Traceback (most recent call last):
...
TypeError: Numba cannot handle variable length strings
>>> get_numba_type(datashape.object_)
Traceback (most recent call last):
...
TypeError: Numba cannot handle object datashape
>>> get_numba_type(datashape.dshape('10 * {a: int64}'))
Traceback (most recent call last):
...
TypeError: Invalid datashape to numba type: dshape("{a: int64}")
See Also
--------
compute_signature
"""
measure = dshape.measure
if measure == datashape.bool_:
restype = numba.bool_ # str(bool_) == 'bool' so we can't use getattr
elif measure == datashape.date_:
restype = numba.types.NPDatetime('D')
elif measure == datashape.datetime_:
restype = numba.types.NPDatetime('us')
elif isinstance(measure, datashape.TimeDelta): # isinstance for diff freqs
restype = numba.types.NPTimedelta(measure.unit)
elif isinstance(measure, datashape.String):
encoding = measure.encoding
fixlen = measure.fixlen
if fixlen is None:
if encoding == 'A':
return numba.types.string
raise TypeError("Numba cannot handle variable length strings")
typ = (numba.types.CharSeq
if encoding == 'A' else numba.types.UnicodeCharSeq)
return typ(fixlen or 0)
elif measure == datashape.object_:
raise TypeError("Numba cannot handle object datashape")
else:
try:
restype = getattr(numba, str(measure))
except AttributeError:
raise TypeError('Invalid datashape to numba type: %r' % measure)
return restype
def compute_signature(expr):
"""Get the ``numba`` *function signature* corresponding to ``DataShape``
Examples
--------
>>> from blaze import symbol
>>> s = symbol('s', 'int64')
>>> t = symbol('t', 'float32')
>>> d = symbol('d', 'datetime')
>>> expr = s + t
>>> compute_signature(expr)
float64(int64, float32)
>>> expr = d.truncate(days=1)
>>> compute_signature(expr)
datetime64(D)(datetime64(us))
>>> expr = d.day + 1
>>> compute_signature(expr) # only looks at leaf nodes
int64(datetime64(us))
Notes
-----
* This could potentially be adapted/refactored to deal with
``datashape.Function`` types.
* Cannot handle ``datashape.Record`` types.
"""
assert datashape.isscalar(expr.schema)
restype = get_numba_type(expr.schema)
argtypes = [get_numba_type(e.schema) for e in expr._leaves()]
return restype(*argtypes)
def _get_numba_ufunc(expr):
"""Construct a numba ufunc from a blaze expression
Parameters
----------
expr : blaze.expr.Expr
Returns
-------
f : function
A numba vectorized function
Examples
--------
>>> from blaze import symbol
>>> import numpy as np
>>> s = symbol('s', 'float64')
>>> t = symbol('t', 'float64')
>>> x = np.array([1.0, 2.0, 3.0])
>>> y = np.array([2.0, 3.0, 4.0])
>>> f = get_numba_ufunc(s + t)
>>> f(x, y)
array([ 3., 5., 7.])
See Also
--------
get_numba_type
compute_signature
"""
if isinstance(expr, Broadcast):
leaves = expr._scalars
expr = expr._scalar_expr
else:
leaves = expr._leaves()
    # we may not have a Broadcast instance because simple arithmetic
    # expressions can be vectorized directly; both cases are handled above
s, scope = funcstr(leaves, expr)
scope = dict((k, numba.jit(nopython=True)(v) if callable(v) else v)
for k, v in scope.items())
func = eval(s, scope)
sig = compute_signature(expr)
return numba.vectorize([sig], nopython=True)(func)
# do this here so we can run our doctest
get_numba_ufunc = memoize(_get_numba_ufunc)
def broadcast_numba(t, *data, **kwargs):
try:
ufunc = get_numba_ufunc(t)
except TypeError: # strings and objects aren't supported very well yet
return compute(t, dict(zip(t._leaves(), data)))
else:
return ufunc(*data)
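# A minimal usage sketch, not from the blaze source, reusing the symbols and
# values of the get_numba_ufunc doctest above.
if __name__ == "__main__":  # pragma: no cover
    from blaze import symbol
    s = symbol('s', 'float64')
    t = symbol('t', 'float64')
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([2.0, 3.0, 4.0])
    print(broadcast_numba(s + t, x, y))  # expected: [ 3.  5.  7.]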
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/numba.py",
"copies": "1",
"size": "5639",
"license": "bsd-3-clause",
"hash": -8224640763478712000,
"line_mean": 25.5990566038,
"line_max": 79,
"alpha_frac": 0.6126972868,
"autogenerated": false,
"ratio": 3.571247625079164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9683944911879164,
"avg_score": 0,
"num_lines": 212
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from dask.array.chunk import coarsen, keepdims_wrapper, trim
def test_keepdims_wrapper_no_axis():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a)
rw = summer_wrapped(a, keepdims=True)
rwf = summer_wrapped(a, keepdims=False)
assert r.ndim == 0
assert r.shape == tuple()
assert r == 276
assert rw.ndim == 4
assert rw.shape == (1, 1, 1, 1)
assert (rw == 276).all()
assert rwf.ndim == 0
assert rwf.shape == tuple()
assert rwf == 276
def test_keepdims_wrapper_one_axis():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a, axis=2)
rw = summer_wrapped(a, axis=2, keepdims=True)
rwf = summer_wrapped(a, axis=2, keepdims=False)
assert r.ndim == 3
assert r.shape == (1, 2, 4)
assert (r == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()
assert rw.ndim == 4
assert rw.shape == (1, 2, 1, 4)
assert (rw == np.array([[[[12, 15, 18, 21]], [[48, 51, 54, 57]]]])).all()
assert rwf.ndim == 3
assert rwf.shape == (1, 2, 4)
assert (rwf == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()
def test_keepdims_wrapper_two_axes():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a, axis=(1, 3))
rw = summer_wrapped(a, axis=(1, 3), keepdims=True)
rwf = summer_wrapped(a, axis=(1, 3), keepdims=False)
assert r.ndim == 2
assert r.shape == (1, 3)
assert (r == np.array([[60, 92, 124]])).all()
assert rw.ndim == 4
assert rw.shape == (1, 1, 3, 1)
assert (rw == np.array([[[[60], [92], [124]]]])).all()
assert rwf.ndim == 2
assert rwf.shape == (1, 3)
assert (rwf == np.array([[60, 92, 124]])).all()
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
y = coarsen(np.sum, x, {0: 2, 1: 4})
assert y.shape == (12, 6)
assert y[0, 0] == np.sum(x[:2, :4])
"""
def test_coarsen_on_uneven_shape():
x = np.random.randint(10, size=(23, 24))
y = coarsen(np.sum, x, {0: 2, 1: 4})
assert y.shape == (12, 6)
assert y[0, 0] == np.sum(x[:2, :4])
assert eq(y[11, :], x[23, :])
"""
| {
"repo_name": "minrk/dask",
"path": "dask/array/tests/test_chunk.py",
"copies": "1",
"size": "2888",
"license": "bsd-3-clause",
"hash": -6686791176658658000,
"line_mean": 24.5575221239,
"line_max": 77,
"alpha_frac": 0.5730609418,
"autogenerated": false,
"ratio": 2.809338521400778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3882399463200778,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from ..data import Component, Data
from ...external import six
from .helpers import set_default_factory, __factories__, has_extension
__all__ = ['tabular_data', 'sextractor_factory', 'astropy_tabular_data',
'formatted_table_factory']
def _ascii_identifier_v02(origin, args, kwargs):
# this works for astropy v0.2
if isinstance(args[0], six.string_types):
return args[0].endswith(('csv', 'tsv', 'txt', 'tbl', 'dat',
'csv.gz', 'tsv.gz', 'txt.gz', 'tbl.gz',
'dat.gz'))
else:
return False
def _ascii_identifier_v03(origin, *args, **kwargs):
# this works for astropy v0.3
return _ascii_identifier_v02(origin, args, kwargs)
def astropy_tabular_data(*args, **kwargs):
"""
Build a data set from a table. We restrict ourselves to tables
with 1D columns.
All arguments are passed to
astropy.table.Table.read(...).
"""
from distutils.version import LooseVersion
from astropy import __version__
if LooseVersion(__version__) < LooseVersion("0.2"):
raise RuntimeError("Glue requires astropy >= v0.2. Please update")
result = Data()
# Read the table
from astropy.table import Table
# Add identifiers for ASCII data
from astropy.io import registry
if LooseVersion(__version__) < LooseVersion("0.3"):
registry.register_identifier('ascii', Table, _ascii_identifier_v02,
force=True)
else:
# Basically, we always want the plain ascii reader for now.
# But astropy will complain about ambiguous formats (or use another reader)
# unless we remove other registry identifiers and set up our own reader
nope = lambda *a, **k: False
registry.register_identifier('ascii.glue', Table, _ascii_identifier_v03,
force=True)
registry.register_identifier('ascii.csv', Table, nope, force=True)
registry.register_identifier('ascii.fast_csv', Table, nope, force=True)
registry.register_identifier('ascii', Table, nope, force=True)
registry.register_reader('ascii.glue', Table,
lambda path: Table.read(path, format='ascii'),
force=True)
try:
table = Table.read(*args, **kwargs)
except:
# In Python 3, as of Astropy 0.4, if the format is not specified, the
# automatic format identification will fail (astropy/astropy#3013).
# This is only a problem for ASCII formats however, because it is due
# to the fact that the file object in io.ascii does not rewind to the
# start between guesses (due to a bug), so here we can explicitly try
# the ASCII format if the format keyword was not already present.
if 'format' not in kwargs:
table = Table.read(*args, format='ascii.glue', **kwargs)
else:
raise
# Loop through columns and make component list
for column_name in table.columns:
c = table[column_name]
u = c.unit if hasattr(c, 'unit') else c.units
if table.masked:
# fill array for now
try:
c = c.filled(fill_value=np.nan)
except ValueError: # assigning nan to integer dtype
c = c.filled(fill_value=-1)
nc = Component.autotyped(c, units=u)
result.add_component(nc, column_name)
return result
astropy_tabular_data.label = "Catalog (Astropy Parser)"
astropy_tabular_data.identifier = has_extension('xml vot csv txt tsv tbl dat fits '
'xml.gz vot.gz csv.gz txt.gz tbl.bz '
'dat.gz fits.gz')
def tabular_data(path, **kwargs):
from .pandas import pandas_read_table
for fac in [astropy_tabular_data, pandas_read_table]:
try:
return fac(path, **kwargs)
except:
pass
else:
raise IOError("Could not parse file: %s" % path)
tabular_data.label = "Catalog"
tabular_data.identifier = has_extension('xml vot csv txt tsv tbl dat fits '
'xml.gz vot.gz csv.gz txt.gz tbl.bz '
'dat.gz fits.gz')
__factories__.append(tabular_data)
set_default_factory('xml', tabular_data)
set_default_factory('vot', tabular_data)
set_default_factory('csv', tabular_data)
set_default_factory('txt', tabular_data)
set_default_factory('tsv', tabular_data)
set_default_factory('tbl', tabular_data)
set_default_factory('dat', tabular_data)
__factories__.append(astropy_tabular_data)
# Add explicit factories for the formats which astropy.table
# can parse, but does not auto-identify
def formatted_table_factory(format, label):
def factory(file, **kwargs):
kwargs['format'] = 'ascii.%s' % format
return tabular_data(file, **kwargs)
factory.label = label
factory.identifier = lambda *a, **k: False
# rename function to its variable reference below
# allows pickling to work
factory.__name__ = '%s_factory' % format
return factory
sextractor_factory = formatted_table_factory('sextractor', 'SExtractor Catalog')
cds_factory = formatted_table_factory('cds', 'CDS Catalog')
daophot_factory = formatted_table_factory('daophot', 'DAOphot Catalog')
ipac_factory = formatted_table_factory('ipac', 'IPAC Catalog')
aastex_factory = formatted_table_factory('aastex', 'AASTeX Table')
latex_factory = formatted_table_factory('latex', 'LaTeX Table')
__factories__.extend([sextractor_factory, cds_factory, daophot_factory,
ipac_factory, aastex_factory, latex_factory]) | {
"repo_name": "JudoWill/glue",
"path": "glue/core/data_factories/tables.py",
"copies": "1",
"size": "5823",
"license": "bsd-3-clause",
"hash": 3340281704511874000,
"line_mean": 37.0653594771,
"line_max": 85,
"alpha_frac": 0.6223596084,
"autogenerated": false,
"ratio": 3.897590361445783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019949969845783,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from ..data import Data, Component, CategoricalComponent
from .helpers import has_extension, __factories__
__all__ = ['pandas_read_table']
def panda_process(indf):
"""
Build a data set from a table using pandas. This attempts to respect
categorical data input by letting pandas.read_csv infer the type
"""
result = Data()
for name, column in indf.iteritems():
if (column.dtype == np.object) | (column.dtype == np.bool):
# try to salvage numerical data
coerced = column.convert_objects(convert_numeric=True)
if (coerced.dtype != column.dtype) and coerced.isnull().mean() < 0.4:
c = Component(coerced.values)
else:
# pandas has a 'special' nan implementation and this doesn't
# play well with np.unique
c = CategoricalComponent(column.fillna(''))
else:
c = Component(column.values)
# strip off leading #
name = name.strip()
if name.startswith('#'):
name = name[1:].strip()
result.add_component(c, name)
return result
def pandas_read_table(path, **kwargs):
""" A factory for reading tabular data using pandas
:param path: path/to/file
:param kwargs: All kwargs are passed to pandas.read_csv
:returns: :class:`glue.core.data.Data` object
"""
import pandas as pd
try:
from pandas.parser import CParserError
except ImportError:
from pandas._parser import CParserError
# iterate over common delimiters to search for best option
delimiters = kwargs.pop('delimiter', [None] + list(',|\t '))
fallback = None
for d in delimiters:
try:
indf = pd.read_csv(path, delimiter=d, **kwargs)
# ignore files parsed to empty dataframes
if len(indf) == 0:
continue
# only use files parsed to single-column dataframes
# if we don't find a better strategy
if len(indf.columns) < 2:
fallback = indf
continue
return panda_process(indf)
except CParserError:
continue
if fallback is not None:
return panda_process(fallback)
raise IOError("Could not parse %s using pandas" % path)
pandas_read_table.label = "Pandas Table"
pandas_read_table.identifier = has_extension('csv txt tsv tbl dat')
__factories__.append(pandas_read_table)
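# A minimal usage sketch, not from the glue source: run the factory on a tiny
# temporary CSV file; the file contents below are illustrative only.
if __name__ == "__main__":  # pragma: no cover
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
        f.write("a,b\n1,x\n2,y\n")
        path = f.name
    try:
        d = pandas_read_table(path)
        print(d.components)
    finally:
        os.remove(path)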
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/data_factories/pandas.py",
"copies": "1",
"size": "2561",
"license": "bsd-3-clause",
"hash": -7117801495059398000,
"line_mean": 29.4880952381,
"line_max": 81,
"alpha_frac": 0.6099180008,
"autogenerated": false,
"ratio": 4.198360655737705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004355400696864112,
"num_lines": 84
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from ..data import Data
from .helpers import has_extension, set_default_factory, __factories__
from ..coordinates import coordinates_from_wcs
img_fmt = ['jpg', 'jpeg', 'bmp', 'png', 'tiff', 'tif']
__all__ = ['img_data']
def img_loader(file_name):
"""Load an image to a numpy array, using either PIL or skimage
:param file_name: Path of file to load
:rtype: Numpy array
"""
try:
from skimage import img_as_ubyte
from skimage.io import imread
return np.asarray(img_as_ubyte(imread(file_name)))
except ImportError:
pass
try:
from PIL import Image
return np.asarray(Image.open(file_name))
except ImportError:
raise ImportError("Reading %s requires PIL or scikit-image" %
file_name)
def img_data(file_name):
"""Load common image files into a Glue data object"""
result = Data()
data = img_loader(file_name)
data = np.flipud(data)
shp = data.shape
comps = []
labels = []
# split 3 color images into each color plane
if len(shp) == 3 and shp[2] in [3, 4]:
comps.extend([data[:, :, 0], data[:, :, 1], data[:, :, 2]])
labels.extend(['red', 'green', 'blue'])
if shp[2] == 4:
comps.append(data[:, :, 3])
labels.append('alpha')
else:
comps = [data]
labels = ['PRIMARY']
# look for AVM coordinate metadata
try:
from pyavm import AVM
avm = AVM(str(file_name)) # avoid unicode
wcs = avm.to_wcs()
except:
pass
else:
result.coords = coordinates_from_wcs(wcs)
for c, l in zip(comps, labels):
result.add_component(c, l)
return result
img_data.label = "Image"
img_data.identifier = has_extension(' '.join(img_fmt))
for i in img_fmt:
set_default_factory(i, img_data)
__factories__.append(img_data)
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/data_factories/image.py",
"copies": "1",
"size": "1974",
"license": "bsd-3-clause",
"hash": -4695404824515251000,
"line_mean": 24.3205128205,
"line_max": 70,
"alpha_frac": 0.5916919959,
"autogenerated": false,
"ratio": 3.550359712230216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4642051708130216,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.callback_property import CallbackProperty
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.exceptions import IncompatibleDataException, IncompatibleAttribute
from glue.core.data import Data
from glue.core import message as msg
from glue.core.client import Client
from glue.core.roi import RangeROI
from glue.core.state import lookup_class_with_patches
from glue.core.layer_artist import LayerArtistContainer
from glue.core.util import update_ticks, visible_limits
from glue.viewers.common.viz_client import init_mpl, update_appearance_from_settings
from .layer_artist import HistogramLayerArtist
class UpdateProperty(CallbackProperty):
"""
Descriptor that calls client's sync_all() method when changed
"""
def __init__(self, default, relim=False):
super(UpdateProperty, self).__init__(default)
self.relim = relim
def __set__(self, instance, value):
changed = value != self.__get__(instance)
super(UpdateProperty, self).__set__(instance, value)
if not changed:
return
instance.sync_all()
if self.relim:
instance._relim()
def update_on_true(func):
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if result:
args[0].sync_all()
return result
return wrapper
class HistogramClient(Client):
"""
A client class to display histograms
"""
normed = UpdateProperty(False)
cumulative = UpdateProperty(False)
autoscale = UpdateProperty(True)
nbins = UpdateProperty(30)
xlog = UpdateProperty(False, relim=True)
ylog = UpdateProperty(False)
xmin = UpdateProperty(None, relim=True)
xmax = UpdateProperty(None, relim=True)
def __init__(self, data, figure, layer_artist_container=None):
super(HistogramClient, self).__init__(data)
self._artists = layer_artist_container or LayerArtistContainer()
self._figure, self._axes = init_mpl(figure=figure, axes=None)
self._component = None
self._saved_nbins = None
self._xlim_cache = {}
self._xlog_cache = {}
self._sync_enabled = True
self._xlog_curr = False
@property
def bins(self):
"""
An array of bin edges for the histogram.
This returns `None` if no histogram has been computed yet.
"""
for art in self._artists:
if not isinstance(art, HistogramLayerArtist):
continue
return art.x
@property
def axes(self):
return self._axes
@property
def xlimits(self):
return self.xmin, self.xmax
@xlimits.setter
def xlimits(self, value):
lo, hi = value
old = self.xlimits
if lo is None:
lo = old[0]
if hi is None:
hi = old[1]
self.xmin = min(lo, hi)
self.xmax = max(lo, hi)
def layer_present(self, layer):
return layer in self._artists
def add_layer(self, layer):
if layer.data not in self.data:
raise IncompatibleDataException("Layer not in data collection")
self._ensure_layer_data_present(layer)
if self.layer_present(layer):
return self._artists[layer][0]
art = HistogramLayerArtist(layer, self._axes)
self._artists.append(art)
self._ensure_subsets_present(layer)
self._sync_layer(layer)
self._redraw()
return art
def _redraw(self):
self._axes.figure.canvas.draw()
def _ensure_layer_data_present(self, layer):
if layer.data is layer:
return
if not self.layer_present(layer.data):
self.add_layer(layer.data)
def _ensure_subsets_present(self, layer):
for subset in layer.data.subsets:
self.add_layer(subset)
@update_on_true
def remove_layer(self, layer):
if not self.layer_present(layer):
return
for a in self._artists.pop(layer):
a.clear()
if isinstance(layer, Data):
for subset in layer.subsets:
self.remove_layer(subset)
return True
@update_on_true
def set_layer_visible(self, layer, state):
if not self.layer_present(layer):
return
for a in self._artists[layer]:
a.visible = state
return True
def is_layer_visible(self, layer):
if not self.layer_present(layer):
return False
return any(a.visible for a in self._artists[layer])
def _update_axis_labels(self):
xlabel = self.component.label if self.component is not None else ''
if self.xlog:
xlabel = "Log %s" % xlabel
ylabel = 'N'
self._axes.set_xlabel(xlabel)
self._axes.set_ylabel(ylabel)
components = list(self._get_data_components('x'))
if components:
bins = update_ticks(self.axes, 'x',
components, False)
        else:
            return
if bins is not None:
prev_bins = self.nbins
auto_bins = self._auto_nbin(calculate_only=True)
if prev_bins == auto_bins:
# try to assign a bin to each category,
# but only if self.nbins hasn't been overridden
# from auto_nbin
self.nbins = min(bins, 100)
self.xlimits = (-0.5, bins - 0.5)
def _get_data_components(self, coord):
""" Returns the components for each dataset for x and y axes.
"""
if coord == 'x':
attribute = self.component
else:
raise TypeError('coord must be x')
for data in self._data:
try:
yield data.get_component(attribute)
except IncompatibleAttribute:
pass
def _auto_nbin(self, calculate_only=False):
data = set(a.layer.data for a in self._artists)
if len(data) == 0:
return
dx = np.mean([d.size for d in data])
val = min(max(5, (dx / 1000) ** (1. / 3.) * 30), 100)
c = list(self._get_data_components('x'))
if c:
c = c[0]
if c.categorical:
val = min(c.categories.size, 100)
if not calculate_only:
self.xlimits = (-0.5, c.categories.size - 0.5)
if not calculate_only:
self.nbins = val
return val
def _auto_limits(self):
lo, hi = np.inf, -np.inf
for a in self._artists:
try:
data = a.layer[self.component]
except IncompatibleAttribute:
continue
if data.size == 0:
continue
if self.xlog:
positive = data > 0
if np.any(positive):
positive_data = data[positive]
lo = min(lo, np.nanmin(positive_data))
hi = max(hi, np.nanmax(positive_data))
else:
lo = 1
hi = 10
else:
lo = min(lo, np.nanmin(data))
hi = max(hi, np.nanmax(data))
self.xmin = lo
self.xmax = hi
def _sync_layer(self, layer, force=False):
for a in self._artists[layer]:
a.lo = self.xmin
a.hi = self.xmax
a.nbins = self.nbins
a.xlog = self.xlog
a.ylog = self.ylog
a.cumulative = self.cumulative
a.normed = self.normed
a.att = self._component
a.update() if not force else a.force_update()
def sync_all(self, force=False):
if not self._sync_enabled:
return
if self.component is not None:
if not (self.xlog, self.component) in self._xlim_cache or not self.component in self._xlog_cache:
self._auto_limits()
self._xlim_cache[(self.xlog, self.component)] = self.xmin, self.xmax
self._xlog_cache[self.component] = self.xlog
elif self.xlog is self._xlog_curr:
self._xlim_cache[(self.xlog, self.component)] = self.xmin, self.xmax
else:
self._xlog_cache[self.component] = self.xlog
self.xmin, self.xmax = self._xlim_cache[(self.xlog, self.component)]
self._xlog_curr = self.xlog
layers = set(a.layer for a in self._artists)
for l in layers:
self._sync_layer(l, force=force)
self._update_axis_labels()
if self.autoscale:
lim = visible_limits(self._artists, 1)
if lim is not None:
lo = 1e-5 if self.ylog else 0
hi = lim[1]
# pad the top
if self.ylog:
hi = lo * (hi / lo) ** 1.03
else:
hi *= 1.03
self._axes.set_ylim(lo, hi)
yscl = 'log' if self.ylog else 'linear'
self._axes.set_yscale(yscl)
self._redraw()
@property
def component(self):
return self._component
@component.setter
def component(self, value):
self.set_component(value)
def set_component(self, component):
"""
Redefine which component gets plotted
Parameters
----------
component: `~glue.core.component_id.ComponentID`
The new component to plot
"""
if self._component is component:
return
self._sync_enabled = False
iscat = lambda x: x.categorical
def comp_obj():
# the current Component (not ComponentID) object
x = list(self._get_data_components('x'))
if x:
x = x[0]
return x
prev = comp_obj()
old = self.nbins
first_add = self._component is None
self._component = component
cur = comp_obj()
if self.component in self._xlog_cache:
self.xlog = self._xlog_cache[self.component]
else:
self.xlog = False
self._xlog_cache[self.component] = self.xlog
if (self.xlog, self.component) in self._xlim_cache:
self.xmin, self.xmax = self._xlim_cache[(self.xlog, self.component)]
else:
self._auto_limits()
self._xlim_cache[(self.xlog, self.component)] = self.xmin, self.xmax
self._sync_enabled = True
if first_add or iscat(cur):
self._auto_nbin()
# save old bins if switch from non-category to category
if prev and not iscat(prev) and iscat(cur):
self._saved_nbins = old
# restore old bins if switch from category to non-category
if prev and iscat(prev) and cur and not iscat(cur) and self._saved_nbins is not None:
self.nbins = self._saved_nbins
self._saved_nbins = None
self.sync_all()
self._relim()
def _relim(self):
xmin, xmax = self.xmin, self.xmax
if self.xlog:
if xmin is None or not np.isfinite(xmin):
xmin = 0
else:
xmin = np.log10(xmin)
if xmax is None or not np.isfinite(xmax):
xmax = 1
else:
xmax = np.log10(xmax)
self._axes.set_xlim((xmin, xmax))
self._redraw()
def _numerical_data_changed(self, message):
data = message.sender
self.sync_all(force=True)
def _on_component_replaced(self, msg):
if self.component is msg.old:
self.set_component(msg.new)
def _update_data(self, message):
self.sync_all()
def _update_subset(self, message):
self._sync_layer(message.subset)
self._redraw()
def _add_subset(self, message):
self.add_layer(message.sender)
assert self.layer_present(message.sender)
assert self.is_layer_visible(message.sender)
def _remove_data(self, message):
self.remove_layer(message.data)
def _remove_subset(self, message):
self.remove_layer(message.subset)
def apply_roi(self, roi):
x, _ = roi.to_polygon()
lo = min(x)
hi = max(x)
# expand roi to match bin edges
bins = self.bins
if lo >= bins.min():
lo = bins[bins <= lo].max()
if hi <= bins.max():
hi = bins[bins >= hi].min()
if self.xlog:
lo = 10 ** lo
hi = 10 ** hi
nroi = RangeROI(min=lo, max=hi, orientation='x')
for comp in self._get_data_components('x'):
state = comp.subset_from_roi(self.component, nroi, coord='x')
mode = EditSubsetMode()
visible = [d for d in self.data if self.is_layer_visible(d)]
focus = visible[0] if len(visible) > 0 else None
mode.update(self.data, state, focus_data=focus)
def register_to_hub(self, hub):
dfilter = lambda x: x.sender.data in self._artists
dcfilter = lambda x: x.data in self._artists
subfilter = lambda x: x.subset in self._artists
hub.subscribe(self,
msg.SubsetCreateMessage,
handler=self._add_subset,
filter=dfilter)
hub.subscribe(self,
msg.SubsetUpdateMessage,
handler=self._update_subset,
filter=subfilter)
hub.subscribe(self,
msg.SubsetDeleteMessage,
handler=self._remove_subset)
hub.subscribe(self,
msg.DataUpdateMessage,
handler=self._update_data,
filter=dfilter)
hub.subscribe(self,
msg.DataCollectionDeleteMessage,
handler=self._remove_data)
hub.subscribe(self,
msg.NumericalDataChangedMessage,
handler=self._numerical_data_changed)
hub.subscribe(self,
msg.ComponentReplacedMessage,
handler=self._on_component_replaced)
def is_appearance_settings(msg):
return ('BACKGROUND_COLOR' in msg.settings
or 'FOREGROUND_COLOR' in msg.settings)
hub.subscribe(self, msg.SettingsChangeMessage,
self._update_appearance_from_settings,
filter=is_appearance_settings)
def _update_appearance_from_settings(self, message):
update_appearance_from_settings(self.axes)
self._redraw()
def restore_layers(self, layers, context):
for layer in layers:
lcls = lookup_class_with_patches(layer.pop('_type'))
if lcls != HistogramLayerArtist:
raise ValueError("Cannot restore layers of type %s" % lcls)
data_or_subset = context.object(layer.pop('layer'))
result = self.add_layer(data_or_subset)
result.properties = layer
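# --- Illustrative sketch (added for exposition; not part of the original
# module). It restates the bin-count heuristic used in _auto_nbin above:
# the automatic number of bins grows with the cube root of the mean dataset
# size, scaled by 30 and clamped to the range [5, 100].
def _auto_nbin_heuristic(mean_size):
    """Return the automatic bin count for a dataset of ``mean_size`` values."""
    return min(max(5, (mean_size / 1000) ** (1. / 3.) * 30), 100)
# e.g. _auto_nbin_heuristic(1000) == 30, while tiny and huge datasets are
# clamped to 5 and 100 bins respectively.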
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/histogram/client.py",
"copies": "2",
"size": "15209",
"license": "bsd-3-clause",
"hash": 3499433717232905700,
"line_mean": 29.8498985801,
"line_max": 109,
"alpha_frac": 0.5490170294,
"autogenerated": false,
"ratio": 4.011870218939594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5560887248339593,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.component import Component, CategoricalComponent
from glue.core.data import Data
def test_histogram_data():
data = Data(label="Test Data")
comp_a = Component(np.random.uniform(size=500))
comp_b = Component(np.random.normal(size=500))
data.add_component(comp_a, 'uniform')
data.add_component(comp_b, 'normal')
return data
def test_data():
data = Data(label="Test Data 1")
data2 = Data(label="Teset Data 2")
comp_a = Component(np.array([1, 2, 3]))
comp_b = Component(np.array([1, 2, 3]))
comp_c = Component(np.array([2, 4, 6]))
comp_d = Component(np.array([1, 3, 5]))
data.add_component(comp_a, 'a')
data.add_component(comp_b, 'b')
data2.add_component(comp_c, 'c')
data2.add_component(comp_d, 'd')
return data, data2
def test_categorical_data():
data = Data(label="Test Cat Data 1")
data2 = Data(label="Teset Cat Data 2")
comp_x1 = CategoricalComponent(np.array(['a', 'a', 'b']))
comp_y1 = Component(np.array([1, 2, 3]))
comp_x2 = CategoricalComponent(np.array(['c', 'a', 'b']))
comp_y2 = Component(np.array([1, 3, 5]))
data.add_component(comp_x1, 'x1')
data.add_component(comp_y1, 'y1')
data2.add_component(comp_x2, 'x2')
data2.add_component(comp_y2, 'y2')
return data, data2
def test_image():
data = Data(label="Test Image")
comp_a = Component(np.ones((25, 25)))
data.add_component(comp_a, 'test_1')
comp_b = Component(np.zeros((25, 25)))
data.add_component(comp_b, 'test_2')
return data
def test_cube():
data = Data(label="Test Cube")
comp_a = Component(np.ones((16, 16, 16)))
data.add_component(comp_a, 'test_3')
return data
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/tests/example_data.py",
"copies": "5",
"size": "1793",
"license": "bsd-3-clause",
"hash": 4269739040068948500,
"line_mean": 27.9193548387,
"line_max": 64,
"alpha_frac": 0.6307863915,
"autogenerated": false,
"ratio": 2.915447154471545,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6046233545971544,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.coordinates import coordinates_from_wcs
from glue.core.data_factories.helpers import has_extension
from glue.core.data import Data
from glue.config import data_factory
IMG_FMT = ['jpg', 'jpeg', 'bmp', 'png', 'tiff', 'tif']
__all__ = ['img_data']
def img_loader(file_name):
"""Load an image to a numpy array, using either PIL or skimage
:param file_name: Path of file to load
:rtype: Numpy array
"""
try:
from skimage import img_as_ubyte
from skimage.io import imread
return np.asarray(img_as_ubyte(imread(file_name)))
except ImportError:
pass
try:
from PIL import Image
return np.asarray(Image.open(file_name))
except ImportError:
raise ImportError("Reading %s requires PIL or scikit-image" %
file_name)
@data_factory(label='Image', identifier=has_extension(' '.join(IMG_FMT)))
def img_data(file_name):
"""Load common image files into a Glue data object"""
result = Data()
data = img_loader(file_name)
data = np.flipud(data)
shp = data.shape
comps = []
labels = []
# split 3 color images into each color plane
if len(shp) == 3 and shp[2] in [3, 4]:
comps.extend([data[:, :, 0], data[:, :, 1], data[:, :, 2]])
labels.extend(['red', 'green', 'blue'])
if shp[2] == 4:
comps.append(data[:, :, 3])
labels.append('alpha')
else:
comps = [data]
labels = ['PRIMARY']
# look for AVM coordinate metadata
try:
from pyavm import AVM
avm = AVM(str(file_name)) # avoid unicode
wcs = avm.to_wcs()
except:
pass
else:
result.coords = coordinates_from_wcs(wcs)
for c, l in zip(comps, labels):
result.add_component(c, l)
return result
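# --- Hypothetical usage sketch (added for exposition; not part of the
# original module). The file name is a placeholder; for an RGB(A) image the
# returned Data object carries one component per colour plane.
def _demo_list_planes(file_name='example.png'):
    d = img_data(file_name)
    return [c.label for c in d.visible_components]  # e.g. ['red', 'green', 'blue']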
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/data_factories/image.py",
"copies": "4",
"size": "1922",
"license": "bsd-3-clause",
"hash": 4836984818129985000,
"line_mean": 25.3287671233,
"line_max": 73,
"alpha_frac": 0.5972944849,
"autogenerated": false,
"ratio": 3.599250936329588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001141552511415525,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.coordinates import Coordinates
from glue.viewers.common.qt.data_slice_widget import SliceWidget
from glue.viewers.image.state import AggregateSlice
from glue.utils.decorators import avoid_circular
__all__ = ['MultiSliceWidgetHelper']
class MultiSliceWidgetHelper(object):
def __init__(self, viewer_state=None, layout=None):
self.viewer_state = viewer_state
self.layout = layout
self.layout.setSpacing(4)
self.layout.setContentsMargins(0, 3, 0, 3)
self.viewer_state.add_callback('x_att', self.sync_sliders_from_state)
self.viewer_state.add_callback('y_att', self.sync_sliders_from_state)
self.viewer_state.add_callback('slices', self.sync_sliders_from_state)
self.viewer_state.add_callback('reference_data', self.sync_sliders_from_state)
self._sliders = []
self.sync_sliders_from_state()
@property
def data(self):
return self.viewer_state.reference_data
def _clear(self):
for _ in range(self.layout.count()):
self.layout.takeAt(0)
for s in self._sliders:
if s is not None:
s.close()
self._sliders = []
@avoid_circular
def sync_state_from_sliders(self, *args):
slices = []
for i, slider in enumerate(self._sliders):
if slider is not None:
slices.append(slider.state.slice_center)
else:
slices.append(self.viewer_state.slices[i])
self.viewer_state.slices = tuple(slices)
@avoid_circular
def sync_sliders_from_state(self, *args):
if self.data is None or self.viewer_state.x_att is None or self.viewer_state.y_att is None:
return
if self.viewer_state.x_att is self.viewer_state.y_att:
return
# TODO: figure out why there are no current circular calls (normally
# we should need to add @avoid_circular)
# Update number of sliders if needed
if self.data.ndim != len(self._sliders):
reinitialize = True
else:
for i in range(self.data.ndim):
if i == self.viewer_state.x_att.axis or i == self.viewer_state.y_att.axis:
if self._sliders[i] is not None:
reinitialize = True
break
else:
if self._sliders[i] is None:
reinitialize = True
break
else:
reinitialize = False
if reinitialize:
self._clear()
for i in range(self.data.ndim):
if i == self.viewer_state.x_att.axis or i == self.viewer_state.y_att.axis:
self._sliders.append(None)
continue
# TODO: For now we simply pass a single set of world coordinates,
# but we will need to generalize this in future. We deliberately
# check the type of data.coords here since we want to treat
# subclasses differently.
if type(self.data.coords) != Coordinates:
world = self.data.coords.world_axis(self.data, i)
world_unit = self.data.coords.world_axis_unit(i)
world_warning = len(self.data.coords.dependent_axes(i)) > 1
else:
world = None
world_unit = None
world_warning = False
slider = SliceWidget(self.data.get_world_component_id(i).label,
hi=self.data.shape[i] - 1, world=world,
world_unit=world_unit, world_warning=world_warning)
self.slider_state = slider.state
self.slider_state.add_callback('slice_center', self.sync_state_from_sliders)
self._sliders.append(slider)
self.layout.addWidget(slider)
for i in range(self.data.ndim):
if self._sliders[i] is not None:
if isinstance(self.viewer_state.slices[i], AggregateSlice):
self._sliders[i].state.slice_center = self.viewer_state.slices[i].center
else:
self._sliders[i].state.slice_center = self.viewer_state.slices[i]
if __name__ == "__main__":
from glue.core import Data
from glue.utils.qt import get_qapp
from glue.external.echo import CallbackProperty
from glue.core.state_objects import State
app = get_qapp()
class FakeViewerState(State):
x_att = CallbackProperty()
y_att = CallbackProperty()
reference_data = CallbackProperty()
slices = CallbackProperty()
viewer_state = FakeViewerState()
data = Data(x=np.random.random((3, 50, 20, 5, 3)))
viewer_state.reference_data = data
viewer_state.x_att = data.get_pixel_component_id(0)
viewer_state.y_att = data.get_pixel_component_id(3)
viewer_state.slices = [0] * 5
    # MultiSliceWidgetHelper populates a Qt layout rather than being a widget
    # itself, so host it in a plain QWidget for this demo.
    from qtpy import QtWidgets
    widget = QtWidgets.QWidget()
    widget.setLayout(QtWidgets.QVBoxLayout())
    helper = MultiSliceWidgetHelper(viewer_state=viewer_state, layout=widget.layout())
    widget.show()
app.exec_()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/image/qt/slice_widget.py",
"copies": "2",
"size": "5216",
"license": "bsd-3-clause",
"hash": -6426266291832692000,
"line_mean": 33.091503268,
"line_max": 99,
"alpha_frac": 0.5778374233,
"autogenerated": false,
"ratio": 3.9725818735719725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007345249293605253,
"num_lines": 153
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.data_factories.helpers import has_extension
from glue.core.data import Component, Data
from glue.config import data_factory, qglue_parser
__all__ = ['astropy_tabular_data', 'sextractor_factory', 'cds_factory',
'daophot_factory', 'ipac_factory', 'aastex_factory',
'latex_factory']
# In this file, we define data factories based on the Astropy table reader.
def is_readable_by_astropy(filename, **kwargs):
# This identifier is not efficient, because it involves actually trying
# to read in the table. However, we only use this as the identifier for
# the astropy_tabular_data factory which has a priority of 0 and is
# therefore only used as a last attempt if all else fails.
try:
astropy_table_read(filename, **kwargs)
except:
return False
else:
return True
def astropy_table_read(*args, **kwargs):
from astropy.table import Table
# In Python 3, as of Astropy 0.4, if the format is not specified, the
# automatic format identification will fail (astropy/astropy#3013).
# This is only a problem for ASCII formats however, because it is due
# to the fact that the file object in io.ascii does not rewind to the
# start between guesses (due to a bug), so here we can explicitly try
# the ASCII format if the format keyword was not already present. But
# also more generally, we should first try the ASCII readers.
if 'format' not in kwargs:
try:
return Table.read(*args, format='ascii', **kwargs)
except:
pass
# If the above didn't work, attempt to read with no specified format
return Table.read(*args, **kwargs)
@data_factory(label="Catalog (astropy.table parser)",
identifier=is_readable_by_astropy,
priority=0)
def astropy_tabular_data(*args, **kwargs):
"""
Build a data set from a table. We restrict ourselves to tables
with 1D columns.
All arguments are passed to
astropy.table.Table.read(...).
"""
result = Data()
table = astropy_table_read(*args, **kwargs)
result.meta = table.meta
# Loop through columns and make component list
for column_name in table.columns:
c = table[column_name]
u = c.unit if hasattr(c, 'unit') else c.units
if table.masked:
# fill array for now
try:
c = c.filled(fill_value=np.nan)
except ValueError: # assigning nan to integer dtype
c = c.filled(fill_value=-1)
nc = Component.autotyped(c, units=u)
result.add_component(nc, column_name)
return result
@data_factory(label="VO table",
identifier=has_extension('xml vot xml.gz vot.gz'),
priority=1)
def astropy_tabular_data_votable(*args, **kwargs):
kwargs['format'] = 'votable'
return astropy_tabular_data(*args, **kwargs)
@data_factory(label="FITS table",
identifier=has_extension('fits fits.gz fit fit.gz'),
priority=1)
def astropy_tabular_data_fits(*args, **kwargs):
kwargs['format'] = 'fits'
return astropy_tabular_data(*args, **kwargs)
# Add explicit factories for the formats which astropy.table
# can parse, but does not auto-identify
def formatted_table_factory(format, label):
@data_factory(label=label, identifier=lambda *a, **k: False)
def factory(file, **kwargs):
kwargs['format'] = 'ascii.%s' % format
return astropy_tabular_data(file, **kwargs)
# rename function to its variable reference below
# allows pickling to work
factory.__name__ = '%s_factory' % format
return factory
sextractor_factory = formatted_table_factory('sextractor', 'SExtractor Catalog')
cds_factory = formatted_table_factory('cds', 'CDS Catalog')
daophot_factory = formatted_table_factory('daophot', 'DAOphot Catalog')
ipac_factory = formatted_table_factory('ipac', 'IPAC Catalog')
aastex_factory = formatted_table_factory('aastex', 'AASTeX Table')
latex_factory = formatted_table_factory('latex', 'LaTeX Table')
try:
from astropy.table import Table
except ImportError:
pass
else:
@qglue_parser(Table)
def _parse_data_astropy_table(data, label):
kwargs = dict((c, data[c]) for c in data.columns)
return [Data(label=label, **kwargs)]
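# --- Hypothetical usage sketch (added for exposition; not part of the
# original module). 'catalog.vot' is a placeholder path: the explicit
# VO-table factory simply forwards to astropy_tabular_data with
# format='votable', so any astropy.table.Table.read keywords still apply.
#
#     d = astropy_tabular_data_votable('catalog.vot')
#     print([c.label for c in d.visible_components])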
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_factories/astropy_table.py",
"copies": "4",
"size": "4422",
"license": "bsd-3-clause",
"hash": -4348215220937463300,
"line_mean": 31.5147058824,
"line_max": 80,
"alpha_frac": 0.6625961104,
"autogenerated": false,
"ratio": 3.7570093457943927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6419605456194393,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.exceptions import IncompatibleAttribute
from glue.core.subset import Subset
from glue.core.layer_artist import MatplotlibLayerArtist, ChangedTrigger
class DendroLayerArtist(MatplotlibLayerArtist):
# X vertices of structure i are in layout[0][3*i: 3*i+3]
layout = ChangedTrigger()
def __init__(self, layer, ax):
super(DendroLayerArtist, self).__init__(layer, ax)
def _recalc(self):
self.clear()
assert len(self.artists) == 0
if self.layout is None:
return
# layout[0] is [x0, x0, x[parent0], nan, ...]
# layout[1] is [y0, y[parent0], y[parent0], nan, ...]
ids = 3 * np.arange(self.layer.data.size)
try:
if isinstance(self.layer, Subset):
ids = ids[self.layer.to_mask()]
x, y = self.layout
blank = np.zeros(ids.size) * np.nan
x = np.column_stack([x[ids], x[ids + 1],
x[ids + 2], blank]).ravel()
y = np.column_stack([y[ids], y[ids + 1],
y[ids + 2], blank]).ravel()
except IncompatibleAttribute as exc:
self.disable_invalid_attributes(*exc.args)
return False
self.artists = self._axes.plot(x, y, '--')
return True
def update(self, view=None):
self._check_subset_state_changed()
if self._changed: # erase and make a new artist
if not self._recalc(): # no need to update style
return
self._changed = False
self._sync_style()
def _sync_style(self):
super(DendroLayerArtist, self)._sync_style()
style = self.layer.style
lw = 4 if isinstance(self.layer, Subset) else 2
for artist in self.artists:
artist.set_linestyle('-')
artist.set_marker(None)
artist.set_color(style.color)
artist.set_linewidth(lw)
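# --- Illustrative sketch (added for exposition; not part of the original
# module). It shows, with plain numpy, how the NaN-separated vertex arrays
# built in _recalc let a single call to plot() draw one disconnected branch
# per selected structure: the vertices of structure i sit at indices
# 3*i .. 3*i + 2, and the trailing NaN breaks the line before the next one.
def _demo_stitch_segments(x, y, selected):
    ids = 3 * np.asarray(selected)
    blank = np.zeros(ids.size) * np.nan
    xs = np.column_stack([x[ids], x[ids + 1], x[ids + 2], blank]).ravel()
    ys = np.column_stack([y[ids], y[ids + 1], y[ids + 2], blank]).ravel()
    return xs, ys  # suitable for axes.plot(xs, ys, '-')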
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/dendro_viewer/layer_artist.py",
"copies": "3",
"size": "2042",
"license": "bsd-3-clause",
"hash": -7868561518471582000,
"line_mean": 31.935483871,
"line_max": 72,
"alpha_frac": 0.5636630754,
"autogenerated": false,
"ratio": 3.746788990825688,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5810452066225689,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core import Data
from glue.external.echo import delay_callback
from glue.viewers.matplotlib.state import (MatplotlibDataViewerState,
MatplotlibLayerState,
DeferredDrawCallbackProperty as DDCProperty,
DeferredDrawSelectionCallbackProperty as DDSCProperty)
from glue.core.state_objects import (StateAttributeLimitsHelper,
StateAttributeHistogramHelper)
from glue.core.exceptions import IncompatibleAttribute
from glue.core.data_combo_helper import ComponentIDComboHelper
from glue.utils import defer_draw
from glue.utils.decorators import avoid_circular
__all__ = ['HistogramViewerState', 'HistogramLayerState']
class HistogramViewerState(MatplotlibDataViewerState):
"""
A state class that includes all the attributes for a histogram viewer.
"""
x_att = DDSCProperty(docstring='The attribute to compute the histograms for')
cumulative = DDCProperty(False, docstring='Whether to show the histogram as '
'a cumulative histogram')
normalize = DDCProperty(False, docstring='Whether to normalize the histogram '
'(based on the total sum)')
hist_x_min = DDCProperty(docstring='The minimum value used to compute the '
'histogram')
    hist_x_max = DDCProperty(docstring='The maximum value used to compute the '
'histogram')
hist_n_bin = DDCProperty(docstring='The number of bins in the histogram')
common_n_bin = DDCProperty(True, docstring='The number of bins to use for '
'all numerical components')
def __init__(self, **kwargs):
super(HistogramViewerState, self).__init__()
self.hist_helper = StateAttributeHistogramHelper(self, 'x_att', lower='hist_x_min',
upper='hist_x_max', n_bin='hist_n_bin',
common_n_bin='common_n_bin')
self.x_lim_helper = StateAttributeLimitsHelper(self, 'x_att', lower='x_min',
upper='x_max', log='x_log')
self.add_callback('layers', self._layers_changed)
self.x_att_helper = ComponentIDComboHelper(self, 'x_att',
pixel_coord=True, world_coord=True)
self.update_from_dict(kwargs)
# This should be added after update_from_dict since we don't want to
# influence the restoring of sessions.
self.add_callback('hist_x_min', self.update_view_to_bins)
self.add_callback('hist_x_max', self.update_view_to_bins)
self.add_callback('x_log', self._reset_x_limits, priority=1000)
def _reset_x_limits(self, *args):
with delay_callback(self, 'hist_x_min', 'hist_x_max', 'x_min', 'x_max', 'x_log'):
self.x_lim_helper.percentile = 100
self.x_lim_helper.update_values(force=True)
self.update_bins_to_view()
def reset_limits(self):
self._reset_x_limits()
self.y_min = min(getattr(layer, '_y_min', np.inf) for layer in self.layers)
self.y_max = max(getattr(layer, '_y_max', 0) for layer in self.layers)
def _update_priority(self, name):
if name == 'layers':
return 2
elif name.endswith('_log'):
return 0.5
elif name.endswith(('_min', '_max', '_bin')):
return 0
else:
return 1
def flip_x(self):
"""
Flip the x_min/x_max limits.
"""
self.x_lim_helper.flip_limits()
@avoid_circular
def update_bins_to_view(self, *args):
"""
Update the bins to match the current view.
"""
with delay_callback(self, 'hist_x_min', 'hist_x_max'):
if self.x_max > self.x_min:
self.hist_x_min = self.x_min
self.hist_x_max = self.x_max
else:
self.hist_x_min = self.x_max
self.hist_x_max = self.x_min
@avoid_circular
def update_view_to_bins(self, *args):
"""
Update the view to match the histogram interval
"""
with delay_callback(self, 'x_min', 'x_max'):
self.x_min = self.hist_x_min
self.x_max = self.hist_x_max
def _get_x_components(self):
if self.x_att is None:
return []
# Construct list of components over all layers
components = []
for layer_state in self.layers:
if isinstance(layer_state.layer, Data):
layer = layer_state.layer
else:
layer = layer_state.layer.data
try:
components.append(layer.get_component(self.x_att))
except IncompatibleAttribute:
pass
return components
@property
def bins(self):
"""
The position of the bins for the histogram based on the current state.
"""
if self.hist_x_min is None or self.hist_x_max is None or self.hist_n_bin is None:
return None
if self.x_log:
return np.logspace(np.log10(self.hist_x_min),
np.log10(self.hist_x_max),
self.hist_n_bin + 1)
else:
return np.linspace(self.hist_x_min, self.hist_x_max,
self.hist_n_bin + 1)
@defer_draw
def _layers_changed(self, *args):
self.x_att_helper.set_multiple_data(self.layers_data)
class HistogramLayerState(MatplotlibLayerState):
"""
A state class that includes all the attributes for layers in a histogram plot.
"""
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/histogram/state.py",
"copies": "1",
"size": "6012",
"license": "bsd-3-clause",
"hash": 6003203792581838000,
"line_mean": 35.4363636364,
"line_max": 97,
"alpha_frac": 0.5590485695,
"autogenerated": false,
"ratio": 4.109364319890636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001044213537209289,
"num_lines": 165
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core import Subset
from glue.config import data_exporter
__all__ = []
@data_exporter(label='FITS (1 component/HDU)', extension=['fits', 'fit'])
def fits_writer(data, filename):
"""
Write a dataset or a subset to a FITS file.
Parameters
----------
data: `~glue.core.data.Data` or `~glue.core.subset.Subset`
The data or subset to export
"""
if isinstance(data, Subset):
mask = data.to_mask()
data = data.data
else:
mask = None
from astropy.io import fits
hdus = fits.HDUList()
for cid in data.visible_components:
comp = data.get_component(cid)
if comp.categorical:
# TODO: emit warning
continue
else:
values = comp.data.copy()
if mask is not None:
values[~mask] = np.nan
# TODO: special behavior for PRIMARY?
hdu = fits.ImageHDU(values, name=cid.label)
hdus.append(hdu)
hdus.writeto(filename, clobber=True)
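# --- Hypothetical usage sketch (added for exposition; not part of the
# original module). 'planes.fits' is a placeholder filename: each visible,
# non-categorical component becomes one ImageHDU, and values masked out by
# a Subset are written as NaN.
#
#     from glue.core import Data
#     d = Data(x=np.ones((4, 4)), label='demo')
#     fits_writer(d, 'planes.fits')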
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_exporters/gridded_fits.py",
"copies": "2",
"size": "1098",
"license": "bsd-3-clause",
"hash": 332672681950101760,
"line_mean": 21.4081632653,
"line_max": 73,
"alpha_frac": 0.5965391621,
"autogenerated": false,
"ratio": 3.7220338983050847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5318573060405085,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.external.echo import (CallbackProperty, ListCallbackProperty,
SelectionCallbackProperty, keep_in_sync,
delay_callback)
from glue.core.state_objects import State
from glue.core.message import LayerArtistUpdatedMessage
from glue.core.exceptions import IncompatibleAttribute
from glue.utils import defer_draw
__all__ = ['DeferredDrawSelectionCallbackProperty', 'DeferredDrawCallbackProperty',
'MatplotlibDataViewerState', 'MatplotlibLayerState']
class DeferredDrawCallbackProperty(CallbackProperty):
"""
A callback property where drawing is deferred until
after notify has called all callback functions.
"""
@defer_draw
def notify(self, *args, **kwargs):
super(DeferredDrawCallbackProperty, self).notify(*args, **kwargs)
class DeferredDrawSelectionCallbackProperty(SelectionCallbackProperty):
"""
A callback property where drawing is deferred until
after notify has called all callback functions.
"""
@defer_draw
def notify(self, *args, **kwargs):
super(DeferredDrawSelectionCallbackProperty, self).notify(*args, **kwargs)
class MatplotlibDataViewerState(State):
"""
A base class that includes common attributes for viewers based on
Matplotlib.
"""
x_min = DeferredDrawCallbackProperty(docstring='Lower limit of the visible x range')
x_max = DeferredDrawCallbackProperty(docstring='Upper limit of the visible x range')
y_min = DeferredDrawCallbackProperty(docstring='Lower limit of the visible y range')
y_max = DeferredDrawCallbackProperty(docstring='Upper limit of the visible y range')
x_log = DeferredDrawCallbackProperty(False, docstring='Whether the x axis is logarithmic')
y_log = DeferredDrawCallbackProperty(False, docstring='Whether the y axis is logarithmic')
aspect = DeferredDrawCallbackProperty('auto', docstring='Aspect ratio for the axes')
layers = ListCallbackProperty(docstring='A collection of all layers in the viewer')
@property
def layers_data(self):
return [layer_state.layer for layer_state in self.layers]
class MatplotlibLayerState(State):
"""
A base class that includes common attributes for all layers in viewers based
on Matplotlib.
"""
layer = DeferredDrawCallbackProperty(docstring='The :class:`~glue.core.data.Data` '
'or :class:`~glue.core.subset.Subset` '
'represented by the layer')
color = DeferredDrawCallbackProperty(docstring='The color used to display '
'the data')
alpha = DeferredDrawCallbackProperty(docstring='The transparency used to '
'display the data')
zorder = DeferredDrawCallbackProperty(0, docstring='A value used to indicate '
'which layers are shown in '
'front of which (larger '
'zorder values are on top '
'of other layers)')
visible = DeferredDrawCallbackProperty(True, docstring='Whether the layer '
'is currently visible')
def __init__(self, viewer_state=None, **kwargs):
super(MatplotlibLayerState, self).__init__(**kwargs)
self.viewer_state = viewer_state
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self._sync_color = keep_in_sync(self, 'color', self.layer.style, 'color')
self._sync_alpha = keep_in_sync(self, 'alpha', self.layer.style, 'alpha')
self.add_global_callback(self._notify_layer_update)
def _notify_layer_update(self, **kwargs):
message = LayerArtistUpdatedMessage(self)
if self.layer is not None and self.layer.hub is not None:
self.layer.hub.broadcast(message)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/matplotlib/state.py",
"copies": "1",
"size": "4200",
"license": "bsd-3-clause",
"hash": -3179074087328102000,
"line_mean": 39.7766990291,
"line_max": 94,
"alpha_frac": 0.6342857143,
"autogenerated": false,
"ratio": 4.751131221719457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002252743566962345,
"num_lines": 103
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.external.echo import (CallbackProperty, SelectionCallbackProperty,
delay_callback, ListCallbackProperty)
from glue.core.state_objects import StateAttributeLimitsHelper
from glue.viewers.common.state import ViewerState
__all__ = ['Vispy3DViewerState']
class Vispy3DViewerState(ViewerState):
"""
A common state object for all vispy 3D viewers
"""
x_att = SelectionCallbackProperty()
x_min = CallbackProperty(0)
x_max = CallbackProperty(1)
x_stretch = CallbackProperty(1.)
y_att = SelectionCallbackProperty(default_index=1)
y_min = CallbackProperty(0)
y_max = CallbackProperty(1)
y_stretch = CallbackProperty(1.)
z_att = SelectionCallbackProperty(default_index=2)
z_min = CallbackProperty(0)
z_max = CallbackProperty(1)
z_stretch = CallbackProperty(1.)
visible_axes = CallbackProperty(True)
perspective_view = CallbackProperty(False)
clip_data = CallbackProperty(True)
native_aspect = CallbackProperty(False)
layers = ListCallbackProperty()
limits_cache = CallbackProperty()
def _update_priority(self, name):
if name == 'layers':
return 2
elif name.endswith(('_min', '_max')):
return 0
else:
return 1
def __init__(self, **kwargs):
super(Vispy3DViewerState, self).__init__(**kwargs)
if self.limits_cache is None:
self.limits_cache = {}
self.x_lim_helper = StateAttributeLimitsHelper(self, attribute='x_att',
lower='x_min', upper='x_max',
cache=self.limits_cache)
self.y_lim_helper = StateAttributeLimitsHelper(self, attribute='y_att',
lower='y_min', upper='y_max',
cache=self.limits_cache)
self.z_lim_helper = StateAttributeLimitsHelper(self, attribute='z_att',
lower='z_min', upper='z_max',
cache=self.limits_cache)
# TODO: if limits_cache is re-assigned to a different object, we need to
# update the attribute helpers. However if in future we make limits_cache
# into a smart dictionary that can call callbacks when elements are
# changed then we shouldn't always call this. It'd also be nice to
# avoid this altogether and make it more clean.
self.add_callback('limits_cache', self._update_limits_cache)
def reset_limits(self):
self.x_lim_helper.log = False
self.x_lim_helper.percentile = 100.
self.x_lim_helper.update_values(force=True)
self.y_lim_helper.log = False
self.y_lim_helper.percentile = 100.
self.y_lim_helper.update_values(force=True)
self.z_lim_helper.log = False
self.z_lim_helper.percentile = 100.
self.z_lim_helper.update_values(force=True)
def _update_limits_cache(self, *args):
self.x_lim_helper._cache = self.limits_cache
self.x_lim_helper._update_attribute()
self.y_lim_helper._cache = self.limits_cache
self.y_lim_helper._update_attribute()
self.z_lim_helper._cache = self.limits_cache
self.z_lim_helper._update_attribute()
@property
def aspect(self):
# TODO: this could be cached based on the limits, but is not urgent
aspect = np.array([1, 1, 1], dtype=float)
if self.native_aspect:
aspect[0] = 1.
aspect[1] = (self.y_max - self.y_min) / (self.x_max - self.x_min)
aspect[2] = (self.z_max - self.z_min) / (self.x_max - self.x_min)
aspect /= aspect.max()
return aspect
def reset(self):
pass
def flip_x(self):
self.x_lim_helper.flip_limits()
def flip_y(self):
self.y_lim_helper.flip_limits()
def flip_z(self):
self.z_lim_helper.flip_limits()
@property
def clip_limits(self):
return (self.x_min, self.x_max,
self.y_min, self.y_max,
self.z_min, self.z_max)
def set_limits(self, x_min, x_max, y_min, y_max, z_min, z_max):
with delay_callback(self, 'x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max'):
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
self.z_min = z_min
self.z_max = z_max
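# --- Illustrative sketch (added for exposition; not part of the original
# module): with native_aspect enabled, the ``aspect`` property scales the y
# and z axes by their data ranges relative to x and then normalises so the
# largest factor is 1. Here x_range, y_range and z_range stand for the
# (max - min) widths of the three limits.
def _demo_native_aspect(x_range, y_range, z_range):
    aspect = np.array([1., y_range / x_range, z_range / x_range])
    aspect /= aspect.max()
    return aspect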
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/common/viewer_state.py",
"copies": "2",
"size": "4667",
"license": "bsd-2-clause",
"hash": 4051047346239132000,
"line_mean": 34.6259541985,
"line_max": 88,
"alpha_frac": 0.5770302121,
"autogenerated": false,
"ratio": 3.724660814046289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005425257531817083,
"num_lines": 131
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.external import six
from glue.core.data_factories.helpers import has_extension
from glue.core.component import Component, CategoricalComponent
from glue.core.data import Data
from glue.config import data_factory, qglue_parser
__all__ = ['pandas_read_table']
def panda_process(indf):
"""
Build a data set from a table using pandas. This attempts to respect
categorical data input by letting pandas.read_csv infer the type
"""
result = Data()
for name, column in indf.iteritems():
if (column.dtype == np.object) | (column.dtype == np.bool):
# try to salvage numerical data
coerced = column.convert_objects(convert_numeric=True)
if (coerced.dtype != column.dtype) and coerced.isnull().mean() < 0.4:
c = Component(coerced.values)
else:
# pandas has a 'special' nan implementation and this doesn't
# play well with np.unique
c = CategoricalComponent(column.fillna(''))
else:
c = Component(column.values)
# convert header to string - in some cases if the first row contains
# numbers, these are cast to numerical types, so we want to change that
# here.
if not isinstance(name, six.string_types):
name = str(name)
# strip off leading #
name = name.strip()
if name.startswith('#'):
name = name[1:].strip()
result.add_component(c, name)
return result
@data_factory(label="Pandas Table", identifier=has_extension('csv csv txt tsv tbl dat'))
def pandas_read_table(path, **kwargs):
""" A factory for reading tabular data using pandas
:param path: path/to/file
:param kwargs: All kwargs are passed to pandas.read_csv
:returns: :class:`glue.core.data.Data` object
"""
import pandas as pd
try:
from pandas.parser import CParserError
except ImportError: # pragma: no cover
from pandas._parser import CParserError
# iterate over common delimiters to search for best option
delimiters = kwargs.pop('delimiter', [None] + list(',|\t '))
fallback = None
for d in delimiters:
try:
indf = pd.read_csv(path, delimiter=d, **kwargs)
# ignore files parsed to empty dataframes
if len(indf) == 0:
continue
# only use files parsed to single-column dataframes
# if we don't find a better strategy
if len(indf.columns) < 2:
fallback = indf
continue
return panda_process(indf)
except CParserError:
continue
if fallback is not None:
return panda_process(fallback)
raise IOError("Could not parse %s using pandas" % path)
try:
import pandas as pd
except ImportError:
pass
else:
@qglue_parser(pd.DataFrame)
def _parse_data_dataframe(data, label):
label = label or 'Data'
result = Data(label=label)
for c in data.columns:
result.add_component(data[c], str(c))
return [result]
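# --- Hypothetical usage sketch (added for exposition; not part of the
# original module), assuming a pandas version contemporary with this code
# (DataFrame.iteritems and convert_objects are still available):
#
#     df = pd.DataFrame({'flux': [1.0, 2.0, 3.0], 'band': ['g', 'r', 'g']})
#     d = panda_process(df)
#     # 'flux' becomes a plain Component, 'band' a CategoricalComponent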
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_factories/pandas.py",
"copies": "2",
"size": "3213",
"license": "bsd-3-clause",
"hash": -3678337327898444300,
"line_mean": 29.8942307692,
"line_max": 88,
"alpha_frac": 0.617491441,
"autogenerated": false,
"ratio": 4.135135135135135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002252988173788393,
"num_lines": 104
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.utils import defer_draw
from glue.core.exceptions import IncompatibleAttribute
from glue.core.subset import Subset
# from glue.core.layer_artist import MatplotlibLayerArtist, ChangedTrigger
from glue.viewers.matplotlib.layer_artist import MatplotlibLayerArtist
from glue.plugins.dendro_viewer.state import DendrogramLayerState
class DendrogramLayerArtist(MatplotlibLayerArtist):
# X vertices of structure i are in layout[0][3*i: 3*i+3]
# layout = ChangedTrigger()
_layer_state_cls = DendrogramLayerState
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(DendrogramLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
# Watch for changes in the viewer state which would require the
# layers to be redrawn
self._viewer_state.add_global_callback(self._update)
self.state.add_global_callback(self._update)
# TODO: following is temporary
self.state.data_collection = self._viewer_state.data_collection
self.data_collection = self._viewer_state.data_collection
self.reset_cache()
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
@defer_draw
def _update_dendrogram(self):
self.remove()
if self.state.viewer_state._layout is None:
return
# layout[0] is [x0, x0, x[parent0], nan, ...]
# layout[1] is [y0, y[parent0], y[parent0], nan, ...]
ids = 3 * np.arange(self.layer.data.size)
try:
if isinstance(self.layer, Subset):
ids = ids[self.layer.to_mask()]
except IncompatibleAttribute as exc:
self.disable_invalid_attributes(*exc.args)
return False
x, y = self.state.viewer_state._layout.xy
blank = np.zeros(ids.size) * np.nan
x = np.column_stack([x[ids], x[ids + 1],
x[ids + 2], blank]).ravel()
y = np.column_stack([y[ids], y[ids + 1],
y[ids + 2], blank]).ravel()
self.mpl_artists = self.axes.plot(x, y, '-')
@defer_draw
def _update_visual_attributes(self):
if not self.enabled:
return
for mpl_artist in self.mpl_artists:
mpl_artist.set_visible(self.state.visible)
mpl_artist.set_zorder(self.state.zorder)
mpl_artist.set_color(self.state.color)
mpl_artist.set_alpha(self.state.alpha)
mpl_artist.set_linewidth(self.state.linewidth)
self.redraw()
@defer_draw
def _update(self, force=False, **kwargs):
if (self._viewer_state.height_att is None or
self._viewer_state.parent_att is None or
self._viewer_state.order_att is None or
self.state.layer is None):
return
# Figure out which attributes are different from before. Ideally we shouldn't
# need this but currently this method is called multiple times if an
# attribute is changed due to x_att changing then hist_x_min, hist_x_max, etc.
# If we can solve this so that _update_dendrogram is really only called once
# then we could consider simplifying this. Until then, we manually keep track
# of which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if force or any(prop in changed for prop in ('layer', 'height_att', 'parent_att', 'order_att')):
self._update_dendrogram()
force = True # make sure scaling and visual attributes are updated
if force or any(prop in changed for prop in ('linewidth', 'alpha', 'color', 'zorder', 'visible')):
self._update_visual_attributes()
@defer_draw
def update(self):
# Recompute the histogram
self._update(force=True)
# Reset the axes stack so that pressing the home button doesn't go back
# to a previous irrelevant view.
self.axes.figure.canvas.toolbar.update()
self.redraw()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/plugins/dendro_viewer/layer_artist.py",
"copies": "1",
"size": "4685",
"license": "bsd-3-clause",
"hash": -760405994624302100,
"line_mean": 34.4924242424,
"line_max": 106,
"alpha_frac": 0.6098185699,
"autogenerated": false,
"ratio": 3.8783112582781456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9985232271824477,
"avg_score": 0.0005795112707337011,
"num_lines": 132
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.utils import defer_draw
from glue.viewers.histogram.state import HistogramLayerState
from glue.viewers.matplotlib.layer_artist import MatplotlibLayerArtist
from glue.core.exceptions import IncompatibleAttribute
class HistogramLayerArtist(MatplotlibLayerArtist):
_layer_state_cls = HistogramLayerState
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(HistogramLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
# Watch for changes in the viewer state which would require the
# layers to be redrawn
self._viewer_state.add_global_callback(self._update_histogram)
self.state.add_global_callback(self._update_histogram)
self.reset_cache()
def remove(self):
super(HistogramLayerArtist, self).remove()
self.mpl_hist_unscaled = np.array([])
self.mpl_hist = np.array([])
self.mpl_bins = np.array([])
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
@defer_draw
def _calculate_histogram(self):
self.remove()
try:
x = self.layer[self._viewer_state.x_att]
except AttributeError:
return
except (IncompatibleAttribute, IndexError):
self.disable_invalid_attributes(self._viewer_state.x_att)
return
else:
self.enable()
x = x[~np.isnan(x) & (x >= self._viewer_state.hist_x_min) &
(x <= self._viewer_state.hist_x_max)]
if len(x) == 0:
self.redraw()
return
# For histogram
xmin, xmax = sorted([self._viewer_state.hist_x_min, self._viewer_state.hist_x_max])
if self._viewer_state.x_log:
range = None
bins = np.logspace(np.log10(xmin), np.log10(xmax), self._viewer_state.hist_n_bin)
else:
range = [xmin, xmax]
bins = self._viewer_state.hist_n_bin
self.mpl_hist_unscaled, self.mpl_bins, self.mpl_artists = self.axes.hist(x, range=range, bins=bins)
@defer_draw
def _scale_histogram(self):
if self.mpl_bins.size == 0 or self.mpl_hist_unscaled.sum() == 0:
return
self.mpl_hist = self.mpl_hist_unscaled.astype(np.float)
dx = self.mpl_bins[1] - self.mpl_bins[0]
if self._viewer_state.cumulative:
self.mpl_hist = self.mpl_hist.cumsum()
if self._viewer_state.normalize:
self.mpl_hist /= self.mpl_hist.max()
elif self._viewer_state.normalize:
self.mpl_hist /= (self.mpl_hist.sum() * dx)
bottom = 0 if not self._viewer_state.y_log else 1e-100
for mpl_artist, y in zip(self.mpl_artists, self.mpl_hist):
mpl_artist.set_height(y)
x, y = mpl_artist.get_xy()
mpl_artist.set_xy((x, bottom))
# We have to do the following to make sure that we reset the y_max as
# needed. We can't simply reset based on the maximum for this layer
# because other layers might have other values, and we also can't do:
#
# self._viewer_state.y_max = max(self._viewer_state.y_max, result[0].max())
#
# because this would never allow y_max to get smaller.
self.state._y_max = self.mpl_hist.max()
if self._viewer_state.y_log:
self.state._y_max *= 2
else:
self.state._y_max *= 1.2
if self._viewer_state.y_log:
self.state._y_min = self.mpl_hist[self.mpl_hist > 0].min() / 10
else:
self.state._y_min = 0
largest_y_max = max(getattr(layer, '_y_max', 0) for layer in self._viewer_state.layers)
if largest_y_max != self._viewer_state.y_max:
self._viewer_state.y_max = largest_y_max
smallest_y_min = min(getattr(layer, '_y_min', np.inf) for layer in self._viewer_state.layers)
if smallest_y_min != self._viewer_state.y_min:
self._viewer_state.y_min = smallest_y_min
self.redraw()
@defer_draw
def _update_visual_attributes(self):
if not self.enabled:
return
for mpl_artist in self.mpl_artists:
mpl_artist.set_visible(self.state.visible)
mpl_artist.set_zorder(self.state.zorder)
mpl_artist.set_edgecolor('none')
mpl_artist.set_facecolor(self.state.color)
mpl_artist.set_alpha(self.state.alpha)
self.redraw()
def _update_histogram(self, force=False, **kwargs):
if (self._viewer_state.hist_x_min is None or
self._viewer_state.hist_x_max is None or
self._viewer_state.hist_n_bin is None or
self._viewer_state.x_att is None or
self.state.layer is None):
return
# Figure out which attributes are different from before. Ideally we shouldn't
# need this but currently this method is called multiple times if an
# attribute is changed due to x_att changing then hist_x_min, hist_x_max, etc.
# If we can solve this so that _update_histogram is really only called once
# then we could consider simplifying this. Until then, we manually keep track
# of which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if force or any(prop in changed for prop in ('layer', 'x_att', 'hist_x_min', 'hist_x_max', 'hist_n_bin', 'x_log')):
self._calculate_histogram()
force = True # make sure scaling and visual attributes are updated
if force or any(prop in changed for prop in ('y_log', 'normalize', 'cumulative')):
self._scale_histogram()
if force or any(prop in changed for prop in ('alpha', 'color', 'zorder', 'visible')):
self._update_visual_attributes()
@defer_draw
def update(self):
self._update_histogram(force=True)
self.redraw()
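# --- Illustrative sketch (added for exposition; not part of the original
# module): the scaling applied in _scale_histogram reduces to this pure
# numpy transform of the raw counts, where dx is the bin width taken from
# the first pair of edges. A cumulative histogram is normalised to peak at
# 1, while a plain normalised histogram integrates to 1.
def _demo_scale_counts(counts, dx, cumulative=False, normalize=False):
    scaled = np.asarray(counts, dtype=float)
    if cumulative:
        scaled = scaled.cumsum()
        if normalize:
            scaled /= scaled.max()
    elif normalize:
        scaled /= (scaled.sum() * dx)
    return scaled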
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/histogram/layer_artist.py",
"copies": "1",
"size": "6605",
"license": "bsd-3-clause",
"hash": 1384985816923060200,
"line_mean": 35.2912087912,
"line_max": 123,
"alpha_frac": 0.5910673732,
"autogenerated": false,
"ratio": 3.700280112044818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4791347485244818,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.utils import unbroadcast, broadcast_to
__all__ = ['points_inside_poly', 'polygon_line_intersections']
def points_inside_poly(x, y, vx, vy):
original_shape = x.shape
x = unbroadcast(x)
y = unbroadcast(y)
x, y = np.broadcast_arrays(x, y)
reduced_shape = x.shape
x = x.flat
y = y.flat
from matplotlib.path import Path
p = Path(np.column_stack((vx, vy)))
keep = ((x >= np.min(vx)) &
(x <= np.max(vx)) &
(y >= np.min(vy)) &
(y <= np.max(vy)))
inside = np.zeros(len(x), bool)
x = x[keep]
y = y[keep]
coords = np.column_stack((x, y))
    good = np.isfinite(x) & np.isfinite(y)
    # Chained fancy indexing (inside[keep][~good] = ...) would assign into a
    # temporary copy and silently have no effect, so fold the finiteness mask
    # in directly when writing the containment test back.
    inside[keep] = p.contains_points(coords).astype(bool) & good
inside = inside.reshape(reduced_shape)
inside = broadcast_to(inside, original_shape)
return inside
def polygon_line_intersections(px, py, xval=None, yval=None):
"""
Find all the segments of intersection between a polygon and an infinite
horizontal/vertical line.
The polygon is assumed to be closed. Due to numerical precision, the
behavior at the edges of polygons is not always predictable, i.e. a point
on the edge of a polygon may be considered inside or outside the polygon.
Parameters
----------
px, py : `~numpy.ndarray`
The vertices of the polygon
xval : float, optional
The x coordinate of the line (for vertical lines). This should only be
specified if yval is not specified.
yval : float, optional
The y coordinate of the line (for horizontal lines). This should only be
specified if xval is not specified.
Returns
-------
segments : list
A list of segments given as tuples of coordinates along the line.
"""
if xval is not None and yval is not None:
raise ValueError("Only one of xval or yval should be specified")
elif xval is None and yval is None:
raise ValueError("xval or yval should be specified")
if yval is not None:
return polygon_line_intersections(py, px, xval=yval)
px = np.asarray(px, dtype=float)
py = np.asarray(py, dtype=float)
# Make sure that the polygon is closed
if px[0] != px[-1] or py[0] != py[-1]:
px = np.hstack([px, px[0]])
py = np.hstack([py, py[0]])
# For convenience
x1, x2 = px[:-1], px[1:]
y1, y2 = py[:-1], py[1:]
# Vertices that intersect
keep1 = (px == xval)
points1 = py[keep1]
# Segments (excluding vertices) that intersect
keep2 = ((x1 < xval) & (x2 > xval)) | ((x2 < xval) & (x1 > xval))
points2 = (y1 + (y2 - y1) * (xval - x1) / (x2 - x1))[keep2]
# Make unique and sort
points = np.array(np.sort(np.unique(np.hstack([points1, points2]))))
# Because of various corner cases, we don't actually know which pairs of
# points are inside the polygon, so we check this using the mid-points
ymid = 0.5 * (points[:-1] + points[1:])
xmid = np.repeat(xval, len(ymid))
keep = points_inside_poly(xmid, ymid, px, py)
segments = list(zip(points[:-1][keep], points[1:][keep]))
return segments
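# --- Illustrative example (added for exposition; not part of the original
# module): for the unit square, a vertical line at x = 0.5 intersects the
# polygon in a single segment from y = 0 to y = 1, and the mid-point test
# above marks an interior point as inside.
def _demo_unit_square():
    px = np.array([0., 1., 1., 0.])
    py = np.array([0., 0., 1., 1.])
    inside = points_inside_poly(np.array([0.5]), np.array([0.5]), px, py)
    segments = polygon_line_intersections(px, py, xval=0.5)
    return inside, segments  # -> (array([ True]), [(0.0, 1.0)])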
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/geometry.py",
"copies": "3",
"size": "3267",
"license": "bsd-3-clause",
"hash": -2994857391096635400,
"line_mean": 27.9115044248,
"line_max": 80,
"alpha_frac": 0.6155494337,
"autogenerated": false,
"ratio": 3.413793103448276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010925379656943078,
"num_lines": 113
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.viewers.common.qt.data_viewer_with_state import DataViewerWithState
from glue.external.echo import delay_callback
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from ..extern.vispy.util import keys
from .vispy_widget import VispyWidgetHelper
from .viewer_options import VispyOptionsWidget
from .toolbar import VispyViewerToolbar
from .viewer_state import Vispy3DViewerState
from .compat import update_viewer_state
BROKEN_PYQT5_MESSAGE = ("The version of PyQt5 you are using does not appear to "
"support OpenGL. See <a href='http://docs.glueviz.org/en"
"/stable/known_issues.html#d-viewers-not-working-on-linux"
"-with-pyqt5'>here</a> for more information about fixing "
"this issue.")
class BaseVispyViewer(DataViewerWithState):
_state_cls = Vispy3DViewerState
_toolbar_cls = VispyViewerToolbar
_options_cls = VispyOptionsWidget
tools = ['vispy:reset', 'vispy:rotate']
subtools = {'save': ['vispy:save']}
# If imageio is available, we can add the record icon
try:
import imageio # noqa
except ImportError:
pass
else:
tools.insert(1, 'vispy:record')
def __init__(self, session, state=None, parent=None):
super(BaseVispyViewer, self).__init__(session, state=state, parent=parent)
self._vispy_widget = VispyWidgetHelper(viewer_state=self.state)
self.setCentralWidget(self._vispy_widget.canvas.native)
self.state.add_callback('clip_data', self._update_clip)
self.state.add_callback('x_min', self._update_clip)
self.state.add_callback('x_max', self._update_clip)
self.state.add_callback('y_min', self._update_clip)
self.state.add_callback('y_max', self._update_clip)
self.state.add_callback('z_min', self._update_clip)
self.state.add_callback('z_max', self._update_clip)
self.status_label = None
self._opengl_ok = None
self._ready_draw = False
viewbox = self._vispy_widget.view.camera.viewbox
viewbox.events.mouse_wheel.connect(self.camera_mouse_wheel)
viewbox.events.mouse_move.connect(self.camera_mouse_move)
viewbox.events.mouse_press.connect(self.camera_mouse_press)
viewbox.events.mouse_release.connect(self.camera_mouse_release)
def paintEvent(self, *args, **kwargs):
super(BaseVispyViewer, self).paintEvent(*args, **kwargs)
if self._opengl_ok is None:
self._opengl_ok = self._vispy_widget.canvas.native.context() is not None
if not self._opengl_ok:
QtWidgets.QMessageBox.critical(self, "Error", BROKEN_PYQT5_MESSAGE)
self.close(warn=False)
self._vispy_widget.canvas.native.close()
def _update_appearance_from_settings(self, message):
self._vispy_widget._update_appearance_from_settings()
def redraw(self):
if self._ready_draw:
self._vispy_widget.canvas.render()
def get_layer_artist(self, cls, layer=None, layer_state=None):
return cls(self, layer=layer, layer_state=layer_state)
def show_status(self, text):
statusbar = self.statusBar()
statusbar.showMessage(text)
def _update_clip(self, *args):
for layer_artist in self._layer_artist_container:
if self.state.clip_data:
layer_artist.set_clip(self.state.clip_limits)
else:
layer_artist.set_clip(None)
@staticmethod
def update_viewer_state(rec, context):
return update_viewer_state(rec, context)
def camera_mouse_wheel(self, event=None):
scale = (1.1 ** - event.delta[1])
with delay_callback(self.state, 'x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max'):
xmid = 0.5 * (self.state.x_min + self.state.x_max)
dx = (self.state.x_max - xmid) * scale
self.state.x_min = xmid - dx
self.state.x_max = xmid + dx
ymid = 0.5 * (self.state.y_min + self.state.y_max)
dy = (self.state.y_max - ymid) * scale
self.state.y_min = ymid - dy
self.state.y_max = ymid + dy
zmid = 0.5 * (self.state.z_min + self.state.z_max)
dz = (self.state.z_max - zmid) * scale
self.state.z_min = zmid - dz
self.state.z_max = zmid + dz
self._update_clip()
event.handled = True
def camera_mouse_press(self, event=None):
self._initial_position = (self.state.x_min, self.state.x_max,
self.state.y_min, self.state.y_max,
self.state.z_min, self.state.z_max)
self._width = (self.state.x_max - self.state.x_min,
self.state.y_max - self.state.y_min,
self.state.z_max - self.state.z_min)
def camera_mouse_release(self, event=None):
self._initial_position = None
self._width = None
def camera_mouse_move(self, event=None):
if 1 in event.buttons and keys.SHIFT in event.mouse_event.modifiers:
camera = self._vispy_widget.view.camera
norm = np.mean(camera._viewbox.size)
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
dist = (p1 - p2) / norm * camera._scale_factor
dist[1] *= -1
dx, dy, dz = camera._dist_to_trans(dist)
with delay_callback(self.state, 'x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max'):
self.state.x_min = self._initial_position[0] + self._width[0] * dx
self.state.x_max = self._initial_position[1] + self._width[0] * dx
self.state.y_min = self._initial_position[2] + self._width[1] * dy
self.state.y_max = self._initial_position[3] + self._width[1] * dy
self.state.z_min = self._initial_position[4] + self._width[2] * dz
self.state.z_max = self._initial_position[5] + self._width[2] * dz
event.handled = True
def show(self):
# WORKAROUND:
# Due to a bug in Qt5, a hidden toolbar in glue causes a grey
# rectangle to be overlaid on top of the glue window. Therefore
# we check if the toolbar is hidden, and if so we make it into a
# floating toolbar temporarily - still hidden, so this will not
# be noticeable to the user.
# tbar.setAllowedAreas(Qt.NoToolBarArea)
if self._session.application is not None:
tbar = self._session.application._mode_toolbar
hidden = tbar.isHidden()
if hidden:
original_flags = tbar.windowFlags()
tbar.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
else:
hidden = False
super(BaseVispyViewer, self).show()
if hidden:
tbar.setWindowFlags(original_flags)
tbar.hide()
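# --- Illustrative sketch (added for exposition; not part of the original
# module): camera_mouse_wheel zooms each axis about its mid-point by a
# factor of 1.1 per wheel step, which this standalone helper reproduces for
# a single (lo, hi) interval.
def _demo_zoom_interval(lo, hi, wheel_steps):
    scale = 1.1 ** -wheel_steps
    mid = 0.5 * (lo + hi)
    half = (hi - mid) * scale
    return mid - half, mid + half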
| {
"repo_name": "astrofrog/glue-3d-viewer",
"path": "glue_vispy_viewers/common/vispy_data_viewer.py",
"copies": "2",
"size": "7093",
"license": "bsd-2-clause",
"hash": -8522056509379993000,
"line_mean": 35.9427083333,
"line_max": 98,
"alpha_frac": 0.6017200056,
"autogenerated": false,
"ratio": 3.494088669950739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095808675550739,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.viewers.common.qt.mouse_mode import PathMode
from glue.viewers.image.qt import StandaloneImageViewer
from glue.config import viewer_tool
from glue.utils import defer_draw
@viewer_tool
class PVSlicerMode(PathMode):
icon = 'glue_slice'
tool_id = 'slice'
action_text = 'Slice Extraction'
tool_tip = ('Extract a slice from an arbitrary path\n'
' ENTER accepts the path\n'
' ESCAPE clears the path')
status_tip = 'Draw a path then press ENTER to extract slice, or press ESC to cancel'
shortcut = 'P'
def __init__(self, viewer, **kwargs):
super(PVSlicerMode, self).__init__(viewer, **kwargs)
self._roi_callback = self._extract_callback
self._slice_widget = None
self.viewer.state.add_callback('reference_data', self._on_reference_data_change)
def _on_reference_data_change(self, reference_data):
if reference_data is not None:
self.enabled = reference_data.ndim == 3
def _clear_path(self):
self.viewer.hide_crosshairs()
self.clear()
def _extract_callback(self, mode):
"""
Extract a PV-like slice, given a path traced on the widget
"""
vx, vy = mode.roi().to_polygon()
self._build_from_vertices(vx, vy)
def _build_from_vertices(self, vx, vy):
pv_slice, x, y, wcs = _slice_from_path(vx, vy, self.viewer.state.reference_data,
self.viewer.state.layers[0].attribute,
self.viewer.state.wcsaxes_slice[::-1])
if self._slice_widget is None:
self._slice_widget = PVSliceWidget(image=pv_slice, wcs=wcs,
image_viewer=self.viewer,
x=x, y=y, interpolation='nearest')
self.viewer._session.application.add_widget(self._slice_widget,
label='Custom Slice')
self._slice_widget.window_closed.connect(self._clear_path)
else:
self._slice_widget.set_image(image=pv_slice, wcs=wcs,
x=x, y=y, interpolation='nearest')
result = self._slice_widget
result.axes.set_xlabel("Position Along Slice")
result.axes.set_ylabel(_slice_label(self.viewer.state.reference_data, self.viewer.state.wcsaxes_slice[::-1]))
result.show()
def close(self):
if self._slice_widget:
self._slice_widget.close()
return super(PVSlicerMode, self).close()
class PVSliceWidget(StandaloneImageViewer):
""" A standalone image widget with extra interactivity for PV slices """
def __init__(self, image=None, wcs=None, image_viewer=None,
x=None, y=None, **kwargs):
"""
:param image: 2D Numpy array representing the PV Slice
:param wcs: WCS for the PV slice
:param image_viewer: Parent ImageViewer this was extracted from
:param kwargs: Extra keywords are passed to imshow
"""
self._crosshairs = None
self._parent = image_viewer
super(PVSliceWidget, self).__init__(image=image, wcs=wcs, **kwargs)
conn = self.axes.figure.canvas.mpl_connect
self._down_id = conn('button_press_event', self._on_click)
self._move_id = conn('motion_notify_event', self._on_move)
self.axes.format_coord = self._format_coord
self._x = x
self._y = y
self._parent.state.add_callback('x_att', self.reset)
self._parent.state.add_callback('y_att', self.reset)
def _format_coord(self, x, y):
"""
Return a formatted location label for the taskbar
:param x: x pixel location in slice array
:param y: y pixel location in slice array
"""
# xy -> xyz in image view
pix = self._pos_in_parent(xdata=x, ydata=y)
# xyz -> data pixel coords
# accounts for fact that image might be shown transposed/rotated
s = list(self._slc)
idx = _slice_index(self._parent.state.reference_data, self._slc)
s[s.index('x')] = pix[0]
s[s.index('y')] = pix[1]
s[idx] = pix[2]
# labels = self._parent.coordinate_labels(s)
# return ' '.join(labels)
return ''
def set_image(self, image=None, wcs=None, x=None, y=None, **kwargs):
super(PVSliceWidget, self).set_image(image=image, wcs=wcs, **kwargs)
self._axes.set_aspect('auto')
self._axes.set_xlim(-0.5, image.shape[1] - 0.5)
self._axes.set_ylim(-0.5, image.shape[0] - 0.5)
self._slc = self._parent.state.wcsaxes_slice[::-1]
self._x = x
self._y = y
@defer_draw
def _sync_slice(self, event):
s = list(self._slc)
# XXX breaks if display_data changes
_, _, z = self._pos_in_parent(event)
s[_slice_index(self._parent.state.reference_data, s)] = int(z)
self._parent.state.slices = tuple(s)
@defer_draw
def _draw_crosshairs(self, event):
x, y, _ = self._pos_in_parent(event)
self._parent.show_crosshairs(x, y)
@defer_draw
def _on_move(self, event):
if not event.button:
return
if not event.inaxes or event.canvas.toolbar.mode != '':
return
self._sync_slice(event)
self._draw_crosshairs(event)
def _pos_in_parent(self, event=None, xdata=None, ydata=None):
if event is not None:
xdata = event.xdata
ydata = event.ydata
# Find position slice where cursor is
ind = int(round(np.clip(xdata, 0, self._im_array.shape[1] - 1)))
# Find pixel coordinate in input image for this slice
x = self._x[ind]
y = self._y[ind]
        # The third coordinate in the input WCS is simply the second
# coordinate in the PV slice.
z = ydata
return x, y, z
def _on_click(self, event):
if not event.inaxes or event.canvas.toolbar.mode != '':
return
self._sync_slice(event)
self._draw_crosshairs(event)
def reset(self, *args):
self.close()
def _slice_from_path(x, y, data, attribute, slc):
"""
Extract a PV-like slice from a cube
:param x: An array of x values to extract (pixel units)
:param y: An array of y values to extract (pixel units)
:param data: :class:`~glue.core.data.Data`
    :param attribute: :class:`~glue.core.data.Component`
:param slc: orientation of the image widget that `pts` are defined on
:returns: (slice, x, y)
slice is a 2D Numpy array, corresponding to a "PV ribbon"
cutout from the cube
x and y are the resampled points along which the
ribbon is extracted
:note: For >3D cubes, the "V-axis" of the PV slice is the longest
cube axis ignoring the x/y axes of `slc`
"""
from glue.external.pvextractor import Path, extract_pv_slice
p = Path(list(zip(x, y)))
cube = data[attribute]
dims = list(range(data.ndim))
s = list(slc)
ind = _slice_index(data, slc)
cube_wcs = getattr(data.coords, 'wcs', None)
# transpose cube to (z, y, x, <whatever>)
def _swap(x, s, i, j):
x[i], x[j] = x[j], x[i]
s[i], s[j] = s[j], s[i]
_swap(dims, s, ind, 0)
_swap(dims, s, s.index('y'), 1)
_swap(dims, s, s.index('x'), 2)
cube = cube.transpose(dims)
# slice down from >3D to 3D if needed
s = [slice(None)] * 3 + [slc[d] for d in dims[3:]]
cube = cube[s]
# sample cube
spacing = 1 # pixel
x, y = [np.round(_x).astype(int) for _x in p.sample_points(spacing)]
try:
result = extract_pv_slice(cube, path=p, wcs=cube_wcs, order=0)
except: # sometimes pvextractor complains due to wcs. Try to recover
result = extract_pv_slice(cube, path=p, wcs=None, order=0)
from astropy.wcs import WCS
data = result.data
wcs = WCS(result.header)
return data, x, y, wcs
def _slice_index(data, slc):
"""
The axis over which to extract PV slices
"""
return max([i for i in range(len(slc))
if isinstance(slc[i], int)],
key=lambda x: data.shape[x])
def _slice_label(data, slc):
"""
Returns a formatted axis label corresponding to the slice dimension
in a PV slice
:param data: Data that slice is extracted from
:param slc: orientation in the image widget from which the PV slice
was defined
"""
idx = _slice_index(data, slc)
return data.get_world_component_id(idx).label
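# --- Example (editor's addition): hedged sketch of calling _slice_from_path on a
# synthetic 3-D cube along a two-point path. The Data label, component name and
# the widget-orientation tuple (0, 'y', 'x') are illustrative assumptions, and the
# call relies on the vendored pvextractor accepting a bare numpy cube, as the
# code above already assumes.
if __name__ == '__main__':
    from glue.core import Data
    cube = Data(flux=np.random.random((4, 5, 6)), label='cube')
    pv, xs, ys, pv_wcs = _slice_from_path(np.array([0., 5.]), np.array([0., 4.]),
                                          cube, cube.id['flux'], (0, 'y', 'x'))
    print(pv.shape)  # (cube z-length, number of samples along the path)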
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/plugins/tools/pv_slicer/qt/pv_slicer.py",
"copies": "1",
"size": "8780",
"license": "bsd-3-clause",
"hash": -501654696707567900,
"line_mean": 33.0310077519,
"line_max": 117,
"alpha_frac": 0.5824601367,
"autogenerated": false,
"ratio": 3.5176282051282053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4600088341828205,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.viewers.common.qt.mouse_mode import PathMode
from glue.viewers.image.qt import StandaloneImageWidget
from glue.viewers.common.qt.mpl_widget import defer_draw
from glue.external.echo import add_callback
from glue.config import viewer_tool
@viewer_tool
class PVSlicerMode(PathMode):
icon = 'glue_slice'
tool_id = 'slice'
action_text = 'Slice Extraction'
tool_tip = ('Extract a slice from an arbitrary path\n'
' ENTER accepts the path\n'
' ESCAPE clears the path')
shortcut = 'P'
def __init__(self, viewer, **kwargs):
super(PVSlicerMode, self).__init__(viewer, **kwargs)
add_callback(viewer.client, 'display_data', self._display_data_hook)
self._roi_callback = self._extract_callback
self._slice_widget = None
def _display_data_hook(self, data):
if data is not None:
self.enabled = data.ndim > 2
def _clear_path(self):
self.clear()
def _extract_callback(self, mode):
"""
Extract a PV-like slice, given a path traced on the widget
"""
vx, vy = mode.roi().to_polygon()
self._build_from_vertices(vx, vy)
def _build_from_vertices(self, vx, vy):
pv_slice, x, y, wcs = _slice_from_path(vx, vy, self.viewer.data,
self.viewer.attribute,
self.viewer.slice)
if self._slice_widget is None:
self._slice_widget = PVSliceWidget(image=pv_slice, wcs=wcs,
image_client=self.viewer.client,
x=x, y=y, interpolation='nearest')
self.viewer._session.application.add_widget(self._slice_widget,
label='Custom Slice')
self._slice_widget.window_closed.connect(self._clear_path)
else:
self._slice_widget.set_image(image=pv_slice, wcs=wcs,
x=x, y=y, interpolation='nearest')
result = self._slice_widget
result.axes.set_xlabel("Position Along Slice")
result.axes.set_ylabel(_slice_label(self.viewer.data, self.viewer.slice))
result.show()
def close(self):
if self._slice_widget:
self._slice_widget.close()
return super(PVSlicerMode, self).close()
class PVSliceWidget(StandaloneImageWidget):
""" A standalone image widget with extra interactivity for PV slices """
def __init__(self, image=None, wcs=None, image_client=None,
x=None, y=None, **kwargs):
"""
:param image: 2D Numpy array representing the PV Slice
:param wcs: WCS for the PV slice
:param image_client: Parent ImageClient this was extracted from
:param kwargs: Extra keywords are passed to imshow
"""
self._crosshairs = None
self._parent = image_client
super(PVSliceWidget, self).__init__(image=image, wcs=wcs, **kwargs)
conn = self.axes.figure.canvas.mpl_connect
self._down_id = conn('button_press_event', self._on_click)
self._move_id = conn('motion_notify_event', self._on_move)
self.axes.format_coord = self._format_coord
self._x = x
self._y = y
def _format_coord(self, x, y):
"""
Return a formatted location label for the taskbar
:param x: x pixel location in slice array
:param y: y pixel location in slice array
"""
# xy -> xyz in image view
pix = self._pos_in_parent(xdata=x, ydata=y)
# xyz -> data pixel coords
# accounts for fact that image might be shown transposed/rotated
s = list(self._slc)
idx = _slice_index(self._parent.display_data, self._slc)
s[s.index('x')] = pix[0]
s[s.index('y')] = pix[1]
s[idx] = pix[2]
labels = self._parent.coordinate_labels(s)
return ' '.join(labels)
def set_image(self, image=None, wcs=None, x=None, y=None, **kwargs):
super(PVSliceWidget, self).set_image(image=image, wcs=wcs, **kwargs)
self._axes.set_aspect('auto')
self._axes.set_xlim(-0.5, image.shape[1] - 0.5)
self._axes.set_ylim(-0.5, image.shape[0] - 0.5)
self._slc = self._parent.slice
self._x = x
self._y = y
@defer_draw
def _sync_slice(self, event):
s = list(self._slc)
# XXX breaks if display_data changes
_, _, z = self._pos_in_parent(event)
s[_slice_index(self._parent.display_data, s)] = z
self._parent.slice = tuple(s)
@defer_draw
def _draw_crosshairs(self, event):
x, y, _ = self._pos_in_parent(event)
self._parent.show_crosshairs(x, y)
@defer_draw
def _on_move(self, event):
if not event.button:
return
if not event.inaxes or event.canvas.toolbar.mode != '':
return
self._sync_slice(event)
self._draw_crosshairs(event)
def _pos_in_parent(self, event=None, xdata=None, ydata=None):
if event is not None:
xdata = event.xdata
ydata = event.ydata
# Find position slice where cursor is
ind = np.clip(xdata, 0, self._im_array.shape[1] - 1)
# Find pixel coordinate in input image for this slice
x = self._x[ind]
y = self._y[ind]
        # The third coordinate in the input WCS is simply the second
# coordinate in the PV slice.
z = ydata
return x, y, z
def _on_click(self, event):
if not event.inaxes or event.canvas.toolbar.mode != '':
return
self._sync_slice(event)
self._draw_crosshairs(event)
def _slice_from_path(x, y, data, attribute, slc):
"""
Extract a PV-like slice from a cube
:param x: An array of x values to extract (pixel units)
:param y: An array of y values to extract (pixel units)
:param data: :class:`~glue.core.data.Data`
    :param attribute: :class:`~glue.core.data.Component`
:param slc: orientation of the image widget that `pts` are defined on
:returns: (slice, x, y)
slice is a 2D Numpy array, corresponding to a "PV ribbon"
cutout from the cube
x and y are the resampled points along which the
ribbon is extracted
:note: For >3D cubes, the "V-axis" of the PV slice is the longest
cube axis ignoring the x/y axes of `slc`
"""
from glue.external.pvextractor import Path, extract_pv_slice
p = Path(list(zip(x, y)))
cube = data[attribute]
dims = list(range(data.ndim))
s = list(slc)
ind = _slice_index(data, slc)
cube_wcs = getattr(data.coords, 'wcs', None)
# transpose cube to (z, y, x, <whatever>)
def _swap(x, s, i, j):
x[i], x[j] = x[j], x[i]
s[i], s[j] = s[j], s[i]
_swap(dims, s, ind, 0)
_swap(dims, s, s.index('y'), 1)
_swap(dims, s, s.index('x'), 2)
cube = cube.transpose(dims)
# slice down from >3D to 3D if needed
s = [slice(None)] * 3 + [slc[d] for d in dims[3:]]
cube = cube[s]
# sample cube
spacing = 1 # pixel
x, y = [np.round(_x).astype(int) for _x in p.sample_points(spacing)]
try:
result = extract_pv_slice(cube, path=p, wcs=cube_wcs, order=0)
except: # sometimes pvextractor complains due to wcs. Try to recover
result = extract_pv_slice(cube, path=p, wcs=None, order=0)
from astropy.wcs import WCS
data = result.data
wcs = WCS(result.header)
return data, x, y, wcs
def _slice_index(data, slc):
"""
The axis over which to extract PV slices
"""
return max([i for i in range(len(slc))
if isinstance(slc[i], int)],
key=lambda x: data.shape[x])
def _slice_label(data, slc):
"""
Returns a formatted axis label corresponding to the slice dimension
in a PV slice
:param data: Data that slice is extracted from
:param slc: orientation in the image widget from which the PV slice
was defined
"""
idx = _slice_index(data, slc)
return data.get_world_component_id(idx).label
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/tools/pv_slicer/qt/pv_slicer.py",
"copies": "1",
"size": "8336",
"license": "bsd-3-clause",
"hash": -5965692932789620000,
"line_mean": 32.0793650794,
"line_max": 81,
"alpha_frac": 0.5788147793,
"autogenerated": false,
"ratio": 3.5217574989438107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4600572278243811,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.roi import PointROI
from glue.core import command
from glue.core.subset import CategorySubsetState
from glue.core.exceptions import IncompatibleDataException
from glue.utils.qt import messagebox_on_error
from glue.plugins.dendro_viewer.dendro_helpers import _substructures
from glue.viewers.matplotlib.qt.data_viewer import MatplotlibDataViewer
from glue.plugins.dendro_viewer.layer_artist import DendrogramLayerArtist
from glue.plugins.dendro_viewer.qt.options_widget import DendrogramOptionsWidget
from glue.plugins.dendro_viewer.state import DendrogramViewerState
from glue.plugins.dendro_viewer.qt.layer_style_editor import DendrogramLayerStyleEditor
from glue.plugins.dendro_viewer.compat import update_dendrogram_viewer_state
__all__ = ['DendrogramViewer']
class DendrogramViewer(MatplotlibDataViewer):
LABEL = 'Dendrogram'
_toolbar_cls = MatplotlibViewerToolbar
_layer_style_widget_cls = DendrogramLayerStyleEditor
_state_cls = DendrogramViewerState
_options_cls = DendrogramOptionsWidget
_data_artist_cls = DendrogramLayerArtist
_subset_artist_cls = DendrogramLayerArtist
tools = ['select:pick']
def __init__(self, *args, **kwargs):
super(DendrogramViewer, self).__init__(*args, **kwargs)
self.axes.set_xticks([])
self.axes.spines['top'].set_visible(False)
self.axes.spines['bottom'].set_visible(False)
self.state.add_callback('_layout', self._update_limits)
self._update_limits()
def _update_limits(self, layout=None):
if self.state._layout is None:
return
x, y = self.state._layout.xy
x, y = x[::3], y[::3]
xlim = np.array([x.min(), x.max()])
xpad = .05 * xlim.ptp()
xlim[0] -= xpad
xlim[1] += xpad
ylim = np.array([y.min(), y.max()])
if self.state.y_log:
ylim = np.maximum(ylim, 1e-5)
pad = 1.05 * ylim[1] / ylim[0]
ylim[0] /= pad
ylim[1] *= pad
else:
pad = .05 * ylim.ptp()
ylim[0] -= pad
ylim[1] += pad
self.axes.set_xlim(*xlim)
self.axes.set_ylim(*ylim)
def initialize_toolbar(self):
super(DendrogramViewer, self).initialize_toolbar()
def on_move(mode):
if mode._drag:
self.apply_roi(mode.roi())
self.toolbar.tools['select:pick']._move_callback = on_move
def close(self, *args, **kwargs):
self.toolbar.tools['select:pick']._move_callback = None
super(DendrogramViewer, self).close(*args, **kwargs)
@messagebox_on_error('Failed to add data')
def add_data(self, data):
if data.ndim != 1:
raise IncompatibleDataException("Only 1-D data can be added to "
"the dendrogram viewer (tried to add a {}-D "
"dataset)".format(data.ndim))
return super(DendrogramViewer, self).add_data(data)
# TODO: move some of the ROI stuff to state class?
def _roi_to_subset_state(self, roi):
# TODO Does subset get applied to all data or just visible data?
if self.state._layout is None:
return
if not roi.defined():
return
if isinstance(roi, PointROI):
x, y = roi.x, roi.y
xs, ys = self.state._layout.xy
parent_ys = ys[1::3]
xs, ys = xs[::3], ys[::3]
delt = np.abs(x - xs)
delt[y > ys] = np.nan
delt[y < parent_ys] = np.nan
if np.isfinite(delt).any():
select = np.nanargmin(delt)
if self.state.select_substruct:
parent = self.state.reference_data[self.state.parent_att]
select = _substructures(parent, select)
select = np.asarray(select, dtype=np.int)
else:
select = np.array([], dtype=np.int)
return CategorySubsetState(self.state.reference_data.pixel_component_ids[0], select)
else:
raise TypeError("Only PointROI selections are supported")
@staticmethod
def update_viewer_state(rec, context):
return update_dendrogram_viewer_state(rec, context)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/plugins/dendro_viewer/qt/data_viewer.py",
"copies": "1",
"size": "4501",
"license": "bsd-3-clause",
"hash": -2279146837337121000,
"line_mean": 32.5895522388,
"line_max": 96,
"alpha_frac": 0.6147522773,
"autogenerated": false,
"ratio": 3.5921787709497206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47069310482497206,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue_vispy_viewers.extern.vispy.visuals.transforms import (ChainTransform, NullTransform,
MatrixTransform, STTransform)
from glue_vispy_viewers.extern.vispy.visuals.transforms.base_transform import InverseTransform
from glue_vispy_viewers.extern.vispy.visuals.transforms._util import arg_to_vec4
def as_matrix_transform(transform):
"""
Simplify a transform to a single matrix transform, which makes it a lot
faster to compute transformations.
Raises a TypeError if the transform cannot be simplified.
"""
if isinstance(transform, ChainTransform):
matrix = np.identity(4)
for tr in transform.transforms:
# We need to do the matrix multiplication manually because VisPy
            # somehow doesn't multiply matrices if there is a perspective
# component. The equation below looks like it's the wrong way
# around, but the VisPy matrices are transposed.
matrix = np.matmul(as_matrix_transform(tr).matrix, matrix)
return MatrixTransform(matrix)
elif isinstance(transform, InverseTransform):
matrix = as_matrix_transform(transform._inverse)
return MatrixTransform(matrix.inv_matrix)
elif isinstance(transform, NullTransform):
return MatrixTransform()
elif isinstance(transform, STTransform):
return transform.as_matrix()
elif isinstance(transform, MatrixTransform):
return transform
else:
raise TypeError("Could not simplify transform of type {0}".format(type(transform)))
try:
from glue.utils.qt import fix_tab_widget_fontsize # noqa
except ImportError:
import platform
from glue.utils.qt import get_qapp
def fix_tab_widget_fontsize(tab_widget):
"""
Because of a bug in Qt, tab titles on MacOS X don't have the right font size
"""
if platform.system() == 'Darwin':
app = get_qapp()
app_font = app.font()
tab_widget.setStyleSheet('font-size: {0}px'.format(app_font.pointSize()))
class NestedSTTransform(STTransform):
glsl_map = """
vec4 st_transform_map(vec4 pos) {
return vec4((pos.xyz * $innerscale.xyz + $innertranslate.xyz * pos.w).xyz
* $scale.xyz + $translate.xyz * pos.w, pos.w);
}
"""
glsl_imap = """
vec4 st_transform_imap(vec4 pos) {
return vec4((((pos.xyz - $innertranslate.xyz * pos.w) / $innerscale.xyz)
- $translate.xyz * pos.w) / $scale.xyz, pos.w);
}
"""
def __init__(self):
self.inner = STTransform()
super(NestedSTTransform, self).__init__()
@arg_to_vec4
def map(self, coords):
coords = self.inner.map(coords)
coords = super(NestedSTTransform, self).map(coords)
return coords
@arg_to_vec4
def imap(self, coords):
coords = super(NestedSTTransform, self).imap(coords)
coords = self.inner.imap(coords)
return coords
def _update_shaders(self):
self._shader_map['scale'] = self.scale
self._shader_map['translate'] = self.translate
self._shader_imap['scale'] = self.scale
self._shader_imap['translate'] = self.translate
self._shader_map['innerscale'] = self.inner.scale
self._shader_map['innertranslate'] = self.inner.translate
self._shader_imap['innerscale'] = self.inner.scale
self._shader_imap['innertranslate'] = self.inner.translate
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/utils.py",
"copies": "2",
"size": "3642",
"license": "bsd-2-clause",
"hash": 4985561222788619000,
"line_mean": 35.7878787879,
"line_max": 94,
"alpha_frac": 0.6350906096,
"autogenerated": false,
"ratio": 4.006600660066007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001027185852517364,
"num_lines": 99
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .hashfunctions import generate_hashfunctions
from .maintenance import maintenance
class CountdownBloomFilter(object):
""" Implementation of a Modified Countdown Bloom Filter. Uses a batched maintenance process instead of a continuous one.
Sanjuas-Cuxart, Josep, et al. "A lightweight algorithm for traffic filtering over sliding windows."
Communications (ICC), 2012 IEEE International Conference on. IEEE, 2012.
http://www-mobile.ecs.soton.ac.uk/home/conference/ICC2012/symposia/papers/a_lightweight_algorithm_for_traffic_filtering_over_sliding__.pdf
"""
def __init__(self, capacity, error_rate=0.001, expiration=60, disable_hard_capacity=False):
self.error_rate = error_rate
self.capacity = capacity
self.expiration = expiration
self.nbr_slices = int(np.ceil(np.log2(1.0 / error_rate)))
self.bits_per_slice = int(np.ceil((capacity * abs(np.log(error_rate))) / (self.nbr_slices * (np.log(2) ** 2))))
self.nbr_bits = self.nbr_slices * self.bits_per_slice
self.count = 0
self.cellarray = np.zeros(self.nbr_bits, dtype=np.uint8)
self.counter_init = 255
self.refresh_head = 0
self.make_hashes = generate_hashfunctions(self.bits_per_slice, self.nbr_slices)
# This is the unset ratio ... and we keep it constant at 0.5
        # since the BF will operate most of the time at its optimal
# set ratio (50 %) and the overall effect of this parameter
# on the refresh rate is very minimal anyway.
self.z = 0.5
self.estimate_z = 0
self.disable_hard_capacity = disable_hard_capacity
def _compute_z(self):
""" Compute the unset ratio (exact) """
return self.cellarray.nonzero()[0].shape[0] / self.nbr_bits
def _estimate_count(self):
""" Update the count number using the estimation of the unset ratio """
if self.estimate_z == 0:
self.estimate_z = (1.0 / self.nbr_bits)
self.estimate_z = min(self.estimate_z, 0.999999)
self.count = int(-(self.nbr_bits / self.nbr_slices) * np.log(1 - self.estimate_z))
def expiration_maintenance(self):
""" Decrement cell value if not zero
        This maintenance process needs to be executed every self.compute_refresh_time() seconds
"""
if self.cellarray[self.refresh_head] != 0:
self.cellarray[self.refresh_head] -= 1
self.refresh_head = (self.refresh_head + 1) % self.nbr_bits
def batched_expiration_maintenance_dev(self, elapsed_time):
""" Batched version of expiration_maintenance() """
num_iterations = self.num_batched_maintenance(elapsed_time)
for i in range(num_iterations):
self.expiration_maintenance()
def batched_expiration_maintenance(self, elapsed_time):
""" Batched version of expiration_maintenance()
Cython version
"""
num_iterations = self.num_batched_maintenance(elapsed_time)
self.refresh_head, nonzero = maintenance(self.cellarray, self.nbr_bits, num_iterations, self.refresh_head)
if num_iterations != 0:
self.estimate_z = float(nonzero) / float(num_iterations)
self._estimate_count()
processed_interval = num_iterations * self.compute_refresh_time()
return processed_interval
def compute_refresh_time(self):
""" Compute the refresh period for the given expiration delay """
if self.z == 0:
self.z = 1E-10
s = float(self.expiration) * (1.0/(self.nbr_bits)) * (1.0/(self.counter_init - 1 + (1.0/(self.z * (self.nbr_slices + 1)))))
return s
def num_batched_maintenance(self, elapsed_time):
return int(np.floor(elapsed_time / self.compute_refresh_time()))
def __nonzero__(self):
return True
def __bool__(self):
return True
def __contains__(self, key):
if not isinstance(key, list):
hashes = self.make_hashes(key)
else:
hashes = key
offset = 0
for k in hashes:
if self.cellarray[offset + k] == 0:
return False
offset += self.bits_per_slice
return True
def __len__(self):
""" Return the number of keys stored by this bloom filter. """
return self.count
def add(self, key, skip_check=False):
hashes = self.make_hashes(key)
if not skip_check and hashes in self:
offset = 0
for k in hashes:
self.cellarray[offset + k] = self.counter_init
offset += self.bits_per_slice
return True
if (self.count > self.capacity or self.estimate_z > 0.5) and not self.disable_hard_capacity:
raise IndexError("BloomFilter is at capacity")
offset = 0
for k in hashes:
self.cellarray[offset + k] = self.counter_init
offset += self.bits_per_slice
self.count += 1
return False
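# --- Example (editor's addition): minimal usage sketch for CountdownBloomFilter.
# Assumes the module's hash functions and Cython `maintenance` helper are built;
# the 5-second expiration and the key are illustrative only.
if __name__ == '__main__':
    import time
    cbf = CountdownBloomFilter(capacity=1000, error_rate=0.01, expiration=5)
    cbf.add('some-key')
    print('some-key' in cbf)   # expected: True
    start = time.time()
    time.sleep(1)
    # Feed the elapsed wall-clock time to the batched maintenance: counters decay
    # and keys expire roughly `expiration` seconds after their last refresh.
    cbf.batched_expiration_maintenance(time.time() - start)
    print('some-key' in cbf)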
| {
"repo_name": "Parsely/probably",
"path": "probably/cdbf.py",
"copies": "1",
"size": "5090",
"license": "mit",
"hash": 6198907979325010000,
"line_mean": 40.7213114754,
"line_max": 146,
"alpha_frac": 0.6214145383,
"autogenerated": false,
"ratio": 3.8473167044595615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49687312427595615,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from . import nputils
from . import dtypes
try:
import dask.array as da
except ImportError:
pass
def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1):
'''wrapper to apply bottleneck moving window funcs on dask arrays'''
dtype, fill_value = dtypes.maybe_promote(a.dtype)
a = a.astype(dtype)
# inputs for ghost
if axis < 0:
axis = a.ndim + axis
depth = {d: 0 for d in range(a.ndim)}
depth[axis] = (window + 1) // 2
boundary = {d: fill_value for d in range(a.ndim)}
# create ghosted arrays
ag = da.ghost.ghost(a, depth=depth, boundary=boundary)
# apply rolling func
out = ag.map_blocks(moving_func, window, min_count=min_count,
axis=axis, dtype=a.dtype)
# trim array
result = da.ghost.trim_internal(out, depth)
return result
def rolling_window(a, axis, window, center, fill_value):
""" Dask's equivalence to np.utils.rolling_window """
orig_shape = a.shape
# inputs for ghost
if axis < 0:
axis = a.ndim + axis
depth = {d: 0 for d in range(a.ndim)}
depth[axis] = int(window / 2)
# For evenly sized window, we need to crop the first point of each block.
offset = 1 if window % 2 == 0 else 0
if depth[axis] > min(a.chunks[axis]):
raise ValueError(
"For window size %d, every chunk should be larger than %d, "
"but the smallest chunk size is %d. Rechunk your array\n"
"with a larger chunk size or a chunk size that\n"
"more evenly divides the shape of your array." %
(window, depth[axis], min(a.chunks[axis])))
# Although dask.ghost pads values to boundaries of the array,
# the size of the generated array is smaller than what we want
# if center == False.
if center:
start = int(window / 2) # 10 -> 5, 9 -> 4
end = window - 1 - start
else:
start, end = window - 1, 0
pad_size = max(start, end) + offset - depth[axis]
drop_size = 0
# pad_size becomes more than 0 when the ghosted array is smaller than
# needed. In this case, we need to enlarge the original array by padding
# before ghosting.
if pad_size > 0:
if pad_size < depth[axis]:
# Ghosting requires each chunk larger than depth. If pad_size is
# smaller than the depth, we enlarge this and truncate it later.
drop_size = depth[axis] - pad_size
pad_size = depth[axis]
shape = list(a.shape)
shape[axis] = pad_size
chunks = list(a.chunks)
chunks[axis] = (pad_size, )
fill_array = da.full(shape, fill_value, dtype=a.dtype, chunks=chunks)
a = da.concatenate([fill_array, a], axis=axis)
boundary = {d: fill_value for d in range(a.ndim)}
# create ghosted arrays
ag = da.ghost.ghost(a, depth=depth, boundary=boundary)
# apply rolling func
def func(x, window, axis=-1):
x = np.asarray(x)
rolling = nputils._rolling_window(x, window, axis)
return rolling[(slice(None), ) * axis + (slice(offset, None), )]
chunks = list(a.chunks)
chunks.append(window)
out = ag.map_blocks(func, dtype=a.dtype, new_axis=a.ndim, chunks=chunks,
window=window, axis=axis)
# crop boundary.
index = (slice(None),) * axis + (slice(drop_size,
drop_size + orig_shape[axis]), )
return out[index]
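# --- Example (editor's addition): minimal sketch of rolling_window on a 1-D dask
# array. Assumes a dask version that still provides da.ghost (as used above); the
# array and window size are illustrative.
if __name__ == '__main__':
    import dask.array as da
    arr = da.arange(10, chunks=5).astype(float)
    win = rolling_window(arr, axis=0, window=3, center=True, fill_value=np.nan)
    print(win.shape)       # (10, 3): a new trailing axis of length `window`
    print(win.compute())   # edge windows contain the fill value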
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/dask_array_ops.py",
"copies": "1",
"size": "3551",
"license": "apache-2.0",
"hash": -2424478796639432000,
"line_mean": 35.2346938776,
"line_max": 77,
"alpha_frac": 0.6040551957,
"autogenerated": false,
"ratio": 3.5228174603174605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46268726560174606,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .. import Variable
from ..core import indexing
from ..core.pycompat import integer_types
from ..core.utils import Frozen, FrozenOrderedDict, is_dict_like
from .common import AbstractDataStore, BackendArray, robust_getitem
class PydapArrayWrapper(BackendArray):
def __init__(self, array):
self.array = array
@property
def shape(self):
return self.array.shape
@property
def dtype(self):
return self.array.dtype
def __getitem__(self, key):
key, np_inds = indexing.decompose_indexer(
key, self.shape, indexing.IndexingSupport.BASIC)
# pull the data from the array attribute if possible, to avoid
# downloading coordinate data twice
array = getattr(self.array, 'array', self.array)
result = robust_getitem(array, key.tuple, catch=ValueError)
# pydap doesn't squeeze axes automatically like numpy
axis = tuple(n for n, k in enumerate(key.tuple)
if isinstance(k, integer_types))
if len(axis) > 0:
result = np.squeeze(result, axis)
if len(np_inds.tuple) > 0:
result = indexing.NumpyIndexingAdapter(np.asarray(result))[np_inds]
return result
def _fix_attributes(attributes):
attributes = dict(attributes)
for k in list(attributes):
if k.lower() == 'global' or k.lower().endswith('_global'):
# move global attributes to the top level, like the netcdf-C
# DAP client
attributes.update(attributes.pop(k))
elif is_dict_like(attributes[k]):
            # Flatten hierarchical attributes to a single level with a
            # dot-separated key
attributes.update({'{}.{}'.format(k, k_child): v_child for
k_child, v_child in attributes.pop(k).items()})
return attributes
class PydapDataStore(AbstractDataStore):
"""Store for accessing OpenDAP datasets with pydap.
This store provides an alternative way to access OpenDAP datasets that may
be useful if the netCDF4 library is not available.
"""
def __init__(self, ds):
"""
Parameters
----------
ds : pydap DatasetType
"""
self.ds = ds
@classmethod
def open(cls, url, session=None):
import pydap.client
ds = pydap.client.open_url(url, session=session)
return cls(ds)
def open_store_variable(self, var):
data = indexing.LazilyOuterIndexedArray(PydapArrayWrapper(var))
return Variable(var.dimensions, data,
_fix_attributes(var.attributes))
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(self.ds[k]))
for k in self.ds.keys())
def get_attrs(self):
return Frozen(_fix_attributes(self.ds.attributes))
def get_dimensions(self):
return Frozen(self.ds.dimensions)
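# --- Example (editor's addition): hedged sketch of using the store directly. The
# URL is a placeholder; this needs the pydap package and network access.
if __name__ == '__main__':
    store = PydapDataStore.open('http://example.com/opendap/dataset')
    print(list(store.get_variables()))
    print(dict(store.get_attrs()))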
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/pydap_.py",
"copies": "1",
"size": "3031",
"license": "apache-2.0",
"hash": 5643943285876555000,
"line_mean": 31.5913978495,
"line_max": 79,
"alpha_frac": 0.6238865061,
"autogenerated": false,
"ratio": 4.10148849797023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 93
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from matplotlib.colors import Normalize
from matplotlib.collections import LineCollection
from mpl_scatter_density import ScatterDensityArtist
from astropy.visualization import (ImageNormalize, LinearStretch, SqrtStretch,
AsinhStretch, LogStretch)
from glue.utils import defer_draw, broadcast_to
from glue.viewers.scatter.state import ScatterLayerState
from glue.viewers.matplotlib.layer_artist import MatplotlibLayerArtist
from glue.core.exceptions import IncompatibleAttribute
STRETCHES = {'linear': LinearStretch,
'sqrt': SqrtStretch,
'arcsinh': AsinhStretch,
'log': LogStretch}
CMAP_PROPERTIES = set(['cmap_mode', 'cmap_att', 'cmap_vmin', 'cmap_vmax', 'cmap'])
MARKER_PROPERTIES = set(['size_mode', 'size_att', 'size_vmin', 'size_vmax', 'size_scaling', 'size'])
LINE_PROPERTIES = set(['linewidth', 'linestyle'])
DENSITY_PROPERTIES = set(['dpi', 'stretch', 'density_contrast'])
VISUAL_PROPERTIES = (CMAP_PROPERTIES | MARKER_PROPERTIES | DENSITY_PROPERTIES |
LINE_PROPERTIES | set(['color', 'alpha', 'zorder', 'visible']))
DATA_PROPERTIES = set(['layer', 'x_att', 'y_att', 'cmap_mode', 'size_mode', 'density_map',
'xerr_att', 'yerr_att', 'xerr_visible', 'yerr_visible',
'vector_visible', 'vx_att', 'vy_att', 'vector_arrowhead', 'vector_mode',
'vector_origin', 'line_visible', 'markers_visible', 'vector_scaling'])
class InvertedNormalize(Normalize):
def __call__(self, *args, **kwargs):
return 1 - super(InvertedNormalize, self).__call__(*args, **kwargs)
class DensityMapLimits(object):
contrast = 1
def min(self, array):
return 0
def max(self, array):
return 10. ** (np.log10(np.nanmax(array)) * self.contrast)
def set_mpl_artist_cmap(artist, values, state):
vmin = state.cmap_vmin
vmax = state.cmap_vmax
cmap = state.cmap
if isinstance(artist, ScatterDensityArtist):
artist.set_c(values)
else:
artist.set_array(values)
artist.set_cmap(cmap)
if vmin > vmax:
artist.set_clim(vmax, vmin)
artist.set_norm(InvertedNormalize(vmax, vmin))
else:
artist.set_clim(vmin, vmax)
artist.set_norm(Normalize(vmin, vmax))
class ScatterLayerArtist(MatplotlibLayerArtist):
_layer_state_cls = ScatterLayerState
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(ScatterLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
# Watch for changes in the viewer state which would require the
# layers to be redrawn
self._viewer_state.add_global_callback(self._update_scatter)
self.state.add_global_callback(self._update_scatter)
# Scatter
self.scatter_artist = self.axes.scatter([], [])
self.plot_artist = self.axes.plot([], [], 'o', mec='none')[0]
self.errorbar_artist = self.axes.errorbar([], [], fmt='none')
self.vector_artist = None
self.line_collection = LineCollection(np.zeros((0, 2, 2)))
self.axes.add_collection(self.line_collection)
# Scatter density
self.density_auto_limits = DensityMapLimits()
self.density_artist = ScatterDensityArtist(self.axes, [], [], color='white',
vmin=self.density_auto_limits.min,
vmax=self.density_auto_limits.max)
self.axes.add_artist(self.density_artist)
self.mpl_artists = [self.scatter_artist, self.plot_artist,
self.errorbar_artist, self.vector_artist,
self.line_collection, self.density_artist]
self.errorbar_index = 2
self.vector_index = 3
self.reset_cache()
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
@defer_draw
def _update_data(self, changed):
# Layer artist has been cleared already
if len(self.mpl_artists) == 0:
return
try:
x = self.layer[self._viewer_state.x_att].ravel()
except (IncompatibleAttribute, IndexError):
# The following includes a call to self.clear()
self.disable_invalid_attributes(self._viewer_state.x_att)
return
else:
self.enable()
try:
y = self.layer[self._viewer_state.y_att].ravel()
except (IncompatibleAttribute, IndexError):
# The following includes a call to self.clear()
self.disable_invalid_attributes(self._viewer_state.y_att)
return
else:
self.enable()
if self.state.markers_visible:
if self.state.density_map:
self.density_artist.set_xy(x, y)
self.plot_artist.set_data([], [])
self.scatter_artist.set_offsets(np.zeros((0, 2)))
else:
if self.state.cmap_mode == 'Fixed' and self.state.size_mode == 'Fixed':
# In this case we use Matplotlib's plot function because it has much
# better performance than scatter.
self.plot_artist.set_data(x, y)
self.scatter_artist.set_offsets(np.zeros((0, 2)))
self.density_artist.set_xy([], [])
else:
self.plot_artist.set_data([], [])
offsets = np.vstack((x, y)).transpose()
self.scatter_artist.set_offsets(offsets)
self.density_artist.set_xy([], [])
else:
self.plot_artist.set_data([], [])
self.scatter_artist.set_offsets(np.zeros((0, 2)))
self.density_artist.set_xy([], [])
if self.state.line_visible:
if self.state.cmap_mode == 'Fixed':
points = np.array([x, y]).transpose()
self.line_collection.set_segments([points])
else:
# In the case where we want to color the line, we need to over
# sample the line by a factor of two so that we can assign the
# correct colors to segments - if we didn't do this, then
# segments on one side of a point would be a different color
# from the other side. With oversampling, we can have half a
# segment on either side of a point be the same color as a
# point
x_fine = np.zeros(len(x) * 2 - 1, dtype=float)
y_fine = np.zeros(len(y) * 2 - 1, dtype=float)
x_fine[::2] = x
x_fine[1::2] = 0.5 * (x[1:] + x[:-1])
y_fine[::2] = y
y_fine[1::2] = 0.5 * (y[1:] + y[:-1])
points = np.array([x_fine, y_fine]).transpose().reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
self.line_collection.set_segments(segments)
else:
self.line_collection.set_segments(np.zeros((0, 2, 2)))
for eartist in list(self.errorbar_artist[2]):
if eartist is not None:
try:
eartist.remove()
except ValueError:
pass
except AttributeError: # Matplotlib < 1.5
pass
if self.vector_artist is not None:
self.vector_artist.remove()
self.vector_artist = None
if self.state.vector_visible:
if self.state.vx_att is not None and self.state.vy_att is not None:
vx = self.layer[self.state.vx_att].ravel()
vy = self.layer[self.state.vy_att].ravel()
if self.state.vector_mode == 'Polar':
ang = vx
length = vy
# assume ang is anti clockwise from the x axis
vx = length * np.cos(np.radians(ang))
vy = length * np.sin(np.radians(ang))
else:
vx = None
vy = None
if self.state.vector_arrowhead:
hw = 3
hl = 5
else:
hw = 1
hl = 0
v = np.hypot(vx, vy)
vmax = np.nanmax(v)
vx = vx / vmax
vy = vy / vmax
self.vector_artist = self.axes.quiver(x, y, vx, vy, units='width',
pivot=self.state.vector_origin,
headwidth=hw, headlength=hl,
scale_units='width',
scale=10 / self.state.vector_scaling)
self.mpl_artists[self.vector_index] = self.vector_artist
if self.state.xerr_visible or self.state.yerr_visible:
if self.state.xerr_visible and self.state.xerr_att is not None:
xerr = self.layer[self.state.xerr_att].ravel()
else:
xerr = None
if self.state.yerr_visible and self.state.yerr_att is not None:
yerr = self.layer[self.state.yerr_att].ravel()
else:
yerr = None
self.errorbar_artist = self.axes.errorbar(x, y, fmt='none',
xerr=xerr, yerr=yerr)
self.mpl_artists[self.errorbar_index] = self.errorbar_artist
@defer_draw
def _update_visual_attributes(self, changed, force=False):
if not self.enabled:
return
if self.state.markers_visible:
if self.state.density_map:
if self.state.cmap_mode == 'Fixed':
if force or 'color' in changed or 'cmap_mode' in changed:
self.density_artist.set_color(self.state.color)
self.density_artist.set_c(None)
self.density_artist.set_clim(self.density_auto_limits.min,
self.density_auto_limits.max)
elif force or any(prop in changed for prop in CMAP_PROPERTIES):
c = self.layer[self.state.cmap_att].ravel()
set_mpl_artist_cmap(self.density_artist, c, self.state)
if force or 'stretch' in changed:
self.density_artist.set_norm(ImageNormalize(stretch=STRETCHES[self.state.stretch]()))
if force or 'dpi' in changed:
self.density_artist.set_dpi(self._viewer_state.dpi)
if force or 'density_contrast' in changed:
self.density_auto_limits.contrast = self.state.density_contrast
self.density_artist.stale = True
else:
if self.state.cmap_mode == 'Fixed' and self.state.size_mode == 'Fixed':
if force or 'color' in changed:
self.plot_artist.set_color(self.state.color)
if force or 'size' in changed or 'size_scaling' in changed:
self.plot_artist.set_markersize(self.state.size *
self.state.size_scaling)
else:
# TEMPORARY: Matplotlib has a bug that causes set_alpha to
# change the colors back: https://github.com/matplotlib/matplotlib/issues/8953
if 'alpha' in changed:
force = True
if self.state.cmap_mode == 'Fixed':
if force or 'color' in changed or 'cmap_mode' in changed:
self.scatter_artist.set_facecolors(self.state.color)
self.scatter_artist.set_edgecolor('none')
elif force or any(prop in changed for prop in CMAP_PROPERTIES):
c = self.layer[self.state.cmap_att].ravel()
set_mpl_artist_cmap(self.scatter_artist, c, self.state)
self.scatter_artist.set_edgecolor('none')
if force or any(prop in changed for prop in MARKER_PROPERTIES):
if self.state.size_mode == 'Fixed':
s = self.state.size * self.state.size_scaling
s = broadcast_to(s, self.scatter_artist.get_sizes().shape)
else:
s = self.layer[self.state.size_att].ravel()
s = ((s - self.state.size_vmin) /
(self.state.size_vmax - self.state.size_vmin)) * 30
s *= self.state.size_scaling
# Note, we need to square here because for scatter, s is actually
# proportional to the marker area, not radius.
self.scatter_artist.set_sizes(s ** 2)
if self.state.line_visible:
if self.state.cmap_mode == 'Fixed':
if force or 'color' in changed or 'cmap_mode' in changed:
self.line_collection.set_array(None)
self.line_collection.set_color(self.state.color)
elif force or any(prop in changed for prop in CMAP_PROPERTIES):
# Higher up we oversampled the points in the line so that
# half a segment on either side of each point has the right
# color, so we need to also oversample the color here.
c = self.layer[self.state.cmap_att].ravel()
cnew = np.zeros((len(c) - 1) * 2)
cnew[::2] = c[:-1]
cnew[1::2] = c[1:]
set_mpl_artist_cmap(self.line_collection, cnew, self.state)
if force or 'linewidth' in changed:
self.line_collection.set_linewidth(self.state.linewidth)
if force or 'linestyle' in changed:
self.line_collection.set_linestyle(self.state.linestyle)
if self.state.vector_visible and self.vector_artist is not None:
if self.state.cmap_mode == 'Fixed':
if force or 'color' in changed or 'cmap_mode' in changed:
self.vector_artist.set_array(None)
self.vector_artist.set_color(self.state.color)
elif force or any(prop in changed for prop in CMAP_PROPERTIES):
c = self.layer[self.state.cmap_att].ravel()
set_mpl_artist_cmap(self.vector_artist, c, self.state)
if self.state.xerr_visible or self.state.yerr_visible:
for eartist in list(self.errorbar_artist[2]):
if eartist is None:
continue
if self.state.cmap_mode == 'Fixed':
if force or 'color' in changed or 'cmap_mode' in changed:
eartist.set_color(self.state.color)
elif force or any(prop in changed for prop in CMAP_PROPERTIES):
c = self.layer[self.state.cmap_att].ravel()
set_mpl_artist_cmap(eartist, c, self.state)
if force or 'alpha' in changed:
eartist.set_alpha(self.state.alpha)
if force or 'visible' in changed:
eartist.set_visible(self.state.visible)
if force or 'zorder' in changed:
eartist.set_zorder(self.state.zorder)
for artist in [self.scatter_artist, self.plot_artist,
self.vector_artist, self.line_collection,
self.density_artist]:
if artist is None:
continue
if force or 'alpha' in changed:
artist.set_alpha(self.state.alpha)
if force or 'zorder' in changed:
artist.set_zorder(self.state.zorder)
if force or 'visible' in changed:
artist.set_visible(self.state.visible)
self.redraw()
@defer_draw
def _update_scatter(self, force=False, **kwargs):
if (self._viewer_state.x_att is None or
self._viewer_state.y_att is None or
self.state.layer is None):
return
# Figure out which attributes are different from before. Ideally we shouldn't
# need this but currently this method is called multiple times if an
# attribute is changed due to x_att changing then hist_x_min, hist_x_max, etc.
# If we can solve this so that _update_histogram is really only called once
# then we could consider simplifying this. Until then, we manually keep track
# of which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if force or len(changed & DATA_PROPERTIES) > 0:
self._update_data(changed)
force = True
if force or len(changed & VISUAL_PROPERTIES) > 0:
self._update_visual_attributes(changed, force=force)
def get_layer_color(self):
if self.state.cmap_mode == 'Fixed':
return self.state.color
else:
return self.state.cmap
@defer_draw
def update(self):
self._update_scatter(force=True)
self.redraw()
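# --- Example (editor's addition): the line-oversampling trick from _update_data,
# shown as a standalone helper on concrete numbers so the reasoning in the
# comments above is easy to check. Not used by the class itself.
def _oversample_for_color(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    x_fine = np.zeros(len(x) * 2 - 1, dtype=float)
    y_fine = np.zeros(len(y) * 2 - 1, dtype=float)
    x_fine[::2] = x
    x_fine[1::2] = 0.5 * (x[1:] + x[:-1])
    y_fine[::2] = y
    y_fine[1::2] = 0.5 * (y[1:] + y[:-1])
    # e.g. x = [0, 2, 4] -> x_fine = [0, 1, 2, 3, 4]: each original point owns the
    # half-segment on either side of it, so per-point colors map onto segments.
    return x_fine, y_fine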
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/scatter/layer_artist.py",
"copies": "1",
"size": "18014",
"license": "bsd-3-clause",
"hash": 5212772700236165000,
"line_mean": 40.1278538813,
"line_max": 105,
"alpha_frac": 0.5375818808,
"autogenerated": false,
"ratio": 4.104351788562315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5141933669362315,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from numba.npyufunc.deviceufunc import (UFuncMechanism, GenerializedUFunc,
GUFuncCallSteps)
from numba.roc.hsadrv.driver import dgpu_present
import numba.roc.hsadrv.devicearray as devicearray
import numba.roc.api as api
class HsaUFuncDispatcher(object):
"""
Invoke the HSA ufunc specialization for the given inputs.
"""
def __init__(self, types_to_retty_kernels):
self.functions = types_to_retty_kernels
def __call__(self, *args, **kws):
"""
*args: numpy arrays
**kws:
stream -- hsa stream; when defined, asynchronous mode is used.
out -- output array. Can be a numpy array or DeviceArrayBase
depending on the input arguments. Type must match
the input arguments.
"""
return HsaUFuncMechanism.call(self.functions, args, kws)
def reduce(self, arg, stream=0):
raise NotImplementedError
class HsaUFuncMechanism(UFuncMechanism):
"""
Provide OpenCL specialization
"""
DEFAULT_STREAM = 0
ARRAY_ORDER = 'A'
def is_device_array(self, obj):
if dgpu_present:
return devicearray.is_hsa_ndarray(obj)
else:
return isinstance(obj, np.ndarray)
def is_host_array(self, obj):
if dgpu_present:
return False
else:
return isinstance(obj, np.ndarray)
def to_device(self, hostary, stream):
if dgpu_present:
return api.to_device(hostary)
else:
return hostary
def launch(self, func, count, stream, args):
# ILP must match vectorize kernel source
ilp = 4
# Use more wavefront to allow hiding latency
tpb = 64 * 2
count = (count + (ilp - 1)) // ilp
blockcount = (count + (tpb - 1)) // tpb
func[blockcount, tpb](*args)
def device_array(self, shape, dtype, stream):
if dgpu_present:
return api.device_array(shape=shape, dtype=dtype)
else:
return np.empty(shape=shape, dtype=dtype)
def broadcast_device(self, ary, shape):
if dgpu_present:
raise NotImplementedError('device broadcast_device NIY')
else:
ax_differs = [ax for ax in range(len(shape))
if ax >= ary.ndim
or ary.shape[ax] != shape[ax]]
missingdim = len(shape) - len(ary.shape)
strides = [0] * missingdim + list(ary.strides)
for ax in ax_differs:
strides[ax] = 0
return np.ndarray(shape=shape, strides=strides,
dtype=ary.dtype, buffer=ary)
class _HsaGUFuncCallSteps(GUFuncCallSteps):
__slots__ = ()
def is_device_array(self, obj):
if dgpu_present:
return devicearray.is_hsa_ndarray(obj)
else:
return True
def to_device(self, hostary):
if dgpu_present:
return api.to_device(hostary)
else:
return hostary
def to_host(self, devary, hostary):
if dgpu_present:
out = devary.copy_to_host(hostary)
return out
else:
pass
def device_array(self, shape, dtype):
if dgpu_present:
return api.device_array(shape=shape, dtype=dtype)
else:
return np.empty(shape=shape, dtype=dtype)
def launch_kernel(self, kernel, nelem, args):
kernel.configure(nelem, min(nelem, 64))(*args)
class HSAGenerializedUFunc(GenerializedUFunc):
@property
def _call_steps(self):
return _HsaGUFuncCallSteps
def _broadcast_scalar_input(self, ary, shape):
if dgpu_present:
return devicearray.DeviceNDArray(shape=shape,
strides=(0,),
dtype=ary.dtype,
dgpu_data=ary.dgpu_data)
else:
return np.lib.stride_tricks.as_strided(ary, shape=(shape,),
strides=(0,))
def _broadcast_add_axis(self, ary, newshape):
newax = len(newshape) - len(ary.shape)
# Add 0 strides for missing dimension
newstrides = (0,) * newax + ary.strides
if dgpu_present:
return devicearray.DeviceNDArray(shape=newshape,
strides=newstrides,
dtype=ary.dtype,
dgpu_data=ary.dgpu_data)
else:
raise NotImplementedError
| {
"repo_name": "jriehl/numba",
"path": "numba/roc/dispatch.py",
"copies": "2",
"size": "4749",
"license": "bsd-2-clause",
"hash": 2662200452558643700,
"line_mean": 30.66,
"line_max": 75,
"alpha_frac": 0.5506422405,
"autogenerated": false,
"ratio": 4.0451448040885865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5595787044588586,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from qtpy import QtCore, QtWidgets
from glue.config import colormaps
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.viewers.matplotlib.qt.widget import MplWidget
from glue.viewers.image.composite_array import CompositeArray
from glue.external.modest_image import imshow
from glue.utils import defer_draw
# Import the mouse mode to make sure it gets registered
from glue.viewers.image.contrast_mouse_mode import ContrastBiasMode # noqa
__all__ = ['StandaloneImageViewer']
class StandaloneImageViewer(QtWidgets.QMainWindow):
"""
A simplified image viewer, without any brushing or linking,
but with the ability to adjust contrast and resample.
"""
window_closed = QtCore.Signal()
_toolbar_cls = MatplotlibViewerToolbar
tools = ['image:contrast', 'image:colormap']
def __init__(self, image=None, wcs=None, parent=None, **kwargs):
"""
:param image: Image to display (2D numpy array)
:param parent: Parent widget (optional)
:param kwargs: Extra keywords to pass to imshow
"""
super(StandaloneImageViewer, self).__init__(parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self._setup_axes()
self._composite = CompositeArray()
self._composite.allocate('image')
self._im = None
self.initialize_toolbar()
if image is not None:
self.set_image(image=image, wcs=wcs, **kwargs)
def _setup_axes(self):
from glue.viewers.common.viz_client import init_mpl
_, self._axes = init_mpl(self.central_widget.canvas.fig, axes=None, wcs=True)
self._axes.set_aspect('equal', adjustable='datalim')
@defer_draw
def set_image(self, image=None, wcs=None, **kwargs):
"""
Update the image shown in the widget
"""
if self._im is not None:
self._im.remove()
self._im = None
kwargs.setdefault('origin', 'upper')
if wcs is not None:
            # In the following we force the color and linewidth of the WCSAxes
# frame to be restored after calling reset_wcs. This can be removed
# once we support Astropy 1.3.1 or later.
color = self._axes.coords.frame.get_color()
linewidth = self._axes.coords.frame.get_linewidth()
self._axes.reset_wcs(wcs)
self._axes.coords.frame.set_color(color)
self._axes.coords.frame.set_linewidth(linewidth)
del color, linewidth
self._composite.set('image', array=image, color=colormaps.members[0][1])
self._im = imshow(self._axes, self._composite, **kwargs)
self._im_array = image
self._set_norm(self._contrast_mode)
if 'extent' in kwargs:
self.axes.set_xlim(kwargs['extent'][:2])
self.axes.set_ylim(kwargs['extent'][2:])
else:
ny, nx = image.shape
self.axes.set_xlim(-0.5, nx - 0.5)
self.axes.set_ylim(-0.5, ny - 0.5)
# FIXME: for a reason I don't quite understand, dataLim doesn't
# get updated immediately here, which means that there are then
# issues in the first draw of the image (the limits are such that
# only part of the image is shown). We just set dataLim manually
# to avoid this issue. This is also done in ImageViewer.
self.axes.dataLim.intervalx = self.axes.get_xlim()
self.axes.dataLim.intervaly = self.axes.get_ylim()
self._redraw()
@property
def axes(self):
"""
        The Matplotlib axes object for this figure
"""
return self._axes
def show(self):
super(StandaloneImageViewer, self).show()
self._redraw()
def _redraw(self):
self.central_widget.canvas.draw()
def set_cmap(self, cmap):
self._composite.set('image', color=cmap)
self._im.invalidate_cache()
self._redraw()
def mdi_wrap(self):
"""
Embed this widget in a GlueMdiSubWindow
"""
from glue.app.qt.mdi_area import GlueMdiSubWindow
sub = GlueMdiSubWindow()
sub.setWidget(self)
self.destroyed.connect(sub.close)
self.window_closed.connect(sub.close)
sub.resize(self.size())
self._mdi_wrapper = sub
return sub
def closeEvent(self, event):
if self._im is not None:
self._im.remove()
self._im = None
self.window_closed.emit()
return super(StandaloneImageViewer, self).closeEvent(event)
def _set_norm(self, mode):
"""
Use the `ContrastMouseMode` to adjust the transfer function
"""
pmin, pmax = mode.get_clip_percentile()
if pmin is None:
clim = mode.get_vmin_vmax()
else:
clim = (np.nanpercentile(self._im_array, pmin),
np.nanpercentile(self._im_array, pmax))
stretch = mode.stretch
self._composite.set('image', clim=clim, stretch=stretch,
bias=mode.bias, contrast=mode.contrast)
self._im.invalidate_cache()
self._redraw()
def initialize_toolbar(self):
from glue.config import viewer_tool
self.toolbar = self._toolbar_cls(self)
for tool_id in self.tools:
mode_cls = viewer_tool.members[tool_id]
if tool_id == 'image:contrast':
mode = mode_cls(self, move_callback=self._set_norm)
self._contrast_mode = mode
else:
mode = mode_cls(self)
self.toolbar.add_tool(mode)
self.addToolBar(self.toolbar)
def set_status(self, message):
sb = self.statusBar()
sb.showMessage(message)
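# --- Example (editor's addition): hedged sketch of showing a random image in the
# standalone viewer. Needs a Qt event loop; get_qapp is glue's helper for
# creating/fetching the QApplication.
if __name__ == '__main__':
    from glue.utils.qt import get_qapp
    app = get_qapp()
    viewer = StandaloneImageViewer(image=np.random.random((128, 128)))
    viewer.show()
    app.exec_()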
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/image/qt/standalone_image_viewer.py",
"copies": "2",
"size": "5898",
"license": "bsd-3-clause",
"hash": 8187811203633673000,
"line_mean": 31.7666666667,
"line_max": 85,
"alpha_frac": 0.6088504578,
"autogenerated": false,
"ratio": 3.819948186528497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428798644328497,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, PYQT5
from glue.core import roi
from glue.utils.qt import mpl_to_qt4_color
class QtROI(object):
"""
A mixin class used to override the drawing methods used by
the MPL ROIs in core.roi. Paints to the Widget directly,
avoiding calls that redraw the entire matplotlib plot.
This permits smoother ROI selection for dense plots
    that take a long time to render
"""
def setup_patch(self):
pass
def _draw(self):
pass
def _sync_patch(self):
self.canvas.roi_callback = self._paint_check
self.canvas.update() # QT repaint without MPL redraw
@property
def canvas(self):
return self._axes.figure.canvas
def _paint_check(self, canvas):
# check if the ROI should be rendered
# called within the Qt paint loop
if not (self._roi.defined() and self._mid_selection):
return
self.paint(canvas)
def paint(self, canvas):
x, y = self._roi.to_polygon()
self.draw_polygon(canvas, x, y)
def draw_polygon(self, canvas, x, y):
x, y = self._transform(x, y)
poly = QtGui.QPolygon()
points = [QtCore.QPoint(xx, yy) for xx, yy in zip(x, y)]
for p in points:
poly.append(p)
p = self.get_painter(canvas)
p.drawPolygon(poly)
p.end()
def _transform(self, x, y):
""" Convert points from MPL data coords to Qt Widget coords"""
t = self._axes.transData
xy = np.column_stack((x, y))
pts = t.transform(xy)
# Matplotlib 2.x with PyQt5 on a retina display has a bug which means
# that the coordinates returned by transData are twice as large as they
# should be. Since we don't know when/if this bug will be fixed, we
# check whether the coordinates of the top right corner are outside
# the canvas.
if PYQT5:
xmax = self._axes.get_xlim()[1]
ymax = self._axes.get_ylim()[1]
xd, yd = t.transform((xmax, ymax))
if xd > self.canvas.width() or yd > self.canvas.height():
ratio = self.canvas.devicePixelRatio()
pts /= ratio
pts[:, 1] = self.canvas.height() - pts[:, 1]
return pts[:, 0], pts[:, 1]
def get_painter(self, canvas):
p = QtGui.QPainter(canvas)
facecolor = mpl_to_qt4_color(self.plot_opts['facecolor'],
self.plot_opts['alpha'])
edgecolor = mpl_to_qt4_color(self.plot_opts['edgecolor'],
self.plot_opts['alpha'])
pen = QtGui.QPen(edgecolor)
pen.setWidth(self.plot_opts.get('edgewidth', 0))
p.setPen(pen)
p.setBrush(QtGui.QBrush(facecolor))
return p
class QtPathROI(QtROI, roi.MplPathROI):
def get_painter(self, canvas):
p = super(QtPathROI, self).get_painter(canvas)
p.setBrush(Qt.NoBrush)
p.setRenderHint(p.HighQualityAntialiasing)
return p
def draw_polygon(self, canvas, x, y):
x, y = self._transform(x, y)
poly = QtGui.QPolygon()
points = [QtCore.QPoint(xx, yy) for xx, yy in zip(x, y)]
for p in points:
poly.append(p)
p = self.get_painter(canvas)
p.drawPolyline(poly)
p.end()
class QtRectangularROI(QtROI, roi.MplRectangularROI):
def __init__(self, axes):
roi.MplRectangularROI.__init__(self, axes)
class QtPolygonalROI(QtROI, roi.MplPolygonalROI):
def __init__(self, axes):
roi.MplPolygonalROI.__init__(self, axes)
class QtXRangeROI(QtROI, roi.MplXRangeROI):
def __init__(self, axes):
roi.MplXRangeROI.__init__(self, axes)
def paint(self, canvas):
x = self._roi.range()
xy = self._axes.transAxes.transform([(0, 0), (1.0, 1.0)])
xy = self._axes.transData.inverted().transform(xy)
y = xy[:, 1]
self.draw_polygon(canvas, [x[0], x[1], x[1], x[0]],
[y[0], y[0], y[1], y[1]])
class QtYRangeROI(QtROI, roi.MplYRangeROI):
def __init__(self, axes):
roi.MplYRangeROI.__init__(self, axes)
def paint(self, canvas):
y = self._roi.range()
xy = self._axes.transAxes.transform([(0, 0.0), (1.0, 1.0)])
xy = self._axes.transData.inverted().transform(xy)
x = xy[:, 0]
self.draw_polygon(canvas, [x[0], x[1], x[1], x[0]],
[y[0], y[0], y[1], y[1]])
class QtCircularROI(QtROI, roi.MplCircularROI):
def __init__(self, axes):
roi.MplCircularROI.__init__(self, axes)
def paint(self, canvas):
xy = list(map(int, self._roi.get_center()))
radius = int(self._roi.get_radius())
# Matplotlib 2.x with PyQt5 on a retina display has a bug which means
# that the coordinates returned by transData are twice as large as they
# should be. Since we don't know when/if this bug will be fixed, we
# check whether the coordinates of the top right corner are outside
# the canvas.
if PYQT5:
xmax = self._axes.get_xlim()[1]
ymax = self._axes.get_ylim()[1]
xd, yd = self._axes.transData.transform((xmax, ymax))
if xd > self.canvas.width() or yd > self.canvas.height():
ratio = self.canvas.devicePixelRatio()
xy[0] /= ratio
xy[1] /= ratio
radius /= ratio
center = QtCore.QPoint(xy[0], canvas.height() - xy[1])
p = self.get_painter(canvas)
p.drawEllipse(center, radius, radius)
p.end()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/qt/roi.py",
"copies": "3",
"size": "5762",
"license": "bsd-3-clause",
"hash": 5541679687971372000,
"line_mean": 29.8128342246,
"line_max": 79,
"alpha_frac": 0.5734120097,
"autogenerated": false,
"ratio": 3.4318046456223943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5505216655322394,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, QtWidgets
from glue.core import roi
from glue.utils.qt import mpl_to_qt4_color
class QtROI(object):
"""
A mixin class used to override the drawing methods used by
the MPL ROIs in core.roi. Paints to the Widget directly,
avoiding calls that redraw the entire matplotlib plot.
    This permits smoother ROI selection for dense plots
    that take a long time to render
"""
def setup_patch(self):
pass
def _draw(self):
pass
def _sync_patch(self):
self.canvas.roi_callback = self._paint_check
self.canvas.update() # QT repaint without MPL redraw
@property
def canvas(self):
return self._axes.figure.canvas
def _paint_check(self, canvas):
# check if the ROI should be rendered
# called within the Qt paint loop
if not (self._roi.defined() and self._mid_selection):
return
self.paint(canvas)
def paint(self, canvas):
x, y = self._roi.to_polygon()
self.draw_polygon(canvas, x, y)
def draw_polygon(self, canvas, x, y):
x, y = self._transform(x, y)
poly = QtGui.QPolygon()
points = [QtCore.QPoint(xx, yy) for xx, yy in zip(x, y)]
for p in points:
poly.append(p)
p = self.get_painter(canvas)
p.drawPolygon(poly)
p.end()
def _transform(self, x, y):
""" Convert points from MPL data coords to Qt Widget coords"""
t = self._axes.transData
xy = np.column_stack((x, y))
pts = t.transform(xy)
pts[:, 1] = self.canvas.height() - pts[:, 1]
return pts[:, 0], pts[:, 1]
def get_painter(self, canvas):
p = QtGui.QPainter(canvas)
facecolor = mpl_to_qt4_color(self.plot_opts['facecolor'],
self.plot_opts['alpha'])
edgecolor = mpl_to_qt4_color(self.plot_opts['edgecolor'],
self.plot_opts['alpha'])
pen = QtGui.QPen(edgecolor)
pen.setWidth(self.plot_opts.get('edgewidth', 0))
p.setPen(pen)
p.setBrush(QtGui.QBrush(facecolor))
return p
class QtPathROI(QtROI, roi.MplPathROI):
def get_painter(self, canvas):
p = super(QtPathROI, self).get_painter(canvas)
p.setBrush(Qt.NoBrush)
p.setRenderHint(p.HighQualityAntialiasing)
return p
def draw_polygon(self, canvas, x, y):
x, y = self._transform(x, y)
poly = QtGui.QPolygon()
points = [QtCore.QPoint(xx, yy) for xx, yy in zip(x, y)]
for p in points:
poly.append(p)
p = self.get_painter(canvas)
p.drawPolyline(poly)
p.end()
class QtRectangularROI(QtROI, roi.MplRectangularROI):
def __init__(self, axes):
roi.MplRectangularROI.__init__(self, axes)
class QtPolygonalROI(QtROI, roi.MplPolygonalROI):
def __init__(self, axes):
roi.MplPolygonalROI.__init__(self, axes)
class QtXRangeROI(QtROI, roi.MplXRangeROI):
def __init__(self, axes):
roi.MplXRangeROI.__init__(self, axes)
def paint(self, canvas):
x = self._roi.range()
xy = self._axes.transAxes.transform([(0, 0), (1.0, 1.0)])
xy = self._axes.transData.inverted().transform(xy)
y = xy[:, 1]
self.draw_polygon(canvas, [x[0], x[1], x[1], x[0]],
[y[0], y[0], y[1], y[1]])
class QtYRangeROI(QtROI, roi.MplYRangeROI):
def __init__(self, axes):
roi.MplYRangeROI.__init__(self, axes)
def paint(self, canvas):
y = self._roi.range()
xy = self._axes.transAxes.transform([(0, 0.0), (1.0, 1.0)])
xy = self._axes.transData.inverted().transform(xy)
x = xy[:, 0]
self.draw_polygon(canvas, [x[0], x[1], x[1], x[0]],
[y[0], y[0], y[1], y[1]])
class QtCircularROI(QtROI, roi.MplCircularROI):
def __init__(self, axes):
roi.MplCircularROI.__init__(self, axes)
def paint(self, canvas):
xy = list(map(int, self._roi.get_center()))
radius = int(self._roi.get_radius())
center = QtCore.QPoint(xy[0], canvas.height() - xy[1])
p = self.get_painter(canvas)
p.drawEllipse(center, radius, radius)
p.end()
| {
"repo_name": "saimn/glue",
"path": "glue/core/qt/roi.py",
"copies": "1",
"size": "4397",
"license": "bsd-3-clause",
"hash": -7443946654485707000,
"line_mean": 27.5519480519,
"line_max": 70,
"alpha_frac": 0.5751648851,
"autogenerated": false,
"ratio": 3.2740134028294863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9349178287929487,
"avg_score": 0,
"num_lines": 154
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from qtpy.QtWidgets import QComboBox, QTableWidgetItem
from qtpy import QtCore
from addie.utilities.general import generate_random_key
from addie.processing.mantid.master_table.tree_definition import LIST_SEARCH_CRITERIA
from addie.utilities.gui_handler import unlock_signals_ui
class TableWidgetRuleHandler:
def __init__(self, parent=None):
self.parent = parent
self.table_ui = parent.ui.tableWidget
self.row_height = parent.row_height
def define_unique_rule_name(self, row):
"""this method makes sure that the name of the rule defined is unique and does not exist already"""
nbr_row = self.table_ui.rowCount()
list_rule_name = []
for _row in np.arange(nbr_row):
if self.table_ui.item(_row, 1):
_rule_name = str(self.table_ui.item(_row, 1).text())
list_rule_name.append(_rule_name)
offset = 0
while True:
if ("{}".format(offset + row)) in list_rule_name:
offset += 1
else:
return offset + row
def add_row(self, row=-1):
"""this add a default row to the table that takes care
of the rules"""
_random_key = generate_random_key()
_list_ui_for_this_row = {}
_list_ui_to_unlock = []
self.table_ui.insertRow(row)
self.table_ui.setRowHeight(row, self.row_height)
# key
_item = QTableWidgetItem("{}".format(_random_key))
self.table_ui.setItem(row, 0, _item)
# rule #
_rule_name = self.define_unique_rule_name(row)
_item = QTableWidgetItem("{}".format(_rule_name))
_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.table_ui.setItem(row, 1, _item)
# search argument
_widget = QComboBox()
_list_ui_for_this_row['list_items'] = _widget
list_items = LIST_SEARCH_CRITERIA[self.parent.parent.instrument['short_name'].lower()]
_widget.addItems(list_items)
self.table_ui.setCellWidget(row, 2, _widget)
_widget.blockSignals(True)
_list_ui_to_unlock.append(_widget)
_widget.currentIndexChanged.connect(lambda value=list_items[0],
key = _random_key:
self.parent.list_item_changed(value, key))
# criteria
list_criteria = ['is', 'contains']
_widget = QComboBox()
_list_ui_for_this_row['list_criteria'] = _widget
_widget.addItems(list_criteria)
self.table_ui.setCellWidget(row, 3, _widget)
_widget.blockSignals(True)
_list_ui_to_unlock.append(_widget)
_widget.currentIndexChanged.connect(lambda value=list_criteria[0],
key = _random_key:
self.parent.list_criteria_changed(value, key))
# argument
_widget = QComboBox()
_widget.setEditable(True)
_list_ui_for_this_row['list_items_value'] = _widget
list_values = list(self.parent.metadata['Chemical Formula'])
_widget.addItems(list_values)
self.table_ui.setCellWidget(row, 4, _widget)
_widget.blockSignals(True)
_list_ui_to_unlock.append(_widget)
_widget.editTextChanged.connect(lambda value=list_values[0],
key = _random_key:
self.parent.list_argument_changed(value, key))
_widget.currentIndexChanged.connect(lambda value=list_values[0],
key = _random_key:
self.parent.list_argument_index_changed(value, key))
if row == 0:
self.table_ui.horizontalHeader().setVisible(True)
unlock_signals_ui(list_ui=_list_ui_to_unlock)
self.parent.list_ui[_random_key] = _list_ui_for_this_row
self.parent.check_all_filter_widgets()
def update_list_value_of_given_item(self, index=-1, key=None):
"""When user clicks, in the Tablewidget rule, the first row showing the name of the list element,
for example 'Chemical formula', the list of available values will update automatically"""
list_ui = self.parent.list_ui
list_metadata = self.parent.metadata
item_name = list_ui[key]['list_items'].itemText(index)
combobox_values = list_ui[key]['list_items_value']
combobox_values.blockSignals(True)
combobox_values.clear()
combobox_values.addItems(list(list_metadata[item_name]))
combobox_values.blockSignals(False)
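# Illustrative sketch (not part of the original module): the offset search used
# by define_unique_rule_name above, reduced to plain Python so it can run
# without a Qt table. The candidate row and existing names below are made up.
def _unique_rule_name_sketch(row=2, existing_names=('2', '3')):
    offset = 0
    while ("{}".format(offset + row)) in existing_names:
        offset += 1
    return offset + row  # -> 4 for the defaults above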
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/table_widget_rule_handler.py",
"copies": "1",
"size": "4759",
"license": "mit",
"hash": -502036111835493950,
"line_mean": 39.3305084746,
"line_max": 107,
"alpha_frac": 0.5915108216,
"autogenerated": false,
"ratio": 3.82556270096463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.491707352256463,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .tree import Tree
class TreeLayout(object):
""" The TreeLayout class maps trees onto an xy coordinate space for
plotting.
TreeLayout provides a dictionary-like interface for access to the
location of each node in a tree. The typical use looks something like:
tl = TreeLayout(tree_object)
x_location = tl[key].x
y_location = t1[key].y
width = t1[key].width
height = t1[key].height
where key is either a reference to one of the nodes in the tree,
or the id of that node.
In this base class, the layout assigns each node a width of 1. It
places the root at (0,0). The y position of every other node is
one higher than its parent, and the x location is such that
subtrees are centered over the parent tree.
Subclasses of TreeLayout can override the layout() method to
provide alternative layout styles.
"""
class Layout(object):
""" A small class to hold the layout information for each
tree node.
Attributes:
-----------
node: Tree instance
The node that this layout object describes
x: X location of this node
y: Y location of this node
width: Width of this node
height: Height of this node
"""
def __init__(self, node, x=0., y=0., width=0., height=0.):
self.x = x
self.y = y
self.width = width
self.height = height
self.node = node
def __str__(self):
return ("Node %s: (x, y) = (%f, %f). (w x h) = (%f, %f)" %
(self.node.id, self.x, self.y, self.width, self.height))
def __init__(self, tree):
""" Create a new TreeLayout object
Parameters:
-----------
Tree: Tree instance
The root node of the tree to layout. The tree must be
indexable (i.e. it must have the .index property)
"""
if not isinstance(tree, Tree):
raise TypeError("Input not a tree object: %s" % type(tree))
self.tree = tree
self._dict = {}
try:
tree.index
except KeyError:
raise TypeError("Cannot create tree layout -- "
"input tree can't be indexed")
self.layout()
def __getitem__(self, key):
return self._dict[key]
def layout(self):
"""
Calculate the layout of this tree.
"""
self._tree_width(self.tree)
self._tree_pos(self.tree)
for t in self.tree.index:
self[t].width = 1
def _tree_width(self, tree):
"""
Recursively calculates the width of each subtree. Also populates the
layout dictionary.
"""
node = TreeLayout.Layout(tree, x=0., y=0.,
width=1., height=0.)
self._dict[tree] = node
self._dict[tree.id] = node
width = 0.
for c in tree.children:
self._tree_width(c)
width += self[c].width
node.width = width
def _tree_pos(self, tree):
"""
Based on the width of each subtree, recursively moves the
subtrees so they don't overlap.
"""
w = 0.
node = self[tree]
for c in tree.children:
self[c].x = node.x - node.width / 2. + w + self[c].width / 2.
w += self[c].width
self[c].y = node.y + 1
self._tree_pos(c)
def pick(self, x, y):
"""
Based on the layout of the tree, choose a nearby branch to an
x,y location
Parameters:
-----------
x: The x coordinate to search from
y: The y coordinate to search from
Outputs:
--------
A reference to the closest tree node, if one is
found. Otherwise, returns None
"""
sz = len(self.tree.index)
off = np.zeros(sz)
candidate = np.zeros(sz, dtype=bool)
for i, t in enumerate(self.tree.index):
off[i] = abs(x - self[t].x)
parent = self[t].node.parent
if parent:
candidate[i] = self[parent].y <= y < self[t].y
else:
candidate[i] = y <= self[t].y
if not candidate.any():
return None
off[~candidate] = off.max()
best = np.argmin(off)
return self.tree.index[best]
def tree_to_xy(self, tree):
"""
Convert the locations of one or more (sub)trees into a list of
x,y coordinates suitable for plotting.
Parameters:
-----------
tree: Tree instance, or list of trees
The (sub) tree(s) to generate xy coordinates for
Outputs:
--------
A list of x and y values tracing the tree. If the input is a
list of trees, then the xy list for each tree will be
separated by None. This is convenient for plotting to
matplotlib, since it will not draw lines between the different
trees.
"""
#code for when t is a list of trees
if isinstance(tree, list):
x = []
y = []
for t in tree:
xx, yy = self.tree_to_xy(t)
x.extend(xx)
y.extend(yy)
x.append(None)
y.append(None)
return (x, y)
# code for when tree is a scalar
x = [self[tree].x]
y = [self[tree].y]
for c in tree.children:
xx, yy = self.tree_to_xy(c)
x.extend([self[tree].x, xx[0]])
y.extend([self[tree].y, self[tree].y])
x += xx
y += yy
x.append(None)
y.append(None)
return (x, y)
def branch_to_xy(self, branch):
"""
Convert one or more single branches to a list of line segments
for plotting.
Parameters:
-----------
branch: Tree instance, or id of a tree, or a list of these
The branch(es) to consider
Outputs:
--------
A set of xy coordinates describing the branches
"""
# code for when branch is a list of branches
if isinstance(branch, list):
x = []
y = []
for b in branch:
xx, yy = self.branch_to_xy(b)
x.extend(xx)
y.extend(yy)
x.append(None)
y.append(None)
return (x, y)
#code for when branch is a scalar
node = self[branch].node
parent = node.parent
if parent:
x = [self[branch].x, self[branch].x, self[parent].x]
y = [self[branch].y, self[parent].y, self[parent].y]
return (x, y)
else:
return ([self[branch].x], [self[branch].y])
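# Illustrative sketch (not part of the original module): how the dictionary-like
# interface described in the TreeLayout docstring is typically used. The Newick
# string is an arbitrary example tree.
def _tree_layout_sketch():
    from .tree import NewickTree
    tree = NewickTree('((0,1)4,(2,3)5)6;')
    layout = TreeLayout(tree)
    root = layout[tree]  # nodes can be looked up by object or by id
    return root.x, root.y, root.width, layout.tree_to_xy(tree)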
class DendrogramLayout(TreeLayout):
def __init__(self, tree, data):
self.data = data
super(DendrogramLayout, self).__init__(tree)
def layout(self):
super(DendrogramLayout, self).layout()
self.set_height()
def set_height(self):
nbranch = len(self.tree.index)
nleaf = (nbranch + 1) / 2
hival = self.data.max()
for id in self.tree.index:
self[id].y = hival
for id in self.tree.index:
hit = np.where(self.tree.index_map == id)
assert(len(hit) > 0)
if id < nleaf:
self[id].y = self.data[hit].max()
if len(hit) == 0:
loval = 0
else:
loval = self.data[hit].min()
parent = self[id].node.parent
if not parent:
continue
self[parent].y = min(self[parent].y, loval)
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/tree_layout.py",
"copies": "1",
"size": "7890",
"license": "bsd-3-clause",
"hash": -3383728852712354300,
"line_mean": 27.9010989011,
"line_max": 76,
"alpha_frac": 0.5121673004,
"autogenerated": false,
"ratio": 4.060730828615543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5072898129015543,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from glue.external import six
from glue.core.data_factories.helpers import has_extension
from glue.core.component import Component, CategoricalComponent
from glue.core.data import Data
from glue.config import data_factory, qglue_parser
__all__ = ['pandas_read_table']
def panda_process(indf):
"""
Build a data set from a table using pandas. This attempts to respect
categorical data input by letting pandas.read_csv infer the type
"""
result = Data()
for name, column in indf.iteritems():
if (column.dtype == np.object) | (column.dtype == np.bool):
# try to salvage numerical data
try:
coerced = pd.to_numeric(column, errors='coerce')
except AttributeError: # pandas < 0.19
coerced = column.convert_objects(convert_numeric=True)
if (coerced.dtype != column.dtype) and coerced.isnull().mean() < 0.4:
c = Component(coerced.values)
else:
# pandas has a 'special' nan implementation and this doesn't
# play well with np.unique
c = CategoricalComponent(column.fillna(''))
else:
c = Component(column.values)
# convert header to string - in some cases if the first row contains
# numbers, these are cast to numerical types, so we want to change that
# here.
if not isinstance(name, six.string_types):
name = str(name)
# strip off leading #
name = name.strip()
if name.startswith('#'):
name = name[1:].strip()
result.add_component(c, name)
return result
@data_factory(label="Pandas Table", identifier=has_extension('csv csv txt tsv tbl dat'))
def pandas_read_table(path, **kwargs):
""" A factory for reading tabular data using pandas
:param path: path/to/file
:param kwargs: All kwargs are passed to pandas.read_csv
:returns: :class:`glue.core.data.Data` object
"""
import pandas as pd
try:
from pandas.io.common import CParserError
except ImportError: # pragma: no cover
try:
from pandas.parser import CParserError
except ImportError: # pragma: no cover
from pandas._parser import CParserError
# iterate over common delimiters to search for best option
delimiters = kwargs.pop('delimiter', [None] + list(',|\t '))
fallback = None
for d in delimiters:
try:
indf = pd.read_csv(path, delimiter=d, **kwargs)
# ignore files parsed to empty dataframes
if len(indf) == 0:
continue
# only use files parsed to single-column dataframes
# if we don't find a better strategy
if len(indf.columns) < 2:
fallback = indf
continue
return panda_process(indf)
except CParserError:
continue
if fallback is not None:
return panda_process(fallback)
raise IOError("Could not parse %s using pandas" % path)
try:
import pandas as pd
except ImportError:
pass
else:
@qglue_parser(pd.DataFrame)
def _parse_data_dataframe(data, label):
label = label or 'Data'
result = Data(label=label)
for c in data.columns:
result.add_component(data[c], str(c))
return [result]
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/data_factories/pandas.py",
"copies": "2",
"size": "3493",
"license": "bsd-3-clause",
"hash": -8185681234212810000,
"line_mean": 29.6403508772,
"line_max": 88,
"alpha_frac": 0.6117950186,
"autogenerated": false,
"ratio": 4.17822966507177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.579002468367177,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
h5py = pytest.importorskip('h5py')
from datashape import discover
from blaze import compute
from blaze.expr import symbol
from blaze.utils import tmpfile
from blaze.compute.h5py import pre_compute, optimize
def eq(a, b):
return (a == b).all()
x = np.arange(20*24, dtype='f4').reshape((20, 24))
@pytest.yield_fixture
def file():
with tmpfile('.h5') as filename:
f = h5py.File(filename)
d = f.create_dataset('/x', shape=x.shape, dtype=x.dtype,
fillvalue=0.0, chunks=(4, 6))
d[:] = x
yield f
f.close()
@pytest.yield_fixture
def data(file):
yield file['/x']
@pytest.yield_fixture
def data_1d_chunks():
with tmpfile('.h5') as filename:
f = h5py.File(filename)
d = f.create_dataset('/x', shape=x.shape, dtype=x.dtype,
fillvalue=0.0, chunks=(1, 24))
d[:] = x
yield d
f.close()
rec = np.empty(shape=(20, 24), dtype=[('x', 'i4'), ('y', 'i4')])
rec['x'] = 1
rec['y'] = 2
@pytest.yield_fixture
def recdata():
with tmpfile('.h5') as filename:
f = h5py.File(filename)
d = f.create_dataset('/x', shape=rec.shape,
dtype=rec.dtype,
chunks=(4, 6))
d['x'] = rec['x']
d['y'] = rec['y']
yield d
f.close()
s = symbol('s', discover(x))
def test_slicing(data):
for idx in [0, 1, (0, 1), slice(1, 3), (0, slice(1, 5, 2))]:
assert eq(compute(s[idx], data), x[idx])
def test_reductions(data):
assert eq(compute(s.sum(), data), x.sum())
assert eq(compute(s.sum(axis=1), data), x.sum(axis=1))
assert eq(compute(s.sum(axis=0), data), x.sum(axis=0))
assert eq(compute(s[0], data), x[0])
assert eq(compute(s[-1], data), x[-1])
def test_mixed(recdata):
s = symbol('s', discover(recdata))
expr = (s.x + 1).sum(axis=1)
assert eq(compute(expr, recdata), compute(expr, rec))
def test_uneven_chunk_size(data):
assert eq(compute(s.sum(axis=1), data, chunksize=(7, 7)),
x.sum(axis=1))
def test_nrows_3D_records(recdata):
s = symbol('s', discover(recdata))
assert not hasattr(s, 'nrows')
@pytest.mark.xfail(raises=AttributeError,
reason="We don't support nrows on arrays")
def test_nrows_array(data):
assert compute(s.nrows, data) == len(data)
def test_nelements_records(recdata):
s = symbol('s', discover(recdata))
assert compute(s.nelements(), recdata) == np.prod(recdata.shape)
np.testing.assert_array_equal(compute(s.nelements(axis=0), recdata),
np.zeros(recdata.shape[1]) + recdata.shape[0])
def test_nelements_array(data):
lhs = compute(s.nelements(axis=1), data)
rhs = data.shape[1]
np.testing.assert_array_equal(lhs, rhs)
lhs = compute(s.nelements(axis=0), data)
rhs = data.shape[0]
np.testing.assert_array_equal(lhs, rhs)
lhs = compute(s.nelements(axis=(0, 1)), data)
rhs = np.prod(data.shape)
np.testing.assert_array_equal(lhs, rhs)
def test_field_access_on_file(file):
s = symbol('s', '{x: 20 * 24 * float32}')
d = compute(s.x, file)
# assert isinstance(d, h5py.Dataset)
assert eq(d[:], x)
def test_field_access_on_group(file):
s = symbol('s', '{x: 20 * 24 * float32}')
d = compute(s.x, file['/'])
# assert isinstance(d, h5py.Dataset)
assert eq(d[:], x)
def test_compute_on_file(file):
s = symbol('s', discover(file))
assert eq(compute(s.x.sum(axis=1), file),
x.sum(axis=1))
assert eq(compute(s.x.sum(), file, chunksize=(4, 6)),
x.sum())
def test_compute_on_1d_chunks(data_1d_chunks):
assert eq(compute(s.sum(), data_1d_chunks),
x.sum())
def test_arithmetic_on_small_array(data):
s = symbol('s', discover(data))
assert eq(compute(s + 1, data),
compute(s + 1, x))
def test_arithmetic_on_small_array_from_file(file):
""" Want to make sure that we call pre_compute on Dataset
Even when it's not the leaf data input. """
s = symbol('s', discover(file))
assert eq(compute(s.x + 1, file),
x + 1)
def test_pre_compute_doesnt_collapse_slices(data):
s = symbol('s', discover(data))
assert pre_compute(s[:5], data) is data
def test_optimize_slicing(data):
a = symbol('a', discover(data))
b = symbol('b', discover(data))
assert optimize((a + 1)[:3], data).isidentical(a[:3] + 1)
assert optimize((a + b)[:3], data).isidentical(a[:3] + b[:3])
def test_optimize_slicing_on_file(file):
f = symbol('f', discover(file))
assert optimize((f.x + 1)[:5], file).isidentical(f.x[:5] + 1)
def test_arithmetic_and_then_slicing(data):
s = symbol('s', discover(data))
assert eq(compute((2*s + 1)[0], data, pre_compute=False),
2*x[0] + 1)
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/compute/tests/test_h5py.py",
"copies": "1",
"size": "5006",
"license": "bsd-3-clause",
"hash": -1991720706024508700,
"line_mean": 24.9378238342,
"line_max": 80,
"alpha_frac": 0.5803036356,
"autogenerated": false,
"ratio": 3.0120336943441637,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4092337329944164,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from .. import tree_layout as tl
from ..tree import NewickTree
def test_invalid_input():
with pytest.raises(TypeError) as exc:
layout = tl.TreeLayout(None)
assert exc.value.args[0] == 'Input not a tree object: %s' % type(None)
def test_layout_indexable_by_tree_or_id():
tree = NewickTree('((0,1)4,(2,3)5)6;')
layout = tl.TreeLayout(tree)
assert layout[tree] is layout[tree.id]
def test_default_layout():
tree = NewickTree('((0,1)4,(2,3)5)6;')
layout = tl.TreeLayout(tree)
t6 = tree
t4, t5 = t6.children
t0, t1 = t4.children
t2, t3 = t5.children
ts = [t0, t1, t2, t3, t4, t5, t6]
xs = [-1.5, -.5, .5, 1.5, -1, 1, 0]
ys = [2, 2, 2, 2, 1, 1, 0]
for t, x, y in zip(ts, xs, ys):
assert layout[t].x == x
assert layout[t].y == y
assert layout[t].width == 1
assert layout[t].height == 0
def test_layout_single_leaf():
tree = NewickTree('0;')
layout = tl.TreeLayout(tree)
assert layout[tree].x == 0
assert layout[tree].y == 0
def test_pick():
tree = NewickTree('((0,1)4,(2,3)5)6;')
layout = tl.TreeLayout(tree)
#exact match
assert layout.pick(0, 0) is tree
#closest match, below
assert layout.pick(0, -1) is tree
#only pick if y position is <= node
assert layout.pick(-.01, .01) is tree.children[0]
assert layout.pick(0, 2.1) is None
def test_tree_to_xy():
tree = NewickTree('(0,1)2;')
layout = tl.TreeLayout(tree)
x = np.array([0, 0, -.5, -.5, None, 0, .5, .5, None], dtype=float)
y = np.array([0, 0, 0, 1, None, 0, 0, 1, None], dtype=float)
xx, yy = layout.tree_to_xy(tree)
np.testing.assert_array_almost_equal(x, np.array(xx, dtype=float))
np.testing.assert_array_almost_equal(y, np.array(yy, dtype=float))
def test_tree_to_xy_list():
tree = NewickTree('(0,1)2;')
layout = tl.TreeLayout(tree)
x = np.array([-0.5, None, .5, None], dtype=float)
y = np.array([1, None, 1, None], dtype=float)
xx, yy = layout.tree_to_xy(tree.children)
np.testing.assert_array_almost_equal(x, np.array(xx, dtype=float))
np.testing.assert_array_almost_equal(y, np.array(yy, dtype=float))
def test_branch_to_xy_branch():
tree = NewickTree('((0,1)4,(2,3)5)6;')
layout = tl.TreeLayout(tree)
x = [-1, -1, 0]
y = [1, 0, 0]
xx, yy = layout.branch_to_xy(tree.children[0])
np.testing.assert_array_almost_equal(x, xx)
np.testing.assert_array_almost_equal(y, yy)
def test_branch_to_xy_root():
tree = NewickTree('((0,1)4,(2,3)5)6;')
layout = tl.TreeLayout(tree)
x = [0]
y = [0]
xx, yy = layout.branch_to_xy(tree)
np.testing.assert_array_almost_equal(x, xx)
np.testing.assert_array_almost_equal(y, yy)
def test_branch_to_xy_leaf():
tree = NewickTree('((0,1)4,(2,3)5)6;')
layout = tl.TreeLayout(tree)
x = [-1.5, -1.5, -1]
y = [2, 1, 1]
xx, yy = layout.branch_to_xy(tree.children[0].children[0])
np.testing.assert_array_almost_equal(x, xx)
np.testing.assert_array_almost_equal(y, yy)
def test_branch_to_xy_list():
tree = NewickTree('((0,1)4,(2,3)5)6;')
layout = tl.TreeLayout(tree)
x = np.array([0, None, 0, None], dtype=float)
y = np.array([0, None, 0, None], dtype=float)
xx, yy = layout.branch_to_xy([tree, tree])
np.testing.assert_array_almost_equal(x, np.array(xx, dtype=float))
np.testing.assert_array_almost_equal(y, np.array(yy, dtype=float))
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/tests/test_tree_layout.py",
"copies": "1",
"size": "3570",
"license": "bsd-3-clause",
"hash": -7599557393304830000,
"line_mean": 25.4444444444,
"line_max": 74,
"alpha_frac": 0.6028011204,
"autogenerated": false,
"ratio": 2.6761619190404797,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37789630394404794,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet.bn import batch_normalization
def linear(x):
return x
class BatchNormLayer(object):
"""Batch normalization layer.
    Core algorithm is adapted from Lasagne.
(https://github.com/Lasagne/Lasagne)
"""
layers = []
def __init__(self, input_shape, layer_name=None, epsilon=1e-4, alpha=0.05):
if len(input_shape) == 2:
self.axes = (0,)
shape = [input_shape[1]]
elif len(input_shape) == 4:
self.axes = (0, 2, 3)
shape = [input_shape[0]]
else:
raise NotImplementedError
self.layer_name = 'BN' if layer_name is None else layer_name
self.epsilon = epsilon
self.alpha = alpha
self.deterministic = False
self.update_averages = True
self.gamma = theano.shared(np.ones(shape, dtype=theano.config.floatX),
name=layer_name + '_G', borrow=True)
self.beta = theano.shared(np.zeros(shape, dtype=theano.config.floatX),
name=layer_name + '_B', borrow=True)
self.mean = theano.shared(np.zeros(shape, dtype=theano.config.floatX),
name=layer_name + '_mean', borrow=True)
self.inv_std = theano.shared(
np.ones(shape, dtype=theano.config.floatX),
name=layer_name + '_inv_std', borrow=True)
self.params = [self.gamma, self.beta]
self.statistics = [self.mean, self.inv_std]
BatchNormLayer.layers.append(self)
def get_output(self, input, **kwargs):
input_mean = input.mean(self.axes)
input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))
# input_inv_std = T.inv(T.sqrt(input.var(self.axes)) + 1E-6)
# Decide whether to use the stored averages or mini-batch statistics
use_averages = self.deterministic
if use_averages:
mean = self.mean
inv_std = self.inv_std
else:
mean = input_mean
inv_std = input_inv_std
# Decide whether to update the stored averages
update_averages = self.update_averages and not use_averages
if update_averages:
# Trick: To update the stored statistics, we create memory-aliased
# clones of the stored statistics:
running_mean = theano.clone(self.mean, share_inputs=False)
running_inv_std = theano.clone(self.inv_std, share_inputs=False)
# set a default update for them:
running_mean.default_update = ((1 - self.alpha) * running_mean +
self.alpha * input_mean)
running_inv_std.default_update = ((1 - self.alpha) *
running_inv_std +
self.alpha * input_inv_std)
# and make sure they end up in the graph without participating in
# the computation (this way their default_update will be collected
# and applied, but the computation will be optimized away):
mean += 0 * running_mean
inv_std += 0 * running_inv_std
# prepare dimshuffle pattern inserting broadcastable axes as needed
param_axes = iter(list(range(input.ndim - len(self.axes))))
pattern = ['x' if input_axis in self.axes
else next(param_axes)
for input_axis in range(input.ndim)]
# apply dimshuffle pattern to all parameters
beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
mean = mean.dimshuffle(pattern)
inv_std = inv_std.dimshuffle(pattern)
# normalize
normalized = (input - mean) * (gamma * inv_std) + beta
return normalized
def reset_mean_std(self):
# reset mean and std
self.mean.set_value(np.zeros(self.mean.get_value().shape,
dtype=theano.config.floatX))
        self.inv_std.set_value(np.ones(self.inv_std.get_value().shape,
                                       dtype=theano.config.floatX))
@staticmethod
def set_batch_norms_training(training):
deterministic = False if training else True
        print(' - Batch norm layers: deterministic =', deterministic)
for layer in BatchNormLayer.layers:
layer.deterministic = deterministic
layer.update_averages = not deterministic
@staticmethod
def reset_batch_norms_mean_std():
        print(' - Batch norm layers: reset mean and std')
for layer in BatchNormLayer.layers:
layer.reset_mean_std()
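# Illustrative sketch (not part of the original module): wiring the layer above
# into a tiny theano graph for a dense (batch, 100) input. Shapes and the layer
# name are arbitrary; this only shows the intended call pattern.
def _batch_norm_sketch():
    x = T.matrix('x')
    bn = BatchNormLayer(input_shape=(16, 100), layer_name='bn_demo')
    y = bn.get_output(x)
    f = theano.function([x], y)
    return f(np.random.randn(16, 100).astype(theano.config.floatX))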
class BatchNormLayerTheano(object):
"""Batch normalization layer
(Using theano.tensor.nnet.bn.batch_normalization)
    Core algorithm is adapted from Lasagne.
(https://github.com/Lasagne/Lasagne)
"""
layers = []
def __init__(self, input_shape, layer_name=None, activation=linear,
epsilon=1e-4, alpha=0.05):
if len(input_shape) == 2:
self.axes = (0,)
shape = [input_shape[1]]
elif len(input_shape) == 4:
self.axes = (0, 2, 3)
shape = [input_shape[0]]
else:
raise NotImplementedError
self.layer_name = 'BN' if layer_name is None else layer_name
self.epsilon = epsilon
self.alpha = alpha
self.deterministic = False
self.update_averages = True
self.activation = activation
self.act_name = activation.__name__
self.gamma = theano.shared(np.ones(shape, dtype=theano.config.floatX),
name=layer_name + '_G', borrow=True)
self.beta = theano.shared(np.zeros(shape, dtype=theano.config.floatX),
name=layer_name + '_B', borrow=True)
self.mean = theano.shared(np.zeros(shape, dtype=theano.config.floatX),
name=layer_name + '_mean', borrow=True)
self.std = theano.shared(np.ones(shape, dtype=theano.config.floatX),
name=layer_name + '_std', borrow=True)
self.params = [self.gamma, self.beta]
self.statistics = [self.mean, self.std]
BatchNormLayer.layers.append(self)
def get_output(self, input, **kwargs):
input_mean = input.mean(self.axes)
# input_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))
input_std = T.sqrt(input.var(self.axes) + self.epsilon)
# Decide whether to use the stored averages or mini-batch statistics
use_averages = self.deterministic
if use_averages:
mean = self.mean
std = self.std
else:
mean = input_mean
std = input_std
# Decide whether to update the stored averages
update_averages = self.update_averages and not use_averages
if update_averages:
# Trick: To update the stored statistics, we create memory-aliased
# clones of the stored statistics:
running_mean = theano.clone(self.mean, share_inputs=False)
running_std = theano.clone(self.std, share_inputs=False)
# set a default update for them:
running_mean.default_update = ((1 - self.alpha) * running_mean +
self.alpha * input_mean)
running_std.default_update = ((1 - self.alpha) * running_std +
self.alpha * input_std)
# and make sure they end up in the graph without participating in
# the computation (this way their default_update will be collected
# and applied, but the computation will be optimized away):
mean += 0 * running_mean
std += 0 * running_std
# prepare dimshuffle pattern inserting broadcastable axes as needed
param_axes = iter(list(range(input.ndim - len(self.axes))))
pattern = ['x' if input_axis in self.axes
else next(param_axes)
for input_axis in range(input.ndim)]
# apply dimshuffle pattern to all parameters
beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
mean = mean.dimshuffle(pattern)
std = std.dimshuffle(pattern)
# normalize
# normalized = (input - mean) * (gamma * std) + beta
normalized = batch_normalization(
input, gamma, beta, mean, std, mode='low_mem')
return self.activation(normalized)
def reset_mean_std(self):
# reset mean and std
self.mean.set_value(np.zeros(self.mean.get_value().shape,
dtype=theano.config.floatX))
self.std.set_value(np.ones(self.std.get_value().shape,
dtype=theano.config.floatX))
@staticmethod
def set_batch_norms_training(training):
deterministic = False if training else True
        print(' - Batch norm layers: deterministic =', deterministic)
for layer in BatchNormLayer.layers:
layer.deterministic = deterministic
layer.update_averages = not deterministic
@staticmethod
def reset_batch_norms_mean_std():
        print(' - Batch norm layers: reset mean and std')
for layer in BatchNormLayer.layers:
layer.reset_mean_std()
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/layers/normalization.py",
"copies": "1",
"size": "9687",
"license": "mit",
"hash": 3904224244393057000,
"line_mean": 40.2212765957,
"line_max": 79,
"alpha_frac": 0.5819139052,
"autogenerated": false,
"ratio": 4.097715736040609,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5179629641240608,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import xarray as xr
from . import requires_dask
class Reindex:
def setup(self):
data = np.random.RandomState(0).randn(1000, 100, 100)
self.ds = xr.Dataset({'temperature': (('time', 'x', 'y'), data)},
coords={'time': np.arange(1000),
'x': np.arange(100),
'y': np.arange(100)})
def time_1d_coarse(self):
self.ds.reindex(time=np.arange(0, 1000, 5)).load()
def time_1d_fine_all_found(self):
self.ds.reindex(time=np.arange(0, 1000, 0.5), method='nearest').load()
def time_1d_fine_some_missing(self):
self.ds.reindex(time=np.arange(0, 1000, 0.5), method='nearest',
tolerance=0.1).load()
def time_2d_coarse(self):
self.ds.reindex(x=np.arange(0, 100, 2), y=np.arange(0, 100, 2)).load()
def time_2d_fine_all_found(self):
self.ds.reindex(x=np.arange(0, 100, 0.5), y=np.arange(0, 100, 0.5),
method='nearest').load()
def time_2d_fine_some_missing(self):
self.ds.reindex(x=np.arange(0, 100, 0.5), y=np.arange(0, 100, 0.5),
method='nearest', tolerance=0.1).load()
class ReindexDask(Reindex):
def setup(self):
requires_dask()
super(ReindexDask, self).setup()
self.ds = self.ds.chunk({'time': 100})
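# Illustrative sketch (not part of the benchmark): what the timed reindex calls
# above do, on a dataset small enough to inspect. Values and coordinates are
# made up.
def _reindex_sketch():
    small = xr.Dataset({'temperature': (('time',), np.arange(10.0))},
                       coords={'time': np.arange(10)})
    return small.reindex(time=np.arange(0, 10, 0.5), method='nearest',
                         tolerance=0.1)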
| {
"repo_name": "shoyer/xray",
"path": "asv_bench/benchmarks/reindexing.py",
"copies": "1",
"size": "1475",
"license": "apache-2.0",
"hash": -7596367717624044000,
"line_mean": 32.5227272727,
"line_max": 78,
"alpha_frac": 0.5484745763,
"autogenerated": false,
"ratio": 3.165236051502146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4213710627802146,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import xarray as xr
from . import requires_dask
class Reindex(object):
def setup(self):
data = np.random.RandomState(0).randn(1000, 100, 100)
self.ds = xr.Dataset({'temperature': (('time', 'x', 'y'), data)},
coords={'time': np.arange(1000),
'x': np.arange(100),
'y': np.arange(100)})
def time_1d_coarse(self):
self.ds.reindex(time=np.arange(0, 1000, 5)).load()
def time_1d_fine_all_found(self):
self.ds.reindex(time=np.arange(0, 1000, 0.5), method='nearest').load()
def time_1d_fine_some_missing(self):
self.ds.reindex(time=np.arange(0, 1000, 0.5), method='nearest',
tolerance=0.1).load()
def time_2d_coarse(self):
self.ds.reindex(x=np.arange(0, 100, 2), y=np.arange(0, 100, 2)).load()
def time_2d_fine_all_found(self):
self.ds.reindex(x=np.arange(0, 100, 0.5), y=np.arange(0, 100, 0.5),
method='nearest').load()
def time_2d_fine_some_missing(self):
self.ds.reindex(x=np.arange(0, 100, 0.5), y=np.arange(0, 100, 0.5),
method='nearest', tolerance=0.1).load()
class ReindexDask(Reindex):
def setup(self):
requires_dask()
super(ReindexDask, self).setup()
self.ds = self.ds.chunk({'time': 100})
| {
"repo_name": "chunweiyuan/xarray",
"path": "asv_bench/benchmarks/reindexing.py",
"copies": "2",
"size": "1483",
"license": "apache-2.0",
"hash": -7699801554381506000,
"line_mean": 32.7045454545,
"line_max": 78,
"alpha_frac": 0.5495616993,
"autogenerated": false,
"ratio": 3.1688034188034186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47183651181034186,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
def ut_diagn(coef, opt):
if opt['RunTimeDisp']:
print('diagnostics ... ', end='')
coef['diagn'] = {}
if opt['twodim']:
PE = np.sum(coef['Lsmaj']**2 + coef['Lsmin']**2)
PE = 100 * (coef['Lsmaj']**2 + coef['Lsmin']**2) / PE
SNR = (coef['Lsmaj']**2 + coef['Lsmin']**2) / (
(coef['Lsmaj_ci']/1.96)**2 +
(coef['Lsmin_ci']/1.96)**2)
else:
PE = 100 * coef['A']**2 / np.sum(coef['A']**2)
SNR = (coef['A']**2) / (coef['A_ci']/1.96)**2
indPE = PE.argsort()[::-1]
coef['diagn']['name'] = coef['name'][indPE]
coef['diagn']['PE'] = PE[indPE]
coef['diagn']['SNR'] = SNR[indPE]
return coef, indPE
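# Illustrative sketch (not part of the original module): calling ut_diagn on a
# minimal one-dimensional coef/opt pair. The constituent names, amplitudes and
# confidence intervals are made up purely to show the PE/SNR ordering.
def _ut_diagn_sketch():
    coef = {'name': np.array(['M2', 'S2', 'K1']),
            'A': np.array([1.0, 0.5, 0.25]),
            'A_ci': np.array([0.1, 0.1, 0.1])}
    opt = {'RunTimeDisp': False, 'twodim': False}
    coef, indPE = ut_diagn(coef, opt)
    return coef['diagn']['name'], coef['diagn']['PE'], indPE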
# [~,indPE] = sort(PE,'descend');
# coef.diagn.name = coef.name(indPE);
# coef.diagn.PE = PE(indPE);
# coef.diagn.SNR = SNR; % used in ut_diagntable; ordered by PE there
# if opt.twodim
# [coef.diagn,usnrc,vsnrc] = ut_diagntable(coef,cnstit,...
# t,u,v,xmod,m,B,W,varMSM,Gall,Hall,elor,varcov_mCw,indPE);
# else
# [coef.diagn,usnrc,~] = ut_diagntable(coef,cnstit,...
# t,u,[],xmod,m,B,W,varMSM,Gall,Hall,elor,varcov_mCw,indPE);
# end
# if opt.diagnplots
# tmp = nan*ones(size(uin));
# tmp(uvgd) = usnrc;
# usnrc = tmp;
# tmp = nan*ones(size(uin));
# tmp(uvgd) = e;
# e = tmp;
# if opt.twodim
# tmp = nan*ones(size(uin));
# tmp(uvgd) = vsnrc;
# vsnrc = tmp;
# ut_diagnfigs(coef,indPE,tin,uin,vin,usnrc,vsnrc,e);
# else
# ut_diagnfigs(coef,indPE,tin,uin,[],usnrc,[],e);
# end
# end
# end
| {
"repo_name": "efiring/UTide",
"path": "utide/diagnostics.py",
"copies": "1",
"size": "1755",
"license": "mit",
"hash": 2991154380576400400,
"line_mean": 28.25,
"line_max": 71,
"alpha_frac": 0.505982906,
"autogenerated": false,
"ratio": 2.461430575035063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3467413481035063,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import random
import numpy as np
import librosa  # needed by the pitch-shift / time-stretch branches below
def random_onoff():
    # Minimal helper assumed by augment_audio below (it is referenced but not
    # defined in the original snippet): a 50/50 coin flip deciding whether a
    # given augmentation is applied.
    return random.random() < 0.5
# returns a list of augmented audio data, stereo or mono
def augment_audio(y,
sr,
n_augment=0,
allow_speedandpitch=True,
allow_pitch=True,
allow_speed=True,
allow_dyn=True,
allow_noise=True,
allow_timeshift=True,
tab="",
quiet=False):
mods = [y] # always returns the original as element zero
length = y.shape[0]
for i in range(n_augment):
if not quiet:
print(tab + "augment_audio: ", i + 1, "of", n_augment)
        y_mod = y.copy()  # work on a copy so in-place ops below do not clobber the original
count_changes = 0
# change speed and pitch together
if (allow_speedandpitch) and random_onoff():
length_change = np.random.uniform(low=0.9, high=1.1)
speed_fac = 1.0 / length_change
if not quiet:
print(tab + " resample length_change = ", length_change)
tmp = np.interp(np.arange(0, len(y), speed_fac), np.arange(0, len(y)), y)
#tmp = resample(y,int(length*lengt_fac)) # signal.resample is too slow
minlen = min(y.shape[0], tmp.shape[0]) # keep same length as original;
y_mod *= 0 # pad with zeros
y_mod[0:minlen] = tmp[0:minlen]
count_changes += 1
# change pitch (w/o speed)
if (allow_pitch) and random_onoff():
bins_per_octave = 24 # pitch increments are quarter-steps
pitch_pm = 4 # +/- this many quarter steps
pitch_change = pitch_pm * 2 * (np.random.uniform() - 0.5)
if not quiet:
print(tab + " pitch_change = ", pitch_change)
y_mod = librosa.effects.pitch_shift(y,
sr,
n_steps=pitch_change,
bins_per_octave=bins_per_octave)
count_changes += 1
# change speed (w/o pitch),
if (allow_speed) and random_onoff():
speed_change = np.random.uniform(low=0.9, high=1.1)
if not quiet:
print(tab + " speed_change = ", speed_change)
tmp = librosa.effects.time_stretch(y_mod, speed_change)
minlen = min(y.shape[0], tmp.shape[0]) # keep same length as original;
y_mod *= 0 # pad with zeros
y_mod[0:minlen] = tmp[0:minlen]
count_changes += 1
# change dynamic range
if (allow_dyn) and random_onoff():
dyn_change = np.random.uniform(low=0.5, high=1.1) # change amplitude
if not quiet:
print(tab + " dyn_change = ", dyn_change)
y_mod = y_mod * dyn_change
count_changes += 1
# add noise
if (allow_noise) and random_onoff():
noise_amp = 0.005 * np.random.uniform() * np.amax(y)
if random_onoff():
if not quiet:
print(tab + " gaussian noise_amp = ", noise_amp)
y_mod += noise_amp * np.random.normal(size=length)
else:
if not quiet:
print(tab + " uniform noise_amp = ", noise_amp)
                y_mod += noise_amp * (2 * np.random.uniform(size=length) - 1)  # uniform noise in [-1, 1), matching the label printed above
count_changes += 1
# shift in time forwards or backwards
if (allow_timeshift) and random_onoff():
timeshift_fac = 0.2 * 2 * (np.random.uniform() - 0.5
) # up to 20% of length
if not quiet:
print(tab + " timeshift_fac = ", timeshift_fac)
start = int(length * timeshift_fac)
if (start > 0):
y_mod = np.pad(y_mod, (start, 0), mode='constant')[0:y_mod.shape[0]]
else:
y_mod = np.pad(y_mod, (0, -start), mode='constant')[0:y_mod.shape[0]]
count_changes += 1
# last-ditch effort to make sure we made a change (recursive/sloppy, but...works)
if (0 == count_changes):
if not quiet:
print("No changes made to signal, trying again")
mods.append(
augment_audio(y, sr, n_augment=1, tab=" ", quiet=quiet)[1])
else:
mods.append(y_mod)
return mods
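# Illustrative sketch (not part of the original module): augmenting one second
# of synthetic audio. A real call would pass a waveform loaded with e.g.
# librosa.load(); the sine wave here is only a stand-in, and the librosa-based
# branches are switched off so the sketch needs numpy only.
def _augment_audio_sketch():
    sr = 22050
    t = np.linspace(0, 1, sr, endpoint=False)
    y = 0.5 * np.sin(2 * np.pi * 440.0 * t).astype(np.float32)
    mods = augment_audio(y, sr, n_augment=2, allow_pitch=False,
                         allow_speed=False, quiet=True)
    return len(mods), mods[0] is y  # element 0 is always the original signal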
""" scale frequency axis logarithmically """
def logscale_spec(spec, sr=44100, factor=20., alpha=1.0, f0=0.9, fmax=1):
spec = spec[:, 0:256]
timebins, freqbins = np.shape(spec)
scale = np.linspace(0, 1, freqbins) #** factor
# http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=650310&url=http%3A%2F%2Fieeexplore.ieee.org%2Fiel4%2F89%2F14168%2F00650310
    scale = np.array(
        list(map(
            lambda x: x * alpha if x <= f0 else (fmax - alpha * f0) /
            (fmax - f0) * (x - f0) + alpha * f0, scale)))
scale *= (freqbins - 1) / max(scale)
newspec = np.complex128(np.zeros([timebins, freqbins]))
allfreqs = np.abs(np.fft.fftfreq(freqbins * 2, 1. / sr)[:freqbins + 1])
freqs = [0.0 for i in range(freqbins)]
totw = [0.0 for i in range(freqbins)]
for i in range(0, freqbins):
if (i < 1 or i + 1 >= freqbins):
newspec[:, i] += spec[:, i]
freqs[i] += allfreqs[i]
totw[i] += 1.0
continue
else:
# scale[15] = 17.2
w_up = scale[i] - np.floor(scale[i])
w_down = 1 - w_up
j = int(np.floor(scale[i]))
newspec[:, j] += w_down * spec[:, i]
freqs[j] += w_down * allfreqs[i]
totw[j] += w_down
newspec[:, j + 1] += w_up * spec[:, i]
freqs[j + 1] += w_up * allfreqs[i]
totw[j + 1] += w_up
for i in range(len(freqs)):
if (totw[i] > 1e-6):
freqs[i] /= totw[i]
return newspec, freqs
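# Illustrative sketch (not part of the original module): rescaling a random
# magnitude spectrogram. The shape is arbitrary; only the first 256 frequency
# bins are used by logscale_spec.
def _logscale_spec_sketch():
    spec = np.abs(np.random.RandomState(0).randn(64, 512))
    newspec, freqs = logscale_spec(spec, sr=44100, alpha=0.8)
    return newspec.shape, len(freqs)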
| {
"repo_name": "imito/odin",
"path": "odin/preprocessing/audio/audio.py",
"copies": "1",
"size": "5392",
"license": "mit",
"hash": 3362171369931488000,
"line_mean": 33.7870967742,
"line_max": 132,
"alpha_frac": 0.5497032641,
"autogenerated": false,
"ratio": 3.1095732410611303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.415927650516113,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
N_STAGES = 12
N_STAGES_EXTENDED = 16
INTERPOLATOR_POWER = 7
C = np.array([0.0,
0.526001519587677318785587544488e-01,
0.789002279381515978178381316732e-01,
0.118350341907227396726757197510,
0.281649658092772603273242802490,
0.333333333333333333333333333333,
0.25,
0.307692307692307692307692307692,
0.651282051282051282051282051282,
0.6,
0.857142857142857142857142857142,
1.0,
1.0,
0.1,
0.2,
0.777777777777777777777777777778])
A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
A[1, 0] = 5.26001519587677318785587544488e-2
A[2, 0] = 1.97250569845378994544595329183e-2
A[2, 1] = 5.91751709536136983633785987549e-2
A[3, 0] = 2.95875854768068491816892993775e-2
A[3, 2] = 8.87627564304205475450678981324e-2
A[4, 0] = 2.41365134159266685502369798665e-1
A[4, 2] = -8.84549479328286085344864962717e-1
A[4, 3] = 9.24834003261792003115737966543e-1
A[5, 0] = 3.7037037037037037037037037037e-2
A[5, 3] = 1.70828608729473871279604482173e-1
A[5, 4] = 1.25467687566822425016691814123e-1
A[6, 0] = 3.7109375e-2
A[6, 3] = 1.70252211019544039314978060272e-1
A[6, 4] = 6.02165389804559606850219397283e-2
A[6, 5] = -1.7578125e-2
A[7, 0] = 3.70920001185047927108779319836e-2
A[7, 3] = 1.70383925712239993810214054705e-1
A[7, 4] = 1.07262030446373284651809199168e-1
A[7, 5] = -1.53194377486244017527936158236e-2
A[7, 6] = 8.27378916381402288758473766002e-3
A[8, 0] = 6.24110958716075717114429577812e-1
A[8, 3] = -3.36089262944694129406857109825
A[8, 4] = -8.68219346841726006818189891453e-1
A[8, 5] = 2.75920996994467083049415600797e1
A[8, 6] = 2.01540675504778934086186788979e1
A[8, 7] = -4.34898841810699588477366255144e1
A[9, 0] = 4.77662536438264365890433908527e-1
A[9, 3] = -2.48811461997166764192642586468
A[9, 4] = -5.90290826836842996371446475743e-1
A[9, 5] = 2.12300514481811942347288949897e1
A[9, 6] = 1.52792336328824235832596922938e1
A[9, 7] = -3.32882109689848629194453265587e1
A[9, 8] = -2.03312017085086261358222928593e-2
A[10, 0] = -9.3714243008598732571704021658e-1
A[10, 3] = 5.18637242884406370830023853209
A[10, 4] = 1.09143734899672957818500254654
A[10, 5] = -8.14978701074692612513997267357
A[10, 6] = -1.85200656599969598641566180701e1
A[10, 7] = 2.27394870993505042818970056734e1
A[10, 8] = 2.49360555267965238987089396762
A[10, 9] = -3.0467644718982195003823669022
A[11, 0] = 2.27331014751653820792359768449
A[11, 3] = -1.05344954667372501984066689879e1
A[11, 4] = -2.00087205822486249909675718444
A[11, 5] = -1.79589318631187989172765950534e1
A[11, 6] = 2.79488845294199600508499808837e1
A[11, 7] = -2.85899827713502369474065508674
A[11, 8] = -8.87285693353062954433549289258
A[11, 9] = 1.23605671757943030647266201528e1
A[11, 10] = 6.43392746015763530355970484046e-1
A[12, 0] = 5.42937341165687622380535766363e-2
A[12, 5] = 4.45031289275240888144113950566
A[12, 6] = 1.89151789931450038304281599044
A[12, 7] = -5.8012039600105847814672114227
A[12, 8] = 3.1116436695781989440891606237e-1
A[12, 9] = -1.52160949662516078556178806805e-1
A[12, 10] = 2.01365400804030348374776537501e-1
A[12, 11] = 4.47106157277725905176885569043e-2
A[13, 0] = 5.61675022830479523392909219681e-2
A[13, 6] = 2.53500210216624811088794765333e-1
A[13, 7] = -2.46239037470802489917441475441e-1
A[13, 8] = -1.24191423263816360469010140626e-1
A[13, 9] = 1.5329179827876569731206322685e-1
A[13, 10] = 8.20105229563468988491666602057e-3
A[13, 11] = 7.56789766054569976138603589584e-3
A[13, 12] = -8.298e-3
A[14, 0] = 3.18346481635021405060768473261e-2
A[14, 5] = 2.83009096723667755288322961402e-2
A[14, 6] = 5.35419883074385676223797384372e-2
A[14, 7] = -5.49237485713909884646569340306e-2
A[14, 10] = -1.08347328697249322858509316994e-4
A[14, 11] = 3.82571090835658412954920192323e-4
A[14, 12] = -3.40465008687404560802977114492e-4
A[14, 13] = 1.41312443674632500278074618366e-1
A[15, 0] = -4.28896301583791923408573538692e-1
A[15, 5] = -4.69762141536116384314449447206
A[15, 6] = 7.68342119606259904184240953878
A[15, 7] = 4.06898981839711007970213554331
A[15, 8] = 3.56727187455281109270669543021e-1
A[15, 12] = -1.39902416515901462129418009734e-3
A[15, 13] = 2.9475147891527723389556272149
A[15, 14] = -9.15095847217987001081870187138
B = A[N_STAGES, :N_STAGES]
E3 = np.zeros(N_STAGES + 1)
E3[:-1] = B.copy()
E3[0] -= 0.244094488188976377952755905512
E3[8] -= 0.733846688281611857341361741547
E3[11] -= 0.220588235294117647058823529412e-1
E5 = np.zeros(N_STAGES + 1)
E5[0] = 0.1312004499419488073250102996e-1
E5[5] = -0.1225156446376204440720569753e+1
E5[6] = -0.4957589496572501915214079952
E5[7] = 0.1664377182454986536961530415e+1
E5[8] = -0.3503288487499736816886487290
E5[9] = 0.3341791187130174790297318841
E5[10] = 0.8192320648511571246570742613e-1
E5[11] = -0.2235530786388629525884427845e-1
# First 3 coefficients are computed separately.
D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
D[0, 0] = -0.84289382761090128651353491142e+1
D[0, 5] = 0.56671495351937776962531783590
D[0, 6] = -0.30689499459498916912797304727e+1
D[0, 7] = 0.23846676565120698287728149680e+1
D[0, 8] = 0.21170345824450282767155149946e+1
D[0, 9] = -0.87139158377797299206789907490
D[0, 10] = 0.22404374302607882758541771650e+1
D[0, 11] = 0.63157877876946881815570249290
D[0, 12] = -0.88990336451333310820698117400e-1
D[0, 13] = 0.18148505520854727256656404962e+2
D[0, 14] = -0.91946323924783554000451984436e+1
D[0, 15] = -0.44360363875948939664310572000e+1
D[1, 0] = 0.10427508642579134603413151009e+2
D[1, 5] = 0.24228349177525818288430175319e+3
D[1, 6] = 0.16520045171727028198505394887e+3
D[1, 7] = -0.37454675472269020279518312152e+3
D[1, 8] = -0.22113666853125306036270938578e+2
D[1, 9] = 0.77334326684722638389603898808e+1
D[1, 10] = -0.30674084731089398182061213626e+2
D[1, 11] = -0.93321305264302278729567221706e+1
D[1, 12] = 0.15697238121770843886131091075e+2
D[1, 13] = -0.31139403219565177677282850411e+2
D[1, 14] = -0.93529243588444783865713862664e+1
D[1, 15] = 0.35816841486394083752465898540e+2
D[2, 0] = 0.19985053242002433820987653617e+2
D[2, 5] = -0.38703730874935176555105901742e+3
D[2, 6] = -0.18917813819516756882830838328e+3
D[2, 7] = 0.52780815920542364900561016686e+3
D[2, 8] = -0.11573902539959630126141871134e+2
D[2, 9] = 0.68812326946963000169666922661e+1
D[2, 10] = -0.10006050966910838403183860980e+1
D[2, 11] = 0.77771377980534432092869265740
D[2, 12] = -0.27782057523535084065932004339e+1
D[2, 13] = -0.60196695231264120758267380846e+2
D[2, 14] = 0.84320405506677161018159903784e+2
D[2, 15] = 0.11992291136182789328035130030e+2
D[3, 0] = -0.25693933462703749003312586129e+2
D[3, 5] = -0.15418974869023643374053993627e+3
D[3, 6] = -0.23152937917604549567536039109e+3
D[3, 7] = 0.35763911791061412378285349910e+3
D[3, 8] = 0.93405324183624310003907691704e+2
D[3, 9] = -0.37458323136451633156875139351e+2
D[3, 10] = 0.10409964950896230045147246184e+3
D[3, 11] = 0.29840293426660503123344363579e+2
D[3, 12] = -0.43533456590011143754432175058e+2
D[3, 13] = 0.96324553959188282948394950600e+2
D[3, 14] = -0.39177261675615439165231486172e+2
D[3, 15] = -0.14972683625798562581422125276e+3
| {
"repo_name": "arokem/scipy",
"path": "scipy/integrate/_ivp/dop853_coefficients.py",
"copies": "6",
"size": "7303",
"license": "bsd-3-clause",
"hash": -8992214274971938000,
"line_mean": 36.4512820513,
"line_max": 64,
"alpha_frac": 0.7503765576,
"autogenerated": false,
"ratio": 1.9038060479666319,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.5654182605566632,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
try:
isclose = np.isclose
except AttributeError:
def isclose(*args, **kwargs):
raise RuntimeError("You need numpy version 1.7 or greater to use "
"isclose.")
try:
full = np.full
except AttributeError:
def full(shape, fill_value, dtype=None, order=None):
"""Our implementation of numpy.full because your numpy is old."""
if order is not None:
raise NotImplementedError("`order` kwarg is not supported upgrade "
"to Numpy 1.8 or greater for support.")
return np.multiply(fill_value, np.ones(shape, dtype=dtype),
dtype=dtype)
# Taken from scikit-learn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(1, .5, dtype='i8'), 2)
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Divide with dtype doesn't work on Python 3
def divide(x1, x2, out=None, dtype=None):
"""Implementation of numpy.divide that works with dtype kwarg.
Temporary compatibility fix for a bug in numpy's version. See
https://github.com/numpy/numpy/issues/3484 for the relevant issue."""
x = np.divide(x1, x2, out)
if dtype is not None:
x = x.astype(dtype)
return x
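# Illustrative sketch (not part of the original module): the fallbacks above are
# meant as drop-in replacements, so these calls behave the same whether numpy
# provides the functions natively or not (isclose still needs numpy >= 1.7).
def _numpy_compat_sketch():
    filled = full((2, 3), 5, dtype='i8')          # 2x3 array of fives
    halves = divide(np.arange(4), 2, dtype='f8')  # [0., 0.5, 1., 1.5]
    return filled, halves, isclose(0.1 + 0.2, 0.3)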
| {
"repo_name": "vikhyat/dask",
"path": "dask/array/numpy_compat.py",
"copies": "2",
"size": "1736",
"license": "bsd-3-clause",
"hash": -5901071135662620000,
"line_mean": 37.5777777778,
"line_max": 85,
"alpha_frac": 0.6065668203,
"autogenerated": false,
"ratio": 3.883668903803132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5490235724103132,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy
import random
# Various pre-crafted datasets/variables for testing
# !!! Must not be changed -- only appended !!!
# while testing numpy we better not rely on numpy to produce random
# sequences
random.seed(1)
# but will seed it nevertheless
numpy.random.seed(1)
nx, ny = 1000, 1000
# reduced squares based on indexes_rand, primarily for testing more
# time-consuming functions (ufunc, linalg, etc)
nxs, nys = 100, 100
# a set of interesting types to test
TYPES1 = [
'int16', 'float16',
'int32', 'float32',
'int64', 'float64', 'complex64',
'longfloat', 'complex128',
'complex256',
]
def memoize(func):
result = []
def wrapper():
if not result:
result.append(func())
return result[0]
return wrapper
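# Example: a zero-argument function decorated with @memoize above runs only once;
# get_values() below builds its random matrix on the first call and every later
# call returns the cached array.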
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
@memoize
def get_values():
rnd = numpy.random.RandomState(1)
values = numpy.tile(rnd.uniform(0, 100, size=nx*ny//10), 10)
return values
@memoize
def get_squares():
values = get_values()
squares = {t: numpy.array(values,
dtype=getattr(numpy, t)).reshape((nx, ny))
for t in TYPES1}
    # adjust complex ones to have a non-degenerate imaginary part -- use
# original data transposed for that
for t, v in squares.items():
if t.startswith('complex'):
v += v.T*1j
return squares
@memoize
def get_squares_():
# smaller squares
squares_ = {t: s[:nxs, :nys] for t, s in get_squares().items()}
return squares_
@memoize
def get_vectors():
# vectors
vectors = {t: s[0] for t, s in get_squares().items()}
return vectors
@memoize
def get_indexes():
indexes = list(range(nx))
# so we do not have all items
indexes.pop(5)
indexes.pop(95)
indexes = numpy.array(indexes)
return indexes
@memoize
def get_indexes_rand():
rnd = random.Random(1)
indexes_rand = get_indexes().tolist() # copy
rnd.shuffle(indexes_rand) # in-place shuffle
indexes_rand = numpy.array(indexes_rand)
return indexes_rand
@memoize
def get_indexes_():
# smaller versions
indexes = get_indexes()
indexes_ = indexes[indexes < nxs]
return indexes_
@memoize
def get_indexes_rand_():
indexes_rand = get_indexes_rand()
indexes_rand_ = indexes_rand[indexes_rand < nxs]
return indexes_rand_
class Benchmark(object):
goal_time = 0.25
| {
"repo_name": "I--P/numpy",
"path": "benchmarks/benchmarks/common.py",
"copies": "6",
"size": "2610",
"license": "bsd-3-clause",
"hash": 5925764381041466000,
"line_mean": 21.6956521739,
"line_max": 72,
"alpha_frac": 0.6448275862,
"autogenerated": false,
"ratio": 3.5317997293640055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00045766590389016015,
"num_lines": 115
} |
from __future__ import absolute_import, division, print_function
import operator
from functools import partial
from uuid import uuid4
from google.protobuf.message import Message
from mesos.interface import mesos_pb2
from .. import protobuf
class Map(dict):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# self[k] = v
@classmethod
def cast(cls, v):
if isinstance(v, Map):
return v
elif isinstance(v, dict):
return Map(**v)
elif hasattr(v, '__iter__'):
return map(cls.cast, v)
else:
return v
def __setitem__(self, k, v):
# accidental __missing__ call will create a new node
super(Map, self).__setitem__(k, self.cast(v))
def __setattr__(self, k, v):
prop = getattr(self.__class__, k, None)
if isinstance(prop, property): # property binding
prop.fset(self, v)
elif hasattr(v, '__call__'): # method binding
self.__dict__[k] = v
else:
self[k] = v
def __getattr__(self, k):
return self[k]
# def __delattr__(self, k):
# del self[k]
# def __missing__(self, k):
    #     # TODO: consider not using this, silences errors
# self[k] = Map()
# return self[k]
def __hash__(self):
return hash(tuple(self.items()))
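# Illustrative usage (not from the original source): m = Map(a=1, b={'c': 2})
# recursively casts the nested dict, so m.b.c == 2 and m['b']['c'] == 2 both hold,
# with attribute access and item access interchangeable.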
class RegisterProxies(type):
def __init__(cls, name, bases, nmspc):
super(RegisterProxies, cls).__init__(name, bases, nmspc)
if not hasattr(cls, 'registry'):
cls.registry = []
cls.registry.insert(0, (cls.proto, cls))
# cls.registry -= set(bases) # Remove base classes
# Metamethods, called on class objects:
def __iter__(cls):
return iter(cls.registry)
class MessageProxy(Map):
__metaclass__ = RegisterProxies
proto = Message
class Environment(MessageProxy):
proto = mesos_pb2.Environment
class Scalar(MessageProxy):
proto = mesos_pb2.Value.Scalar
class Resource(MessageProxy):
proto = mesos_pb2.Resource
# TODO: RangeResource e.g. ports
class ScalarResource(Resource):
    # supports comparison and basic arithmetic with scalars
proto = mesos_pb2.Resource(type=mesos_pb2.Value.SCALAR)
def __init__(self, value=None, **kwargs):
super(Resource, self).__init__(**kwargs)
if value is not None:
self.scalar = Scalar(value=value)
def __cmp__(self, other):
first, second = float(self), float(other)
if first < second:
return -1
elif first > second:
return 1
else:
return 0
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.scalar.value)
def __float__(self):
return float(self.scalar.value)
@classmethod
def _op(cls, op, first, second):
value = op(float(first), float(second))
return cls(value=value)
def __add__(self, other):
return self._op(operator.add, self, other)
def __radd__(self, other):
return self._op(operator.add, other, self)
def __sub__(self, other):
return self._op(operator.sub, self, other)
def __rsub__(self, other):
return self._op(operator.sub, other, self)
def __mul__(self, other):
return self._op(operator.mul, self, other)
def __rmul__(self, other):
return self._op(operator.mul, other, self)
def __truediv__(self, other):
return self._op(operator.truediv, self, other)
def __rtruediv__(self, other):
return self._op(operator.truediv, other, self)
def __iadd__(self, other):
self.scalar.value = float(self._op(operator.add, self, other))
return self
def __isub__(self, other):
self.scalar.value = float(self._op(operator.sub, self, other))
return self
class Cpus(ScalarResource):
proto = mesos_pb2.Resource(name='cpus', type=mesos_pb2.Value.SCALAR)
class Mem(ScalarResource):
proto = mesos_pb2.Resource(name='mem', type=mesos_pb2.Value.SCALAR)
class Disk(ScalarResource):
proto = mesos_pb2.Resource(name='disk', type=mesos_pb2.Value.SCALAR)
class ResourcesMixin(object):
@classmethod
def _cast_zero(cls, other=0):
if other == 0:
return cls(resources=[Cpus(0), Mem(0), Disk(0)])
else:
return other
@property
def cpus(self):
for res in self.resources:
if isinstance(res, Cpus):
return res
return Cpus(0.0)
@property
def mem(self):
for res in self.resources:
if isinstance(res, Mem):
return res
return Mem(0.0)
@property
def disk(self):
for res in self.resources:
if isinstance(res, Disk):
return res
return Disk(0.0)
# @property
# def ports(self):
# for res in self.resources:
# if isinstance(res, Ports):
# return [(rng.begin, rng.end) for rng in res.ranges.range]
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__,
', '.join(map(str, self.resources)))
def __cmp__(self, other):
other = self._cast_zero(other)
if all([self.cpus < other.cpus,
self.mem < other.mem,
self.disk < other.disk]):
            # all resources are smaller, so the task will fit into the offer
return -1
elif any([self.cpus > other.cpus,
self.mem > other.mem,
self.disk > other.disk]):
            # any resource is bigger, so the task won't fit into the offer
return 1
else:
return 0
def __radd__(self, other): # to support sum()
other = self._cast_zero(other)
return self + other
def __add__(self, other):
other = self._cast_zero(other)
# ports = list(set(self.ports) | set(other.ports))
cpus = self.cpus + other.cpus
mem = self.mem + other.mem
disk = self.disk + other.disk
mixin = self.__class__()
mixin.resources = [cpus, disk, mem]
return mixin
def __sub__(self, other):
other = self._cast_zero(other)
# ports = list(set(self.ports) | set(other.ports))
cpus = self.cpus - other.cpus
mem = self.mem - other.mem
disk = self.disk - other.disk
mixin = self.__class__()
mixin.resources = [cpus, disk, mem]
return mixin
def __iadd__(self, other):
other = self._cast_zero(other)
added = self + other
self.resources = added.resources
return self
def __isub__(self, other):
other = self._cast_zero(other)
subbed = self - other
self.resources = subbed.resources
return self
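# Illustrative example with hypothetical values: Cpus(1) + Cpus(2) evaluates to
# Cpus(3.0), and sum(offers) over a list of Offer objects works because __radd__
# above casts the integer 0 start value to an all-zero resource bundle first.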
class FrameworkID(MessageProxy):
proto = mesos_pb2.FrameworkID
class SlaveID(MessageProxy):
proto = mesos_pb2.SlaveID
class ExecutorID(MessageProxy):
proto = mesos_pb2.ExecutorID
class OfferID(MessageProxy):
proto = mesos_pb2.OfferID
class TaskID(MessageProxy):
proto = mesos_pb2.TaskID
class FrameworkInfo(MessageProxy):
proto = mesos_pb2.FrameworkInfo
class ExecutorInfo(MessageProxy):
proto = mesos_pb2.ExecutorInfo
def __init__(self, id=None, **kwargs):
super(ExecutorInfo, self).__init__(**kwargs)
self.id = id or str(uuid4())
@property
def id(self): # more consistent naming
return self['executor_id']
@id.setter
def id(self, value):
if not isinstance(value, ExecutorID):
value = ExecutorID(value=value)
self['executor_id'] = value
class MasterInfo(MessageProxy):
proto = mesos_pb2.MasterInfo
class SlaveInfo(MessageProxy):
proto = mesos_pb2.SlaveInfo
class Filters(MessageProxy):
proto = mesos_pb2.Filters
class TaskStatus(MessageProxy):
proto = mesos_pb2.TaskStatus
@property
def task_id(self): # more consistent naming
return self['task_id']
@task_id.setter
def task_id(self, value):
if not isinstance(value, TaskID):
value = TaskID(value=value)
self['task_id'] = value
def is_staging(self):
return self.state == 'TASK_STAGING'
def is_starting(self):
return self.state == 'TASK_STARTING'
def is_running(self):
return self.state == 'TASK_RUNNING'
def has_finished(self):
return self.state == 'TASK_FINISHED'
def has_succeeded(self):
return self.state == 'TASK_FINISHED'
def has_killed(self):
return self.state == 'TASK_KILLED'
def has_failed(self):
return self.state in ['TASK_FAILED', 'TASK_LOST', 'TASK_KILLED',
'TASK_ERROR']
def has_terminated(self):
return self.has_succeeded() or self.has_failed()
class Offer(ResourcesMixin, MessageProxy): # important order!
proto = mesos_pb2.Offer
class TaskInfo(ResourcesMixin, MessageProxy):
proto = mesos_pb2.TaskInfo
def __init__(self, id=None, **kwargs):
super(TaskInfo, self).__init__(**kwargs)
self.id = id or str(uuid4())
self.status = TaskStatus(task_id=self.id, state='TASK_STAGING')
@property
def id(self): # more consistent naming
return self['task_id']
@id.setter
def id(self, value):
if not isinstance(value, TaskID):
value = TaskID(value=value)
self['task_id'] = value
class CommandInfo(MessageProxy):
proto = mesos_pb2.CommandInfo
class ContainerInfo(MessageProxy):
proto = mesos_pb2.ContainerInfo
class DockerInfo(MessageProxy):
proto = mesos_pb2.ContainerInfo.DockerInfo
class Request(MessageProxy):
proto = mesos_pb2.Request
class Operation(MessageProxy):
proto = mesos_pb2.Offer.Operation
decode = partial(protobuf.decode, containers=MessageProxy.registry)
encode = partial(protobuf.encode, containers=MessageProxy.registry,
strict=False)
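# The decode/encode helpers above are assumed to convert between raw mesos protobuf
# messages and these Map-based proxies, using the (proto, class) registry that
# RegisterProxies collects as each MessageProxy subclass is defined; protobuf.decode
# and protobuf.encode themselves live elsewhere in the package.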
| {
"repo_name": "lensacom/satyr",
"path": "mentor/proxies/messages.py",
"copies": "1",
"size": "10084",
"license": "apache-2.0",
"hash": -8778908055611380000,
"line_mean": 24.4646464646,
"line_max": 76,
"alpha_frac": 0.5874652916,
"autogenerated": false,
"ratio": 3.719660641829583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4807125933429583,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
from functools import wraps
from itertools import chain, count
from collections import Iterator
from toolz import merge, unique, curry
from .optimize import cull, fuse
from .utils import concrete, funcname
from . import base
from .compatibility import apply
from . import threaded
__all__ = ['compute', 'do', 'value', 'Value']
def flat_unique(ls):
"""Flatten ``ls``, filter by unique id, and return a list"""
return list(unique(chain.from_iterable(ls), key=id))
def unzip(ls, nout):
"""Unzip a list of lists into ``nout`` outputs."""
out = list(zip(*ls))
if not out:
out = [()] * nout
return out
def to_task_dasks(expr):
"""Normalize a python object and extract all sub-dasks.
- Replace ``Values`` with their keys
- Convert literals to things the schedulers can handle
- Extract dasks from all enclosed values
Parameters
----------
expr : object
The object to be normalized. This function knows how to handle
``Value``s, as well as most builtin python types.
Returns
-------
task : normalized task to be run
dasks : list of dasks that form the dag for this task
Examples
--------
>>> a = value(1, 'a')
>>> b = value(2, 'b')
>>> task, dasks = to_task_dasks([a, b, 3])
>>> task # doctest: +SKIP
(list, ['a', 'b', 3])
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
>>> task, dasks = to_task_dasks({a: 1, b: 2})
>>> task # doctest: +SKIP
(dict, (list, [(list, ['a', 1]), (list, ['b', 2])]))
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
"""
if isinstance(expr, Value):
return expr.key, expr._dasks
if isinstance(expr, base.Base):
name = tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(expr.dask, keys)
dsk[name] = (expr._finalize, expr, (concrete, keys))
return name, [dsk]
if isinstance(expr, tuple) and type(expr) != tuple:
return expr, []
if isinstance(expr, (Iterator, list, tuple, set)):
args, dasks = unzip(map(to_task_dasks, expr), 2)
args = list(args)
dasks = flat_unique(dasks)
# Ensure output type matches input type
if isinstance(expr, (list, tuple, set)):
return (type(expr), args), dasks
else:
return args, dasks
if isinstance(expr, dict):
args, dasks = to_task_dasks(list([k, v] for k, v in expr.items()))
return (dict, args), dasks
return expr, []
tokens = ('_{0}'.format(i) for i in count(1))
def tokenize(*args, **kwargs):
"""Mapping function from task -> consistent name.
Parameters
----------
args : object
Python objects that summarize the task.
pure : boolean, optional
If True, a consistent hash function is tried on the input. If this
fails, then a unique identifier is used. If False (default), then a
unique identifier is always used.
"""
if kwargs.pop('pure', False):
return base.tokenize(*args)
return next(tokens)
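# Illustrative behaviour of tokenize above: tokenize(1, 2, pure=True) hashes its
# arguments into a deterministic name, while tokenize(1, 2) simply draws the next
# '_N' identifier from the module-level counter.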
def applyfunc(func, args, kwargs, pure=False):
"""Create a Value by applying a function to args.
Given a function and arguments, return a Value that represents the result
of that computation."""
args, dasks = unzip(map(to_task_dasks, args), 2)
if kwargs:
dask_kwargs, dasks2 = to_task_dasks(kwargs)
dasks = dasks + (dasks2,)
task = (apply, func, (list, list(args)), dask_kwargs)
else:
task = (func,) + args
name = funcname(func) + '-' + tokenize(*task, pure=pure)
dasks = flat_unique(dasks)
dasks.append({name: task})
return Value(name, dasks)
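# Sketch of what applyfunc produces for plain arguments and no kwargs: the call
# applyfunc(operator.add, (1, 2), {}) returns a Value whose single task is
# (operator.add, 1, 2), stored under a freshly tokenized name like 'add-_1'.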
@curry
def do(func, pure=False):
"""Wraps a function so that it outputs a ``Value``.
Examples
--------
Can be used as a decorator:
>>> @do
... def add(a, b):
... return a + b
>>> res = add(1, 2)
>>> type(res) == Value
True
>>> res.compute()
3
For other cases, it may be cleaner to call ``do`` on a function at call
time:
>>> res2 = do(sum)([res, 2, 3])
>>> res2.compute()
8
``do`` also accepts an optional keyword ``pure``. If False (default), then
subsequent calls will always produce a different ``Value``. This is useful
for non-pure functions (such as ``time`` or ``random``).
>>> from random import random
>>> out1 = do(random)()
>>> out2 = do(random)()
>>> out1.key == out2.key
False
If you know a function is pure (output only depends on the input, with no
global state), then you can set ``pure=True``. This will attempt to apply a
    consistent name to the output, but will fall back on the same behavior of
``pure=False`` if this fails.
>>> @do(pure=True)
... def add(a, b):
... return a + b
>>> out1 = add(1, 2)
>>> out2 = add(1, 2)
>>> out1.key == out2.key
True
"""
@wraps(func)
def _dfunc(*args, **kwargs):
return applyfunc(func, args, kwargs, pure=pure)
return _dfunc
def compute(*args, **kwargs):
"""Evaluate more than one ``Value`` at once.
Note that the only difference between this function and
``dask.base.compute`` is that this implicitly wraps python objects in
``Value``, allowing for collections of dask objects to be computed.
Examples
--------
>>> a = value(1)
>>> b = a + 2
>>> c = a + 3
>>> compute(b, c) # Compute both simultaneously
(3, 4)
>>> compute(a, [b, c]) # Works for lists of Values
(1, [3, 4])
"""
args = [value(a) for a in args]
return base.compute(*args, **kwargs)
def right(method):
"""Wrapper to create 'right' version of operator given left version"""
def _inner(self, other):
return method(other, self)
return _inner
class Value(base.Base):
"""Represents a value to be computed by dask.
Equivalent to the output from a single key in a dask graph.
"""
__slots__ = ('_key', '_dasks')
_optimize = staticmethod(lambda dsk, keys: dsk)
_finalize = staticmethod(lambda a, r: r[0])
_default_get = staticmethod(threaded.get)
def __init__(self, name, dasks):
object.__setattr__(self, '_key', name)
object.__setattr__(self, '_dasks', dasks)
@property
def dask(self):
return merge(*self._dasks)
@property
def key(self):
return self._key
def _keys(self):
return [self.key]
def __repr__(self):
return "Value({0})".format(repr(self.key))
def __hash__(self):
return hash(self.key)
def __dir__(self):
return dir(type(self))
def __getattr__(self, attr):
if not attr.startswith('_'):
return do(getattr, pure=True)(self, attr)
else:
raise AttributeError("Attribute {0} not found".format(attr))
def __setattr__(self, attr, val):
raise TypeError("Value objects are immutable")
def __setitem__(self, index, val):
raise TypeError("Value objects are immutable")
def __iter__(self):
raise TypeError("Value objects are not iterable")
def __call__(self, *args, **kwargs):
return do(apply, kwargs.pop('pure', False))(self, args, kwargs)
def __bool__(self):
raise TypeError("Truth of Value objects is not supported")
__nonzero__ = __bool__
__abs__ = do(operator.abs, True)
__add__ = do(operator.add, True)
__and__ = do(operator.and_, True)
__div__ = do(operator.floordiv, True)
__eq__ = do(operator.eq, True)
__floordiv__ = do(operator.floordiv, True)
__ge__ = do(operator.ge, True)
__getitem__ = do(operator.getitem, True)
__gt__ = do(operator.gt, True)
__index__ = do(operator.index, True)
__invert__ = do(operator.invert, True)
__le__ = do(operator.le, True)
__lshift__ = do(operator.lshift, True)
__lt__ = do(operator.lt, True)
__mod__ = do(operator.mod, True)
__mul__ = do(operator.mul, True)
__ne__ = do(operator.ne, True)
__neg__ = do(operator.neg, True)
__or__ = do(operator.or_, True)
__pos__ = do(operator.pos, True)
__pow__ = do(operator.pow, True)
__radd__ = do(right(operator.add), True)
__rand__ = do(right(operator.and_), True)
__rdiv__ = do(right(operator.floordiv), True)
__rfloordiv__ = do(right(operator.floordiv), True)
__rlshift__ = do(right(operator.lshift), True)
__rmod__ = do(right(operator.mod), True)
__rmul__ = do(right(operator.mul), True)
__ror__ = do(right(operator.or_), True)
__rpow__ = do(right(operator.pow), True)
__rrshift__ = do(right(operator.rshift), True)
__rshift__ = do(operator.rshift, True)
__rsub__ = do(right(operator.sub), True)
__rtruediv__ = do(right(operator.truediv), True)
__rxor__ = do(right(operator.xor), True)
__sub__ = do(operator.sub, True)
__truediv__ = do(operator.truediv, True)
__xor__ = do(operator.xor, True)
base.normalize_token.register(Value, lambda a: a.key)
def value(val, name=None):
"""Create a ``Value`` from a python object.
Parameters
----------
val : object
Object to be wrapped.
name : string, optional
Name to be used in the resulting dask.
Examples
--------
>>> a = value([1, 2, 3])
>>> a.compute()
[1, 2, 3]
Values can act as a proxy to the underlying object. Many operators are
supported:
>>> (a + [1, 2]).compute()
[1, 2, 3, 1, 2]
>>> a[1].compute()
2
Method and attribute access also works:
>>> a.count(2).compute()
1
Note that if a method doesn't exist, no error will be thrown until runtime:
>>> res = a.not_a_real_method()
>>> res.compute() # doctest: +SKIP
AttributeError("'list' object has no attribute 'not_a_real_method'")
Methods are assumed to be impure by default, meaning that subsequent calls
may return different results. To assume purity, set `pure=True`. This
allows sharing of any intermediate values.
>>> a.count(2, pure=True).key == a.count(2, pure=True).key
True
"""
if isinstance(val, Value):
return val
task, dasks = to_task_dasks(val)
name = name or (type(val).__name__ + '-' + tokenize(task, pure=True))
dasks.append({name: task})
return Value(name, dasks)
| {
"repo_name": "pombredanne/dask",
"path": "dask/imperative.py",
"copies": "1",
"size": "10406",
"license": "bsd-3-clause",
"hash": -4469779611477858000,
"line_mean": 27.9860724234,
"line_max": 79,
"alpha_frac": 0.5859119739,
"autogenerated": false,
"ratio": 3.50016818028927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45860801541892704,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
from operator import add, getitem
import inspect
from numbers import Number
from collections import Iterable, MutableMapping
from bisect import bisect
from itertools import product, count
from collections import Iterator
from functools import partial, wraps
from toolz.curried import (pipe, partition, concat, unique, pluck, join, first,
memoize, map, groupby, valmap, accumulate, merge,
curry, reduce, interleave, sliding_window, partial)
import numpy as np
from threading import Lock
from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..utils import deepmap, ignoring, repr_long_list, concrete, is_integer
from ..compatibility import unicode
from .. import threaded, core
from ..context import _globals
names = ('x_%d' % i for i in count(1))
tokens = ('-%d' % i for i in count(1))
def getarray(a, b, lock=None):
""" Mimics getitem but includes call to np.asarray
>>> getarray([1, 2, 3, 4, 5], slice(1, 4))
array([2, 3, 4])
"""
if lock:
lock.acquire()
try:
c = a[b]
if type(c) != np.ndarray:
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
from .optimization import optimize
def slices_from_chunks(chunks):
""" Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in chunks]
shapes = product(*chunks)
starts = product(*cumdims)
return [tuple(slice(s, s+dim) for s, dim in zip(start, shape))
for start, shape in zip(starts, shapes)]
def getem(arr, chunks, shape=None):
""" Dask getting various chunks from an array-like
>>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
"""
chunks = normalize_chunks(chunks, shape)
keys = list(product([arr], *[range(len(bds)) for bds in chunks]))
values = [(getarray, arr, x) for x in slices_from_chunks(chunks)]
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,))):
""" Find block dimensions from arguments
Parameters
----------
argpairs: iterable
name, ijk index pairs
numblocks: dict
maps {name: number of blocks}
sentinels: iterable (optional)
values for singleton dimensions
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
L = concat([zip(inds, dims)
for (x, inds), (x, dims)
in join(first, argpairs, first, numblocks.items())])
g = groupby(0, L)
g = dict((k, set([d for i, d in v])) for k, v in g.items())
g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())
if g2 and not set(map(len, g2.values())) == set([1]):
raise ValueError("Shapes do not align %s" % g)
return valmap(first, g2)
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
    yields an embarrassingly parallel communication pattern and is read as
z_i = func(x_i, y_i)
More complex patterns may emerge, including multiple indices
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
    Indices missing in the output but present in the inputs result in many
inputs being sent to one function (see examples).
Examples
--------
    Simple embarrassingly parallel map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
    {('z', 0, 0): (addT, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (addT, ('x', 0, 1), ('y', 1, 0)),
     ('z', 1, 0): (addT, ('x', 1, 0), ('y', 0, 1)),
     ('z', 1, 1): (addT, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
"""
numblocks = kwargs['numblocks']
argpairs = list(partition(2, arrind_pairs))
assert set(numblocks) == set(pluck(0, argpairs))
all_indices = pipe(argpairs, pluck(1), concat, set)
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
tups = lol_tuples((arg,), ind, kd, dummies)
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
args.append(tups2)
valtups.append(tuple(args))
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
vals = [(func,) + vt for vt in valtups]
return dict(zip(keys, vals))
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def map_blocks(func, *arrs, **kwargs):
""" Map a function across all blocks of a dask array
You must also specify the chunks of the resulting array. If you don't then
we assume that the resulting array has the same block structure as the
input.
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If function changes shape of the blocks then please provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
Your block function can learn where in the array it is if it supports a
``block_id`` keyword argument. This will receive entries like (2, 0, 1),
the position of the block in the dask array.
>>> def func(block, block_id=None):
... pass
"""
if not callable(func):
raise TypeError("First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)" %
type(func).__name__)
dtype = kwargs.get('dtype')
assert all(isinstance(arr, Array) for arr in arrs)
inds = [tuple(range(x.ndim))[::-1] for x in arrs]
args = list(concat(zip(arrs, inds)))
out_ind = tuple(range(max(x.ndim for x in arrs)))[::-1]
result = atop(func, out_ind, *args, dtype=dtype)
# If func has block_id as an argument then swap out func
# for func with block_id partialed in
try:
spec = inspect.getargspec(func)
except:
spec = None
if spec and 'block_id' in spec.args:
for k in core.flatten(result._keys()):
result.dask[k] = (partial(func, block_id=k[1:]),) + result.dask[k][1:]
    # Apply user-specified chunks, if provided
chunks = kwargs.get('chunks')
if chunks is not None and chunks and not isinstance(chunks[0], tuple):
chunks = tuple([nb * (bs,)
for nb, bs in zip(result.numblocks, chunks)])
if chunks is not None:
result.chunks = chunks
return result
@wraps(np.squeeze)
def squeeze(a, axis=None):
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
b = a.map_blocks(partial(np.squeeze, axis=axis), dtype=a.dtype)
chunks = tuple(bd for bd in b.chunks if bd != (1,))
old_keys = list(product([b.name], *[range(len(bd)) for bd in b.chunks]))
new_keys = list(product([b.name], *[range(len(bd)) for bd in chunks]))
dsk = b.dask.copy()
for o, n in zip(old_keys, new_keys):
dsk[n] = dsk[o]
del dsk[o]
return Array(dsk, b.name, chunks, dtype=a.dtype)
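# Note on squeeze above: singleton axes are removed block-wise via np.squeeze, and
# the graph keys are then rewritten so their indices match the reduced,
# singleton-free chunk structure.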
def topk(k, x):
""" The top k elements of an array
Returns the k greatest elements of the array in sorted order. Only works
on arrays of a single dimension.
>>> x = np.array([5, 1, 3, 6])
>>> d = from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
Runs in near linear time, returns all results in a single chunk so
all k elements must fit in memory.
"""
if x.ndim != 1:
raise ValueError("Topk only works on arrays of one dimension")
name = next(names)
dsk = dict(((name, i), (chunk.topk, k, key))
for i, key in enumerate(x._keys()))
name2 = next(names)
dsk[(name2, 0)] = (getitem,
(np.sort, (np.concatenate, (list, list(dsk)))),
slice(-1, -k - 1, -1))
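    # The aggregation task above concatenates each chunk's local top-k values,
    # sorts the combined array, and slices out the k largest in descending order.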
chunks = ((k,),)
return Array(merge(dsk, x.dask), name2, chunks, dtype=x.dtype)
def compute(*args, **kwargs):
""" Evaluate several dask arrays at once
The result of this function is always a tuple of numpy arrays. To evaluate
a single dask array into a numpy array, use ``myarray.compute()`` or simply
``np.array(myarray)``.
Examples
--------
>>> import dask.array as da
>>> d = da.ones((4, 4), chunks=(2, 2))
>>> a = d + 1 # two different dask arrays
>>> b = d + 2
>>> A, B = da.compute(a, b) # Compute both simultaneously
"""
dsk = merge(*[arg.dask for arg in args])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
results2 = tuple(concatenate3(x) if arg.shape else unpack_singleton(x)
for x, arg in zip(results, args))
return results2
def store(sources, targets, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or iterable of Arrays
targets: array-like or iterable of array-likes
These should support setitem syntax ``target[10:20] = ...``
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError("Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets)))
updates = [insert_to_ooc(tgt, src) for tgt, src in zip(targets, sources)]
dsk = merge([src.dask for src in sources] + updates)
keys = [key for u in updates for key in u]
get(dsk, keys, **kwargs)
def blockdims_from_blockshape(shape, chunks):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
"""
if chunks is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
if not all(map(is_integer, chunks)):
raise ValueError("chunks can only contain integers.")
if not all(map(is_integer, shape)):
raise ValueError("shape can only contain integers.")
shape = map(int, shape)
chunks = map(int, chunks)
return tuple((bd,) * (d // bd) + ((d % bd,) if d % bd else ())
for d, bd in zip(shape, chunks))
class Array(object):
""" Parallel Array
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
"""
__slots__ = 'dask', 'name', 'chunks', '_dtype'
def __init__(self, dask, name, chunks, dtype=None, shape=None):
self.dask = dask
self.name = name
self.chunks = normalize_chunks(chunks, shape)
if dtype is not None:
dtype = np.dtype(dtype)
self._dtype = dtype
@property
def _args(self):
return (self.dask, self.name, self.chunks, self.dtype)
@property
def numblocks(self):
return tuple(map(len, self.chunks))
@property
def shape(self):
return tuple(map(sum, self.chunks))
def __len__(self):
return sum(self.chunks[0])
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
@property
@memoize(key=lambda args, kwargs: (id(args[0]), args[0].name, args[0].chunks))
def dtype(self):
if self._dtype is not None:
return self._dtype
if self.shape:
return self[(0,) * self.ndim].compute().dtype
else:
return self.compute().dtype
def __repr__(self):
chunks = '(' + ', '.join(map(repr_long_list, self.chunks)) + ')'
return ("dask.array<%s, shape=%s, chunks=%s, dtype=%s>" %
(self.name, self.shape, chunks, self._dtype))
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
""" Number of elements in array """
return np.prod(self.shape)
@property
def nbytes(self):
""" Number of bytes in array """
return self.size * self.dtype.itemsize
def _keys(self, *args):
if self.ndim == 0:
return [(self.name,)]
ind = len(args)
if ind + 1 == self.ndim:
return [(self.name,) + args + (i,)
for i in range(self.numblocks[ind])]
else:
return [self._keys(*(args + (i,)))
for i in range(self.numblocks[ind])]
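    # _keys above builds a nested list-of-lists of block keys; for a 2x2-block
    # array named 'x' it yields [[('x', 0, 0), ('x', 0, 1)],
    #                            [('x', 1, 0), ('x', 1, 1)]].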
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
@wraps(store)
def store(self, target, **kwargs):
return store([self], [target], **kwargs)
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
See also
--------
da.store
h5py.File.create_dataset
"""
import h5py
with h5py.File(filename) as f:
if 'chunks' not in kwargs:
kwargs['chunks'] = tuple([c[0] for c in self.chunks])
d = f.require_dataset(datapath, shape=self.shape, dtype=self.dtype, **kwargs)
slices = slices_from_chunks(self.chunks)
name = next(names)
dsk = dict(((name,) + t[1:], (write_hdf5_chunk, filename, datapath, slc, t))
for t, slc in zip(core.flatten(self._keys()), slices))
myget = kwargs.get('get', get)
myget(merge(dsk, self.dask), list(dsk.keys()))
@wraps(compute)
def compute(self, **kwargs):
result, = compute(self, **kwargs)
return result
def cache(self, store=None, **kwargs):
""" Evaluate and cache array
Parameters
----------
store: MutableMapping or ndarray-like
Place to put computed and cached chunks
kwargs:
Keyword arguments to pass on to ``get`` function for scheduling
        This triggers evaluation and stores the result in either
1. An ndarray object supporting setitem (see da.store)
2. A MutableMapping like a dict or chest
It then returns a new dask array that points to this store.
This returns a semantically equivalent dask array.
>>> import dask.array as da
>>> x = da.arange(5, chunks=2)
>>> y = 2*x + 1
>>> z = y.cache() # triggers computation
>>> y.compute() # Does entire computation
array([1, 3, 5, 7, 9])
>>> z.compute() # Just pulls from store
array([1, 3, 5, 7, 9])
You might base a cache off of an array like a numpy array or
h5py.Dataset.
>>> cache = np.empty(5, dtype=x.dtype)
>>> z = y.cache(store=cache)
>>> cache
array([1, 3, 5, 7, 9])
Or one might use a MutableMapping like a dict or chest
>>> cache = dict()
>>> z = y.cache(store=cache)
>>> cache # doctest: +SKIP
{('x', 0): array([1, 3]),
('x', 1): array([5, 7]),
('x', 2): array([9])}
"""
if store is not None and hasattr(store, 'shape'):
self.store(store)
return from_array(store, chunks=self.chunks)
if store is None:
try:
from chest import Chest
store = Chest()
except ImportError:
if self.nbytes <= 1e9:
store = dict()
else:
raise ValueError("No out-of-core storage found."
"Either:\n"
"1. Install ``chest``, an out-of-core dictionary\n"
"2. Provide an on-disk array like an h5py.Dataset") # pragma: no cover
if isinstance(store, MutableMapping):
name = next(names)
dsk = dict(((name, k[1:]), (operator.setitem, store, (tuple, list(k)), k))
for k in core.flatten(self._keys()))
get(merge(dsk, self.dask), list(dsk.keys()), **kwargs)
dsk2 = dict((k, (operator.getitem, store, (tuple, list(k))))
for k in store)
return Array(dsk2, self.name, chunks=self.chunks, dtype=self._dtype)
def __int__(self):
return int(self.compute())
def __bool__(self):
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def __float__(self):
return float(self.compute())
def __complex__(self):
return complex(self.compute())
def __getitem__(self, index):
# Field access, e.g. x['a'] or x[['a', 'b']]
if (isinstance(index, (str, unicode)) or
( isinstance(index, list)
and all(isinstance(i, (str, unicode)) for i in index))):
if self._dtype is not None and isinstance(index, (str, unicode)):
dt = self._dtype[index]
elif self._dtype is not None and isinstance(index, list):
dt = np.dtype([(name, self._dtype[name]) for name in index])
else:
dt = None
return elemwise(getarray, self, index, dtype=dt)
# Slicing
out = next(names)
if not isinstance(index, tuple):
index = (index,)
if all(isinstance(i, slice) and i == slice(None) for i in index):
return self
dsk, chunks = slice_array(out, self.name, self.chunks, index)
return Array(merge(self.dask, dsk), out, chunks, dtype=self._dtype)
@wraps(np.dot)
def dot(self, other):
return tensordot(self, other, axes=((self.ndim-1,), (other.ndim-2,)))
@property
def T(self):
return transpose(self)
@wraps(np.transpose)
def transpose(self, axes=None):
return transpose(self, axes)
@wraps(topk)
def topk(self, k):
return topk(k, self)
def astype(self, dtype, **kwargs):
""" Copy of the array, cast to a specified type """
return elemwise(lambda x: x.astype(dtype, **kwargs), self, dtype=dtype)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.invert, self)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __pos__(self):
return self
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
@wraps(np.any)
def any(self, axis=None, keepdims=False):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims)
@wraps(np.all)
def all(self, axis=None, keepdims=False):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims)
@wraps(np.min)
def min(self, axis=None, keepdims=False):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims)
@wraps(np.max)
def max(self, axis=None, keepdims=False):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims)
@wraps(np.argmin)
def argmin(self, axis=None):
from .reductions import argmin
return argmin(self, axis=axis)
@wraps(np.argmax)
def argmax(self, axis=None):
from .reductions import argmax
return argmax(self, axis=axis)
@wraps(np.sum)
def sum(self, axis=None, dtype=None, keepdims=False):
from .reductions import sum
return sum(self, axis=axis, dtype=dtype, keepdims=keepdims)
@wraps(np.prod)
def prod(self, axis=None, dtype=None, keepdims=False):
from .reductions import prod
return prod(self, axis=axis, dtype=dtype, keepdims=keepdims)
@wraps(np.mean)
def mean(self, axis=None, dtype=None, keepdims=False):
from .reductions import mean
return mean(self, axis=axis, dtype=dtype, keepdims=keepdims)
@wraps(np.std)
def std(self, axis=None, dtype=None, keepdims=False, ddof=0):
from .reductions import std
return std(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)
@wraps(np.var)
def var(self, axis=None, dtype=None, keepdims=False, ddof=0):
from .reductions import var
return var(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)
def moment(self, order, axis=None, dtype=None, keepdims=False, ddof=0):
"""Calculate the nth centralized moment.
Parameters
----------
order : int
Order of the moment that is returned, must be >= 2.
axis : int, optional
Axis along which the central moment is computed. The default is to
compute the moment of the flattened array.
dtype : data-type, optional
Type to use in computing the moment. For arrays of integer type the
default is float64; for arrays of float types it is the same as the
array type.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
N - ddof, where N represents the number of elements. By default
ddof is zero.
Returns
-------
moment : ndarray
References
----------
.. [1] Pebay, Philippe (2008), "Formulas for Robust, One-Pass Parallel
Computation of Covariances and Arbitrary-Order Statistical Moments"
(PDF), Technical Report SAND2008-6212, Sandia National Laboratories
"""
from .reductions import moment
return moment(self, order, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)
def vnorm(self, ord=None, axis=None, keepdims=False):
""" Vector norm """
from .reductions import vnorm
return vnorm(self, ord=ord, axis=axis, keepdims=keepdims)
@wraps(map_blocks)
def map_blocks(self, func, chunks=None, dtype=None):
return map_blocks(func, self, chunks=chunks, dtype=dtype)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
            The number of cells that each block should share with its neighbors.
            If a tuple or dict, this can be different per axis.
boundary: str
how to handle the boundaries. Values include 'reflect', 'periodic'
or any constant value like 0 or np.nan
trim: bool
Whether or not to trim the excess after the map function. Set this
to false if your mapping function does this for you.
**kwargs:
Other keyword arguments valid in ``map_blocks``
Examples
--------
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
"""
from .ghost import map_overlap
return map_overlap(self, func, depth, boundary, trim, **kwargs)
@wraps(squeeze)
def squeeze(self):
return squeeze(self)
def rechunk(self, chunks):
from .rechunk import rechunk
return rechunk(self, chunks)
def normalize_chunks(chunks, shape=None):
""" Normalize chunks to tuple of tuples
>>> normalize_chunks((2, 2), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(4, 6)) # Idempotent
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks([[2, 2], [3, 3]]) # Cleans up lists to tuples
((2, 2), (3, 3))
>>> normalize_chunks(10, shape=(30, 5)) # Supports integer inputs
((10, 10, 10), (5,))
>>> normalize_chunks((), shape=(0, 0)) # respects null dimensions
((), ())
"""
if isinstance(chunks, list):
chunks = tuple(chunks)
if isinstance(chunks, Number):
chunks = (chunks,) * len(shape)
if not chunks:
if shape is None:
chunks = ()
else:
chunks = ((),) * len(shape)
if shape is not None:
chunks = tuple(c if c is not None else s for c, s in zip(chunks, shape))
if chunks and shape is not None:
chunks = sum((blockdims_from_blockshape((s,), (c,))
if not isinstance(c, (tuple, list)) else (c,)
for s, c in zip(shape, chunks)), ())
return tuple(map(tuple, chunks))
def from_array(x, chunks, name=None, lock=False, **kwargs):
""" Create dask array from something that looks like an array
Input must have a ``.shape`` and support numpy-style slicing.
The ``chunks`` argument must be one of the following forms:
- a blocksize like 1000
- a blockshape like (1000, 1000)
- explicit sizes of all blocks along all dimensions
like ((1000, 1000, 500), (400, 400)).
Examples
--------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP
If your underlying datastore does not support concurrent reads then include
the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
arrays to coordinate around the same lock.
>>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP
"""
chunks = normalize_chunks(chunks, x.shape)
name = name or next(names)
dsk = getem(name, chunks)
if lock is True:
lock = Lock()
if lock:
dsk = dict((k, v + (lock,)) for k, v in dsk.items())
return Array(merge({name: x}, dsk), name, chunks, dtype=x.dtype)
def atop(func, out_ind, *args, **kwargs):
""" Tensor operation: Generalized inner and outer products
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``atop`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Parameters
----------
func: callable
Function to apply to individual tuples of blocks
out_ind: iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
This is best explained through example. Consider the following examples:
Examples
--------
    2D embarrassingly parallel operation from two arrays, x and y.
>>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij') # z = x + y # doctest: +SKIP
Outer product multiplying x by y, two 1-d vectors
>>> z = atop(operator.mul, 'ij', x, 'i', y, 'j') # doctest: +SKIP
z = x.T
>>> z = atop(np.transpose, 'ji', x, 'ij') # doctest: +SKIP
    The transpose case above is illustrative because it does the same transposition
both on each in-memory block by calling ``np.transpose`` and on the order
of the blocks themselves, by switching the order of the index ``ij -> ji``.
We can compose these same patterns with more variables and more complex
in-memory functions
z = X + Y.T
>>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji') # doctest: +SKIP
    Any index, like ``i``, missing from the output index is interpreted as a
contraction (note that this differs from Einstein convention; repeated
indices do not imply contraction.) In the case of a contraction the passed
function should expect an iterator of blocks on any array that holds that
index.
Inner product multiplying x by y, two 1-d vectors
>>> def sequence_dot(x_blocks, y_blocks):
... result = 0
... for x, y in zip(x_blocks, y_blocks):
... result += x.dot(y)
... return result
>>> z = atop(sequence_dot, '', x, 'i', y, 'i') # doctest: +SKIP
Many dask.array operations are special cases of atop. These tensor
operations cover a broad subset of NumPy and this function has been battle
tested, supporting tricky concepts like broadcasting.
See also:
top - dict formulation of this function, contains most logic
"""
out = kwargs.pop('name', None) or next(names)
dtype = kwargs.get('dtype', None)
arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
shapes = dict((a.name, a.shape) for a, _ in arginds)
nameinds = [(a.name, i) for a, i in arginds]
dims = broadcast_dimensions(nameinds, shapes)
shape = tuple(dims[i] for i in out_ind)
blockdim_dict = dict((a.name, a.chunks) for a, _ in arginds)
chunkss = broadcast_dimensions(nameinds, blockdim_dict)
chunks = tuple(chunkss[i] for i in out_ind)
dsks = [a.dask for a, _ in arginds]
return Array(merge(dsk, *dsks), out, chunks, dtype=dtype)
def get(dsk, keys, get=None, **kwargs):
""" Specialized get function
1. Handle inlining
2. Use custom score function
"""
get = get or _globals['get'] or threaded.get
dsk2 = optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
"""
while True:
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
stacked_names = ('stack-%d' % i for i in count(1))
def stack(seq, axis=0):
"""
Stack arrays along a new axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along a new dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis + 1
if axis > ndim:
raise ValueError("Axis must not be greater than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
assert len(set(a.chunks for a in seq)) == 1 # same chunks
shape = seq[0].shape[:axis] + (len(seq),) + seq[0].shape[axis:]
chunks = ( seq[0].chunks[:axis]
+ ((1,) * n,)
+ seq[0].chunks[axis:])
name = next(stacked_names)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
names = [a.name for a in seq]
inputs = [(names[key[axis+1]],) + key[1:axis + 1] + key[axis + 2:]
for key in keys]
values = [(getarray, inp, (slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis))
for inp in inputs]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
else:
dt = None
return Array(dsk2, name, chunks, dtype=dt)
concatenate_names = ('concatenate-%d' % i for i in count(1))
def concatenate(seq, axis=0):
"""
Concatenate arrays along an existing axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also
--------
stack
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis
if axis >= ndim:
raise ValueError("Axis must be less than than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
bds = [a.chunks for a in seq]
if not all(len(set(bds[i][j] for i in range(n))) == 1
for j in range(len(bds[0])) if j != axis):
raise ValueError("Block shapes do not align")
shape = (seq[0].shape[:axis]
+ (sum(a.shape[axis] for a in seq),)
+ seq[0].shape[axis + 1:])
chunks = ( seq[0].chunks[:axis]
+ (sum([bd[axis] for bd in bds], ()),)
+ seq[0].chunks[axis + 1:])
name = next(concatenate_names)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
names = [a.name for a in seq]
values = [(names[bisect(cum_dims, key[axis + 1]) - 1],)
+ key[1:axis + 1]
+ (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis+1]) - 1],)
+ key[axis + 2:]
for key in keys]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
else:
dt = None
return Array(dsk2, name, chunks, dtype=dt)
@wraps(np.take)
def take(a, indices, axis=0):
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
if isinstance(a, np.ndarray) and isinstance(indices, Array):
return _take_dask_array_from_numpy(a, indices, axis)
else:
return a[(slice(None),) * axis + (indices,)]
def _take_dask_array_from_numpy(a, indices, axis):
assert isinstance(a, np.ndarray)
assert isinstance(indices, Array)
return indices.map_blocks(lambda block: np.take(a, block, axis),
chunks=indices.chunks,
dtype=a.dtype)
@wraps(np.transpose)
def transpose(a, axes=None):
axes = axes or tuple(range(a.ndim))[::-1]
return atop(partial(np.transpose, axes=axes),
axes,
a, tuple(range(a.ndim)), dtype=a._dtype)
@curry
def many(a, b, binop=None, reduction=None, **kwargs):
"""
    Apply a binary operator pairwise to two sequences, then reduce.
>>> many([1, 2, 3], [10, 20, 30], mul, sum) # dot product
140
"""
return reduction(map(partial(binop, **kwargs), a, b))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, int):
left_axes = (left_axes,)
if isinstance(right_axes, int):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
if len(left_axes) > 1:
raise NotImplementedError("Simultaneous Contractions of multiple "
"indices not yet supported")
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
out_index.remove(left_index[l])
right_index[r] = left_index[l]
    if lhs._dtype is not None and rhs._dtype is not None:
dt = np.promote_types(lhs._dtype, rhs._dtype)
else:
dt = None
func = many(binop=np.tensordot, reduction=sum,
axes=(left_axes, right_axes))
return atop(func,
out_index,
lhs, tuple(left_index),
rhs, tuple(right_index), dtype=dt)
def insert_to_ooc(out, arr):
lock = Lock()
def store(x, index):
with lock:
out[index] = np.asanyarray(x)
return None
slices = slices_from_chunks(arr.chunks)
name = 'store-%s' % arr.name
dsk = dict(((name,) + t[1:], (store, t, slc))
for t, slc in zip(core.flatten(arr._keys()), slices))
return dsk
def partial_by_order(op, other):
"""
>>> f = partial_by_order(add, [(1, 10)])
>>> f(5)
15
"""
def f(*args):
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return op(*args2)
return f
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
Examples
--------
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See also
--------
atop
"""
name = kwargs.get('name') or next(names)
out_ndim = max(len(arg.shape) if isinstance(arg, Array) else 0
for arg in args)
expr_inds = tuple(range(out_ndim))[::-1]
arrays = [arg for arg in args if isinstance(arg, Array)]
other = [(i, a) for i, a in enumerate(args) if not isinstance(a, Array)]
if any(isinstance(arg, np.ndarray) for arg in args):
raise NotImplementedError("Dask.array operations only work on dask "
"arrays, not numpy arrays.")
if 'dtype' in kwargs:
dt = kwargs['dtype']
elif not all(a._dtype is not None for a in arrays):
dt = None
else:
vals = [np.empty((1,) * a.ndim, dtype=a.dtype)
if hasattr(a, 'dtype') else a
for a in args]
try:
dt = op(*vals).dtype
except AttributeError:
dt = None
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
return atop(op2, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name)
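# Minimal usage sketch, assuming the ``from_array`` constructor and the
# ``Array.compute`` method defined elsewhere in this module.  It shows that
# ``partial_by_order`` keeps scalar arguments in their original positions, so
# non-commutative operations behave as expected.
def _elemwise_example():
    x = from_array(np.arange(4, dtype='f8'), chunks=2)
    y = elemwise(np.subtract, 10, x)    # scalar position preserved: 10 - x
    assert (y.compute() == 10 - np.arange(4, dtype='f8')).all()
    return y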
def wrap_elemwise(func, **kwargs):
""" Wrap up numpy function into dask.array """
f = partial(elemwise, func, **kwargs)
f.__doc__ = func.__doc__
f.__name__ = func.__name__
return f
# ufuncs, copied from this page:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
# math operations
logaddexp = wrap_elemwise(np.logaddexp)
logaddexp2 = wrap_elemwise(np.logaddexp2)
conj = wrap_elemwise(np.conj)
exp = wrap_elemwise(np.exp)
log = wrap_elemwise(np.log)
log2 = wrap_elemwise(np.log2)
log10 = wrap_elemwise(np.log10)
log1p = wrap_elemwise(np.log1p)
expm1 = wrap_elemwise(np.expm1)
sqrt = wrap_elemwise(np.sqrt)
square = wrap_elemwise(np.square)
# trigonometric functions
sin = wrap_elemwise(np.sin)
cos = wrap_elemwise(np.cos)
tan = wrap_elemwise(np.tan)
arcsin = wrap_elemwise(np.arcsin)
arccos = wrap_elemwise(np.arccos)
arctan = wrap_elemwise(np.arctan)
arctan2 = wrap_elemwise(np.arctan2)
hypot = wrap_elemwise(np.hypot)
sinh = wrap_elemwise(np.sinh)
cosh = wrap_elemwise(np.cosh)
tanh = wrap_elemwise(np.tanh)
arcsinh = wrap_elemwise(np.arcsinh)
arccosh = wrap_elemwise(np.arccosh)
arctanh = wrap_elemwise(np.arctanh)
deg2rad = wrap_elemwise(np.deg2rad)
rad2deg = wrap_elemwise(np.rad2deg)
# comparison functions
logical_and = wrap_elemwise(np.logical_and, dtype='bool')
logical_or = wrap_elemwise(np.logical_or, dtype='bool')
logical_xor = wrap_elemwise(np.logical_xor, dtype='bool')
logical_not = wrap_elemwise(np.logical_not, dtype='bool')
maximum = wrap_elemwise(np.maximum)
minimum = wrap_elemwise(np.minimum)
fmax = wrap_elemwise(np.fmax)
fmin = wrap_elemwise(np.fmin)
# floating functions
isreal = wrap_elemwise(np.isreal, dtype='bool')
iscomplex = wrap_elemwise(np.iscomplex, dtype='bool')
isfinite = wrap_elemwise(np.isfinite, dtype='bool')
isinf = wrap_elemwise(np.isinf, dtype='bool')
isnan = wrap_elemwise(np.isnan, dtype='bool')
signbit = wrap_elemwise(np.signbit, dtype='bool')
copysign = wrap_elemwise(np.copysign)
nextafter = wrap_elemwise(np.nextafter)
# modf: see below
ldexp = wrap_elemwise(np.ldexp)
# frexp: see below
fmod = wrap_elemwise(np.fmod)
floor = wrap_elemwise(np.floor)
ceil = wrap_elemwise(np.ceil)
trunc = wrap_elemwise(np.trunc)
# more math routines, from this page:
# http://docs.scipy.org/doc/numpy/reference/routines.math.html
degrees = wrap_elemwise(np.degrees)
radians = wrap_elemwise(np.radians)
rint = wrap_elemwise(np.rint)
fix = wrap_elemwise(np.fix)
angle = wrap_elemwise(np.angle)
real = wrap_elemwise(np.real)
imag = wrap_elemwise(np.imag)
clip = wrap_elemwise(np.clip)
fabs = wrap_elemwise(np.fabs)
sign = wrap_elemwise(np.sign)
def frexp(x):
tmp = elemwise(np.frexp, x)
left = next(names)
right = next(names)
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
if x._dtype is not None:
a = np.empty((1,), dtype=x._dtype)
l, r = np.frexp(a)
ldt = l.dtype
rdt = r.dtype
else:
ldt = None
rdt = None
L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
dtype=ldt)
R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
dtype=rdt)
return L, R
frexp.__doc__ = np.frexp.__doc__
def modf(x):
tmp = elemwise(np.modf, x)
left = next(names)
right = next(names)
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
if x._dtype is not None:
a = np.empty((1,), dtype=x._dtype)
l, r = np.modf(a)
ldt = l.dtype
rdt = r.dtype
else:
ldt = None
rdt = None
L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
dtype=ldt)
R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
dtype=rdt)
return L, R
modf.__doc__ = np.modf.__doc__
@wraps(np.around)
def around(x, decimals=0):
return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)
def isnull(values):
""" pandas.isnull for dask arrays """
import pandas as pd
return elemwise(pd.isnull, values, dtype='bool')
def notnull(values):
""" pandas.notnull for dask arrays """
return ~isnull(values)
@wraps(numpy_compat.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(numpy_compat.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype='bool')
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
where_error_message = """
The dask.array version of where only handles the three argument case.
da.where(x > 0, x, 0)
and not the single argument case
da.where(x > 0)
This is because dask.array operations must be able to infer the shape of their
outputs prior to execution. The number of positive elements of x requires
execution. See the ``np.where`` docstring for examples and the following link
for a more thorough explanation:
http://dask.pydata.org/en/latest/array-overview.html#construct
""".strip()
@wraps(np.where)
def where(condition, x=None, y=None):
if x is None or y is None:
raise TypeError(where_error_message)
return choose(condition, [y, x])
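# Minimal usage sketch, assuming ``from_array`` and the comparison operators
# on ``Array`` defined elsewhere in this module: the supported three-argument
# form of ``where`` described in the error message above.
def _where_example():
    data = np.array([-2., -1., 0., 1., 2.])
    x = from_array(data, chunks=2)
    result = where(x > 0, x, 0).compute()    # keep positives, zero elsewhere
    assert (result == np.where(data > 0, data, 0)).all()
    return result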
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes):
if not all(bd % div == 0 for i, div in axes.items()
for bd in x.chunks[i]):
raise ValueError(
"Coarsening factor does not align with block dimensions")
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = next(names)
dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes))
for key in core.flatten(x._keys()))
chunks = tuple(tuple(int(bd / axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.chunks))
if x._dtype is not None:
dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
else:
dt = None
return Array(merge(x.dask, dsk), name, chunks, dtype=dt)
def split_at_breaks(array, breaks, axis=0):
""" Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@wraps(np.insert)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
if not -arr.ndim <= axis < arr.ndim:
raise IndexError('axis %r is out of bounds for an array of dimension '
'%s' % (axis, arr.ndim))
if axis < 0:
axis += arr.ndim
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
'da.insert only implemented for monotonic ``obj`` argument')
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, 'ndim', 0) == 0:
# we need to turn values into a dask array
name = next(names)
dtype = getattr(values, 'dtype', type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(len(obj) if axis == n else s
for n, s in enumerate(arr.shape))
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd)
in enumerate(zip(arr.chunks,
values.chunks)))
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
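# Minimal usage sketch, assuming ``from_array`` defined elsewhere in this
# module.  ``insert`` mirrors ``np.insert`` for a monotonic ``obj`` and an
# explicit ``axis``.
def _insert_example():
    data = np.arange(6).reshape(2, 3)
    x = from_array(data, chunks=(2, 2))
    y = insert(x, [1], -1, axis=1)    # a column of -1 before column 1
    assert (y.compute() == np.insert(data, [1], -1, axis=1)).all()
    return y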
@wraps(chunk.broadcast_to)
def broadcast_to(x, shape):
shape = tuple(shape)
ndim_new = len(shape) - x.ndim
if ndim_new < 0 or any(new != old
for new, old in zip(shape[ndim_new:], x.shape)
if old != 1):
raise ValueError('cannot broadcast shape %s to shape %s'
% (x.shape, shape))
name = next(names)
chunks = (tuple((s,) for s in shape[:ndim_new])
+ tuple(bd if old > 1 else (new,)
for bd, old, new in zip(x.chunks, x.shape,
shape[ndim_new:])))
dsk = dict(((name,) + (0,) * ndim_new + key[1:],
(chunk.broadcast_to, key,
shape[:ndim_new] +
tuple(bd[i] for i, bd in zip(key[1:], chunks[ndim_new:]))))
for key in core.flatten(x._keys()))
return Array(merge(dsk, x.dask), name, chunks, dtype=x.dtype)
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = 'offset_' + func.__name__
return _offset
fromfunction_names = ('fromfunction-%d' % i for i in count(1))
@wraps(np.fromfunction)
def fromfunction(func, chunks=None, shape=None, dtype=None):
name = next(fromfunction_names)
if chunks:
chunks = normalize_chunks(chunks, shape)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
offsets = list(product(*aggdims))
shapes = list(product(*chunks))
values = [(np.fromfunction, offset_func(func, offset), shape)
for offset, shape in zip(offsets, shapes)]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype=dtype)
@wraps(np.unique)
def unique(x):
name = next(names)
dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
parts = get(merge(dsk, x.dask), list(dsk.keys()))
return np.unique(np.concatenate(parts))
def write_hdf5_chunk(fn, datapath, index, data):
import h5py
with h5py.File(fn) as f:
d = f[datapath]
d[index] = data
@wraps(np.bincount)
def bincount(x, weights=None, minlength=None):
if minlength is None:
raise TypeError("Must specify minlength argument in da.bincount")
assert x.ndim == 1
if weights is not None:
assert weights.chunks == x.chunks
# Call np.bincount on each block, possibly with weights
name = 'bincount' + next(tokens)
if weights is not None:
dsk = dict(((name, i),
(np.bincount, (x.name, i), (weights.name, i), minlength))
for i, _ in enumerate(x._keys()))
dtype = 'f8'
else:
dsk = dict(((name, i),
(np.bincount, (x.name, i), None, minlength))
for i, _ in enumerate(x._keys()))
dtype = 'i8'
# Sum up all of the intermediate bincounts per block
name = 'bincount-sum' + next(tokens)
dsk[(name, 0)] = (np.sum, (list, list(dsk)), 0)
chunks = ((minlength,),)
dsk.update(x.dask)
if weights is not None:
dsk.update(weights.dask)
return Array(dsk, name, chunks, dtype)
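# Minimal usage sketch, assuming ``from_array`` defined elsewhere in this
# module.  ``bincount`` counts per block and then sums the per-block counts,
# which is why ``minlength`` is required: every block must produce an output
# of the same length.
def _bincount_example():
    data = np.array([0, 1, 1, 2, 4, 4, 4])
    x = from_array(data, chunks=4)
    counts = bincount(x, minlength=5)
    assert (counts.compute() == np.bincount(data, minlength=5)).all()
    return counts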
def chunks_from_arrays(arrays):
""" Chunks tuple from nested list of arrays
>>> x = np.array([1, 2])
>>> chunks_from_arrays([x, x])
((2, 2),)
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x], [x]])
((1, 1), (2,))
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x, x]])
((1,), (2, 2))
"""
result = []
dim = 0
while isinstance(arrays, (list, tuple)):
result.append(tuple(deepfirst(a).shape[dim] for a in arrays))
arrays = arrays[0]
dim += 1
return tuple(result)
def deepfirst(seq):
""" First element in a nested list
>>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
1
"""
if not isinstance(seq, (list, tuple)):
return seq
else:
return deepfirst(seq[0])
def ndimlist(seq):
if not isinstance(seq, (list, tuple)):
return 0
else:
return 1 + ndimlist(seq[0])
def concatenate3(arrays):
""" Recursive np.concatenate
Input should be a nested list of numpy arrays arranged in the order they
    should appear in the array itself. Each array should have the same number
    of dimensions as the desired output, and the nesting of the lists should
    match that number of dimensions.
>>> x = np.array([[1, 2]])
>>> concatenate3([[x, x, x], [x, x, x]])
array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])
>>> concatenate3([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
arrays = concrete(arrays)
ndim = ndimlist(arrays)
if not ndim:
return arrays
chunks = chunks_from_arrays(arrays)
shape = tuple(map(sum, chunks))
result = np.empty(shape=shape, dtype=deepfirst(arrays).dtype)
for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):
while arr.ndim < ndim:
arr = arr[None, ...]
result[idx] = arr
return result
| {
"repo_name": "freeman-lab/dask",
"path": "dask/array/core.py",
"copies": "1",
"size": "65152",
"license": "bsd-3-clause",
"hash": 920068847602019100,
"line_mean": 30.7969741337,
"line_max": 107,
"alpha_frac": 0.5621623281,
"autogenerated": false,
"ratio": 3.399885195428691,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44620475235286905,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
from operator import add, getitem
import inspect
from numbers import Number
from collections import Iterable
from bisect import bisect
from itertools import product, count
from collections import Iterator
from functools import partial, wraps
from toolz.curried import (pipe, partition, concat, unique, pluck, join, first,
memoize, map, groupby, valmap, accumulate, merge,
curry, reduce, interleave, sliding_window)
import numpy as np
from threading import Lock
from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..utils import deepmap, ignoring, repr_long_list
from ..compatibility import unicode
from .. import threaded, core
from ..context import _globals
names = ('x_%d' % i for i in count(1))
def getarray(a, b, lock=None):
""" Mimics getitem but includes call to np.asarray
>>> getarray([1, 2, 3, 4, 5], slice(1, 4))
array([2, 3, 4])
"""
if lock:
lock.acquire()
try:
c = a[b]
if type(c) != np.ndarray:
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
from .optimization import optimize
def slices_from_chunks(chunks):
""" Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in chunks]
shapes = product(*chunks)
starts = product(*cumdims)
return [tuple(slice(s, s+dim) for s, dim in zip(start, shape))
for start, shape in zip(starts, shapes)]
def getem(arr, chunks, shape=None):
""" Dask getting various chunks from an array-like
>>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
"""
chunks = normalize_chunks(chunks, shape)
keys = list(product([arr], *[range(len(bds)) for bds in chunks]))
values = [(getarray, arr, x) for x in slices_from_chunks(chunks)]
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,))):
""" Find block dimensions from arguments
Parameters
----------
argpairs: iterable
name, ijk index pairs
numblocks: dict
maps {name: number of blocks}
sentinels: iterable (optional)
values for singleton dimensions
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
L = concat([zip(inds, dims)
for (x, inds), (x, dims)
in join(first, argpairs, first, numblocks.items())])
g = groupby(0, L)
g = dict((k, set([d for i, d in v])) for k, v in g.items())
g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())
if g2 and not set(map(len, g2.values())) == set([1]):
raise ValueError("Shapes do not align %s" % g)
return valmap(first, g2)
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
    yields an embarrassingly parallel communication pattern and is read as
z_i = func(x_i, y_i)
More complex patterns may emerge, including multiple indices
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
    Indices missing in the output but present in the inputs result in many
inputs being sent to one function (see examples).
Examples
--------
    Simple embarrassingly parallel map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
    Supports broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
"""
numblocks = kwargs['numblocks']
argpairs = list(partition(2, arrind_pairs))
assert set(numblocks) == set(pluck(0, argpairs))
all_indices = pipe(argpairs, pluck(1), concat, set)
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
tups = lol_tuples((arg,), ind, kd, dummies)
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
args.append(tups2)
valtups.append(tuple(args))
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
vals = [(func,) + vt for vt in valtups]
return dict(zip(keys, vals))
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def rec_concatenate(arrays, axis=0):
""" Recursive np.concatenate
>>> x = np.array([1, 2])
>>> rec_concatenate([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
if not arrays:
return np.array([])
if isinstance(arrays, Iterator):
arrays = list(arrays)
if isinstance(arrays[0], Iterator):
arrays = list(map(list, arrays))
if not isinstance(arrays[0], np.ndarray) and not hasattr(arrays[0], '__array__'):
arrays = [rec_concatenate(a, axis=axis + 1) for a in arrays]
if arrays[0].ndim <= axis:
arrays = [a[None, ...] for a in arrays]
if len(arrays) == 1:
return arrays[0]
return np.concatenate(arrays, axis=axis)
def map_blocks(x, func, chunks=None, dtype=None):
""" Map a function across all blocks of a dask array
    You can also specify the chunks of the resulting array. If you don't, then
    we assume that the resulting array has the same block structure as the
    input.
>>> import dask.array as da
>>> x = da.ones((8,), chunks=(4,))
>>> np.array(x.map_blocks(lambda x: x + 1))
array([ 2., 2., 2., 2., 2., 2., 2., 2.])
    If the function changes the shape of the blocks, provide the new chunks
>>> y = x.map_blocks(lambda x: x[::2], chunks=(2,))
    Or, if the result is ragged, provide the chunks explicitly
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
Your block function can learn where in the array it is if it supports a
block_id keyword argument. This will receive entries like (2, 0, 1), the
position of the block in the dask array.
>>> def func(block, block_id=None):
... pass
"""
if not chunks:
chunks = x.chunks
elif not isinstance(chunks[0], tuple):
chunks = tuple([nb * (bs,)
for nb, bs in zip(x.numblocks, chunks)])
name = next(names)
try:
spec = inspect.getargspec(func)
except:
spec = None
if spec and 'block_id' in spec.args:
dsk = dict(((name,) + k[1:], (partial(func, block_id=k[1:]), k))
for k in core.flatten(x._keys()))
else:
dsk = dict(((name,) + k[1:], (func, k)) for k in core.flatten(x._keys()))
return Array(merge(dsk, x.dask), name, chunks, dtype=dtype)
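# Minimal usage sketch of the ``block_id`` keyword described above; it uses
# ``from_array``, defined later in this module and resolved at call time.
# Each block learns its position in the array and shifts its values.
def _map_blocks_example():
    x = from_array(np.arange(8), chunks=4)
    def shift(block, block_id=None):
        return block + 100 * block_id[0]
    y = map_blocks(x, shift, dtype=x.dtype)
    return y.compute()    # array([0, 1, 2, 3, 104, 105, 106, 107])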
@wraps(np.squeeze)
def squeeze(a, axis=None):
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
b = a.map_blocks(partial(np.squeeze, axis=axis), dtype=a.dtype)
chunks = tuple(bd for bd in b.chunks if bd != (1,))
old_keys = list(product([b.name], *[range(len(bd)) for bd in b.chunks]))
new_keys = list(product([b.name], *[range(len(bd)) for bd in chunks]))
dsk = b.dask.copy()
for o, n in zip(old_keys, new_keys):
dsk[n] = dsk[o]
del dsk[o]
return Array(dsk, b.name, chunks, dtype=a.dtype)
def compute(*args, **kwargs):
""" Evaluate several dask arrays at once
The result of this function is always a tuple of numpy arrays. To evaluate
a single dask array into a numpy array, use ``myarray.compute()`` or simply
``np.array(myarray)``.
Example
-------
>>> import dask.array as da
>>> d = da.ones((4, 4), chunks=(2, 2))
>>> a = d + 1 # two different dask arrays
>>> b = d + 2
>>> A, B = da.compute(a, b) # Compute both simultaneously
"""
dsk = merge(*[arg.dask for arg in args])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
results2 = tuple(rec_concatenate(x) if arg.shape else unpack_singleton(x)
for x, arg in zip(results, args))
return results2
def store(sources, targets, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or iterable of Arrays
targets: array-like or iterable of array-likes
These should support setitem syntax ``target[10:20] = ...``
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError("Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets)))
updates = [insert_to_ooc(tgt, src) for tgt, src in zip(targets, sources)]
dsk = merge([src.dask for src in sources] + updates)
keys = [key for u in updates for key in u]
get(dsk, keys, **kwargs)
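# Minimal usage sketch; it uses ``from_array``, defined later in this module
# and resolved at call time.  ``store`` writes chunk by chunk into any target
# that supports setitem slicing, so a plain numpy array works as a target.
def _store_example():
    x = from_array(np.arange(12).reshape(3, 4), chunks=(3, 2)) + 1
    target = np.empty((3, 4), dtype=x.dtype)
    store(x, target)
    assert (target == np.arange(12).reshape(3, 4) + 1).all()
    return target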
def blockdims_from_blockshape(shape, blockshape):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
"""
if blockshape is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
return tuple((bd,) * (d // bd) + ((d % bd,) if d % bd else ())
for d, bd in zip(shape, blockshape))
class Array(object):
""" Array object holding a dask
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
"""
__slots__ = 'dask', 'name', 'chunks', '_dtype'
def __init__(self, dask, name, chunks, dtype=None, shape=None):
self.dask = dask
self.name = name
self.chunks = normalize_chunks(chunks, shape)
if dtype is not None:
dtype = np.dtype(dtype)
self._dtype = dtype
@property
def _args(self):
return (self.dask, self.name, self.chunks, self.dtype)
@property
def numblocks(self):
return tuple(map(len, self.chunks))
@property
def shape(self):
return tuple(map(sum, self.chunks))
def __len__(self):
return sum(self.chunks[0])
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
@property
@memoize(key=lambda args, kwargs: (id(args[0]), args[0].name, args[0].chunks))
def dtype(self):
if self._dtype is not None:
return self._dtype
if self.shape:
return self[(0,) * self.ndim].compute().dtype
else:
return self.compute().dtype
def __repr__(self):
chunks = '(' + ', '.join(map(repr_long_list, self.chunks)) + ')'
return ("dask.array<%s, shape=%s, chunks=%s, dtype=%s>" %
(self.name, self.shape, chunks, self._dtype))
def _get_block(self, *args):
return core.get(self.dask, (self.name,) + args)
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
""" Number of elements in array """
return np.prod(self.shape)
@property
def nbytes(self):
""" Number of bytes in array """
return self.size * self.dtype.itemsize
def _keys(self, *args):
if self.ndim == 0:
return [(self.name,)]
ind = len(args)
if ind + 1 == self.ndim:
return [(self.name,) + args + (i,)
for i in range(self.numblocks[ind])]
else:
return [self._keys(*(args + (i,)))
for i in range(self.numblocks[ind])]
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
@wraps(store)
def store(self, target, **kwargs):
return store([self], [target], **kwargs)
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
See also:
da.store
h5py.File.create_dataset
"""
import h5py
with h5py.File(filename) as f:
if 'chunks' not in kwargs:
kwargs['chunks'] = tuple([c[0] for c in self.chunks])
d = f.require_dataset(datapath, shape=self.shape, dtype=self.dtype, **kwargs)
slices = slices_from_chunks(self.chunks)
name = next(names)
dsk = dict(((name,) + t[1:], (write_hdf5_chunk, filename, datapath, slc, t))
for t, slc in zip(core.flatten(self._keys()), slices))
myget = kwargs.get('get', get)
myget(merge(dsk, self.dask), list(dsk.keys()))
@wraps(compute)
def compute(self, **kwargs):
result, = compute(self, **kwargs)
return result
def __int__(self):
return int(self.compute())
def __bool__(self):
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def __float__(self):
return float(self.compute())
def __complex__(self):
return complex(self.compute())
def __getitem__(self, index):
# Field access, e.g. x['a'] or x[['a', 'b']]
if (isinstance(index, (str, unicode)) or
( isinstance(index, list)
and all(isinstance(i, (str, unicode)) for i in index))):
if self._dtype is not None and isinstance(index, (str, unicode)):
dt = self._dtype[index]
elif self._dtype is not None and isinstance(index, list):
dt = np.dtype([(name, self._dtype[name]) for name in index])
else:
dt = None
return elemwise(getarray, self, index, dtype=dt)
# Slicing
out = next(names)
if not isinstance(index, tuple):
index = (index,)
if all(isinstance(i, slice) and i == slice(None) for i in index):
return self
dsk, chunks = slice_array(out, self.name, self.chunks, index)
return Array(merge(self.dask, dsk), out, chunks, dtype=self._dtype)
@wraps(np.dot)
def dot(self, other):
return tensordot(self, other, axes=((self.ndim-1,), (other.ndim-2,)))
@property
def T(self):
return transpose(self)
@wraps(np.transpose)
def transpose(self, axes=None):
return transpose(self, axes)
def astype(self, dtype, **kwargs):
""" Copy of the array, cast to a specified type """
return elemwise(partial(np.ndarray.astype, dtype=dtype, **kwargs),
self, dtype=dtype)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.invert, self)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __pos__(self):
return self
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
def any(self, axis=None, keepdims=False):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims)
def all(self, axis=None, keepdims=False):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims)
def min(self, axis=None, keepdims=False):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims)
def max(self, axis=None, keepdims=False):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims)
def argmin(self, axis=None):
from .reductions import argmin
return argmin(self, axis=axis)
def argmax(self, axis=None):
from .reductions import argmax
return argmax(self, axis=axis)
def sum(self, axis=None, keepdims=False):
from .reductions import sum
return sum(self, axis=axis, keepdims=keepdims)
def prod(self, axis=None, keepdims=False):
from .reductions import prod
return prod(self, axis=axis, keepdims=keepdims)
def mean(self, axis=None, keepdims=False):
from .reductions import mean
return mean(self, axis=axis, keepdims=keepdims)
def std(self, axis=None, keepdims=False, ddof=0):
from .reductions import std
return std(self, axis=axis, keepdims=keepdims, ddof=ddof)
def var(self, axis=None, keepdims=False, ddof=0):
from .reductions import var
return var(self, axis=axis, keepdims=keepdims, ddof=ddof)
def vnorm(self, ord=None, axis=None, keepdims=False):
from .reductions import vnorm
return vnorm(self, ord=ord, axis=axis, keepdims=keepdims)
@wraps(map_blocks)
def map_blocks(self, func, chunks=None, dtype=None):
return map_blocks(self, func, chunks, dtype=dtype)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
            The number of cells that each block should share with its neighbors.
            If a tuple or dict, this can be different per axis.
boundary: str
how to handle the boundaries. Values include 'reflect', 'periodic'
or any constant value like 0 or np.nan
trim: bool
Whether or not to trim the excess after the map function. Set this
to false if your mapping function does this for you.
**kwargs:
Other keyword arguments valid in ``map_blocks``
Example
-------
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
"""
from .ghost import map_overlap
return map_overlap(self, func, depth, boundary, trim, **kwargs)
@wraps(squeeze)
def squeeze(self):
return squeeze(self)
def rechunk(self, chunks):
from .rechunk import rechunk
return rechunk(self, chunks)
def normalize_chunks(chunks, shape=None):
""" Normalize chunks to tuple of tuples
>>> normalize_chunks((2, 2), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(4, 6)) # Idempotent
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks([[2, 2], [3, 3]]) # Cleans up lists to tuples
((2, 2), (3, 3))
>>> normalize_chunks(10, shape=(30, 5)) # Supports integer inputs
((10, 10, 10), (5,))
>>> normalize_chunks((), shape=(0, 0)) # respects null dimensions
((), ())
"""
if isinstance(chunks, list):
chunks = tuple(chunks)
if isinstance(chunks, Number):
chunks = (chunks,) * len(shape)
if not chunks:
if shape is None:
chunks = ()
else:
chunks = ((),) * len(shape)
if chunks and not isinstance(chunks[0], (tuple, list)):
chunks = blockdims_from_blockshape(shape, chunks)
chunks = tuple(map(tuple, chunks))
return chunks
def from_array(x, chunks, name=None, lock=False, **kwargs):
""" Create dask array from something that looks like an array
Input must have a ``.shape`` and support numpy-style slicing.
The ``chunks`` argument must be one of the following forms:
- a blocksize like 1000
- a blockshape like (1000, 1000)
- explicit sizes of all blocks along all dimensions
like ((1000, 1000, 500), (400, 400)).
Example
-------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP
If your underlying datastore does not support concurrent reads then include
the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
arrays to coordinate around the same lock.
>>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP
"""
chunks = normalize_chunks(chunks, x.shape)
name = name or next(names)
dsk = getem(name, chunks)
if lock is True:
lock = Lock()
if lock:
dsk = dict((k, v + (lock,)) for k, v in dsk.items())
return Array(merge({name: x}, dsk), name, chunks, dtype=x.dtype)
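# Minimal usage sketch: a runnable counterpart to the skipped doctests above,
# using an in-memory numpy array as the source.
def _from_array_example():
    data = np.arange(10)
    x = from_array(data, chunks=4)    # block sizes 4, 4 and 2
    assert x.chunks == ((4, 4, 2),)
    assert (np.array(x) == data).all()
    return x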
def atop(func, out, out_ind, *args, **kwargs):
""" Array object version of dask.array.top """
dtype = kwargs.get('dtype', None)
arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
shapes = dict((a.name, a.shape) for a, _ in arginds)
nameinds = [(a.name, i) for a, i in arginds]
dims = broadcast_dimensions(nameinds, shapes)
shape = tuple(dims[i] for i in out_ind)
blockdim_dict = dict((a.name, a.chunks) for a, _ in arginds)
chunkss = broadcast_dimensions(nameinds, blockdim_dict)
chunks = tuple(chunkss[i] for i in out_ind)
dsks = [a.dask for a, _ in arginds]
return Array(merge(dsk, *dsks), out, chunks, dtype=dtype)
def get(dsk, keys, get=None, **kwargs):
""" Specialized get function
1. Handle inlining
2. Use custom score function
"""
get = get or _globals['get'] or threaded.get
dsk2 = optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
"""
while True:
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
stacked_names = ('stack-%d' % i for i in count(1))
def stack(seq, axis=0):
"""
Stack arrays along a new axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along a new dimension (axis=0 by default)
Example
-------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also:
concatenate
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis + 1
if axis > ndim:
raise ValueError("Axis must not be greater than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
assert len(set(a.chunks for a in seq)) == 1 # same chunks
shape = seq[0].shape[:axis] + (len(seq),) + seq[0].shape[axis:]
chunks = ( seq[0].chunks[:axis]
+ ((1,) * n,)
+ seq[0].chunks[axis:])
name = next(stacked_names)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
names = [a.name for a in seq]
inputs = [(names[key[axis+1]],) + key[1:axis + 1] + key[axis + 2:]
for key in keys]
values = [(getarray, inp, (slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis))
for inp in inputs]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
else:
dt = None
return Array(dsk2, name, chunks, dtype=dt)
concatenate_names = ('concatenate-%d' % i for i in count(1))
def concatenate(seq, axis=0):
"""
Concatenate arrays along an existing axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Example
-------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also:
stack
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis
if axis >= ndim:
raise ValueError("Axis must be less than than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
bds = [a.chunks for a in seq]
if not all(len(set(bds[i][j] for i in range(n))) == 1
for j in range(len(bds[0])) if j != axis):
raise ValueError("Block shapes do not align")
shape = (seq[0].shape[:axis]
+ (sum(a.shape[axis] for a in seq),)
+ seq[0].shape[axis + 1:])
chunks = ( seq[0].chunks[:axis]
+ (sum([bd[axis] for bd in bds], ()),)
+ seq[0].chunks[axis + 1:])
name = next(concatenate_names)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
names = [a.name for a in seq]
values = [(names[bisect(cum_dims, key[axis + 1]) - 1],)
+ key[1:axis + 1]
+ (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis+1]) - 1],)
+ key[axis + 2:]
for key in keys]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
else:
dt = None
return Array(dsk2, name, chunks, dtype=dt)
@wraps(np.take)
def take(a, indices, axis):
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
return a[(slice(None),) * axis + (indices,)]
@wraps(np.transpose)
def transpose(a, axes=None):
axes = axes or tuple(range(a.ndim))[::-1]
return atop(curry(np.transpose, axes=axes),
next(names), axes,
a, tuple(range(a.ndim)), dtype=a._dtype)
@curry
def many(a, b, binop=None, reduction=None, **kwargs):
"""
    Apply a binary operator pairwise to two sequences, then reduce.
>>> many([1, 2, 3], [10, 20, 30], mul, sum) # dot product
140
"""
return reduction(map(curry(binop, **kwargs), a, b))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, int):
left_axes = (left_axes,)
if isinstance(right_axes, int):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
if len(left_axes) > 1:
raise NotImplementedError("Simultaneous Contractions of multiple "
"indices not yet supported")
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
out_index.remove(left_index[l])
right_index[r] = left_index[l]
    if lhs._dtype is not None and rhs._dtype is not None:
dt = np.promote_types(lhs._dtype, rhs._dtype)
else:
dt = None
func = many(binop=np.tensordot, reduction=sum,
axes=(left_axes, right_axes))
return atop(func,
next(names), out_index,
lhs, tuple(left_index),
rhs, tuple(right_index), dtype=dt)
def insert_to_ooc(out, arr):
lock = Lock()
def store(x, index):
with lock:
out[index] = np.asanyarray(x)
return None
slices = slices_from_chunks(arr.chunks)
name = 'store-%s' % arr.name
dsk = dict(((name,) + t[1:], (store, t, slc))
for t, slc in zip(core.flatten(arr._keys()), slices))
return dsk
def partial_by_order(op, other):
"""
>>> f = partial_by_order(add, [(1, 10)])
>>> f(5)
15
"""
def f(*args):
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return op(*args2)
return f
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See also:
atop
"""
name = kwargs.get('name') or next(names)
out_ndim = max(len(arg.shape) if isinstance(arg, Array) else 0
for arg in args)
expr_inds = tuple(range(out_ndim))[::-1]
arrays = [arg for arg in args if isinstance(arg, Array)]
other = [(i, arg) for i, arg in enumerate(args) if not isinstance(arg, Array)]
if 'dtype' in kwargs:
dt = kwargs['dtype']
elif not all(a._dtype is not None for a in arrays):
dt = None
else:
vals = [np.empty((1,) * a.ndim, dtype=a.dtype)
if hasattr(a, 'dtype') else a
for a in args]
try:
dt = op(*vals).dtype
except AttributeError:
dt = None
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
return atop(op2, name, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt)
def wrap_elemwise(func, **kwargs):
""" Wrap up numpy function into dask.array """
f = partial(elemwise, func, **kwargs)
f.__doc__ = func.__doc__
f.__name__ = func.__name__
return f
# ufuncs, copied from this page:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
# math operations
logaddexp = wrap_elemwise(np.logaddexp)
logaddexp2 = wrap_elemwise(np.logaddexp2)
conj = wrap_elemwise(np.conj)
exp = wrap_elemwise(np.exp)
log = wrap_elemwise(np.log)
log2 = wrap_elemwise(np.log2)
log10 = wrap_elemwise(np.log10)
log1p = wrap_elemwise(np.log1p)
expm1 = wrap_elemwise(np.expm1)
sqrt = wrap_elemwise(np.sqrt)
square = wrap_elemwise(np.square)
# trigonometric functions
sin = wrap_elemwise(np.sin)
cos = wrap_elemwise(np.cos)
tan = wrap_elemwise(np.tan)
arcsin = wrap_elemwise(np.arcsin)
arccos = wrap_elemwise(np.arccos)
arctan = wrap_elemwise(np.arctan)
arctan2 = wrap_elemwise(np.arctan2)
hypot = wrap_elemwise(np.hypot)
sinh = wrap_elemwise(np.sinh)
cosh = wrap_elemwise(np.cosh)
tanh = wrap_elemwise(np.tanh)
arcsinh = wrap_elemwise(np.arcsinh)
arccosh = wrap_elemwise(np.arccosh)
arctanh = wrap_elemwise(np.arctanh)
deg2rad = wrap_elemwise(np.deg2rad)
rad2deg = wrap_elemwise(np.rad2deg)
# comparison functions
logical_and = wrap_elemwise(np.logical_and, dtype='bool')
logical_or = wrap_elemwise(np.logical_or, dtype='bool')
logical_xor = wrap_elemwise(np.logical_xor, dtype='bool')
logical_not = wrap_elemwise(np.logical_not, dtype='bool')
maximum = wrap_elemwise(np.maximum)
minimum = wrap_elemwise(np.minimum)
fmax = wrap_elemwise(np.fmax)
fmin = wrap_elemwise(np.fmin)
# floating functions
isreal = wrap_elemwise(np.isreal, dtype='bool')
iscomplex = wrap_elemwise(np.iscomplex, dtype='bool')
isfinite = wrap_elemwise(np.isfinite, dtype='bool')
isinf = wrap_elemwise(np.isinf, dtype='bool')
isnan = wrap_elemwise(np.isnan, dtype='bool')
signbit = wrap_elemwise(np.signbit, dtype='bool')
copysign = wrap_elemwise(np.copysign)
nextafter = wrap_elemwise(np.nextafter)
# modf: see below
ldexp = wrap_elemwise(np.ldexp)
# frexp: see below
fmod = wrap_elemwise(np.fmod)
floor = wrap_elemwise(np.floor)
ceil = wrap_elemwise(np.ceil)
trunc = wrap_elemwise(np.trunc)
# more math routines, from this page:
# http://docs.scipy.org/doc/numpy/reference/routines.math.html
degrees = wrap_elemwise(np.degrees)
radians = wrap_elemwise(np.radians)
rint = wrap_elemwise(np.rint)
fix = wrap_elemwise(np.fix)
angle = wrap_elemwise(np.angle)
real = wrap_elemwise(np.real)
imag = wrap_elemwise(np.imag)
clip = wrap_elemwise(np.clip)
fabs = wrap_elemwise(np.fabs)
sign = wrap_elemwise(np.sign)
def frexp(x):
tmp = elemwise(np.frexp, x)
left = next(names)
right = next(names)
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
if x._dtype is not None:
a = np.empty((1,), dtype=x._dtype)
l, r = np.frexp(a)
ldt = l.dtype
rdt = r.dtype
else:
ldt = None
rdt = None
L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
dtype=ldt)
R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
dtype=rdt)
return L, R
frexp.__doc__ = np.frexp.__doc__
def modf(x):
tmp = elemwise(np.modf, x)
left = next(names)
right = next(names)
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
if x._dtype is not None:
a = np.empty((1,), dtype=x._dtype)
l, r = np.modf(a)
ldt = l.dtype
rdt = r.dtype
else:
ldt = None
rdt = None
L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
dtype=ldt)
R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
dtype=rdt)
return L, R
modf.__doc__ = np.modf.__doc__
@wraps(np.around)
def around(x, decimals=0):
return map_blocks(x, partial(np.around, decimals=decimals), dtype=x.dtype)
def isnull(values):
""" pandas.isnull for dask arrays """
import pandas as pd
return elemwise(pd.isnull, values, dtype='bool')
def notnull(values):
""" pandas.notnull for dask arrays """
return ~isnull(values)
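# Illustrative sketch of isnull/notnull, assuming `from_array` from this
# module and pandas' NaN semantics applied element-wise:
#
#     >>> isnull(from_array(np.array([1.0, np.nan]), chunks=2)).compute()  # doctest: +SKIP
#     array([False,  True], dtype=bool)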
@wraps(numpy_compat.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(numpy_compat.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype='bool')
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
@wraps(np.where)
def where(condition, x, y):
return choose(condition, [y, x])
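# where(condition, x, y) leans on np.choose semantics: a False (0) entry in
# the condition selects index 0 of the choice list (here y), and a True (1)
# entry selects index 1 (x).  A plain-numpy sketch of the same idea:
#
#     >>> np.choose(np.array([1, 0]),             # condition as 0/1
#     ...           [np.array([-1, -2]),           # y, taken where 0/False
#     ...            np.array([10, 20])])          # x, taken where 1/True
#     array([10, -2])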
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes):
if not all(bd % div == 0 for i, div in axes.items()
for bd in x.chunks[i]):
raise ValueError(
"Coarsening factor does not align with block dimensions")
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = next(names)
dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes))
for key in core.flatten(x._keys()))
chunks = tuple(tuple(int(bd / axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.chunks))
if x._dtype is not None:
dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
else:
dt = None
return Array(merge(x.dask, dsk), name, chunks, dtype=dt)
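# Illustrative sketch (assuming chunk.coarsen applies the reduction over
# non-overlapping blocks of the requested size along each axis; note the
# block sizes must be divisible by the coarsening factor):
#
#     >>> x = from_array(np.arange(6), chunks=2)    # doctest: +SKIP
#     >>> coarsen(np.sum, x, {0: 2}).compute()      # doctest: +SKIP
#     array([1, 5, 9])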
def split_at_breaks(array, breaks, axis=0):
""" Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@wraps(np.insert)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
if not -arr.ndim <= axis < arr.ndim:
raise IndexError('axis %r is out of bounds for an array of dimension '
'%s' % (axis, arr.ndim))
if axis < 0:
axis += arr.ndim
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
'da.insert only implemented for monotonic ``obj`` argument')
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, 'ndim', 0) == 0:
# we need to turn values into a dask array
name = next(names)
dtype = getattr(values, 'dtype', type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(len(obj) if axis == n else s
for n, s in enumerate(arr.shape))
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd)
in enumerate(zip(arr.chunks,
values.chunks)))
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
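# da.insert mirrors np.insert but requires an explicit axis; a plain-numpy
# sketch of the behaviour being reproduced (hypothetical values):
#
#     >>> np.insert(np.arange(6).reshape(2, 3), 1, -1, axis=1)
#     array([[ 0, -1,  1,  2],
#            [ 3, -1,  4,  5]])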
@wraps(chunk.broadcast_to)
def broadcast_to(x, shape):
shape = tuple(shape)
ndim_new = len(shape) - x.ndim
if ndim_new < 0 or any(new != old
for new, old in zip(shape[ndim_new:], x.shape)
if old != 1):
raise ValueError('cannot broadcast shape %s to shape %s'
% (x.shape, shape))
name = next(names)
chunks = (tuple((s,) for s in shape[:ndim_new])
+ tuple(bd if old > 1 else (new,)
for bd, old, new in zip(x.chunks, x.shape,
shape[ndim_new:])))
dsk = dict(((name,) + (0,) * ndim_new + key[1:],
(chunk.broadcast_to, key,
shape[:ndim_new] +
tuple(bd[i] for i, bd in zip(key[1:], chunks[ndim_new:]))))
for key in core.flatten(x._keys()))
return Array(merge(dsk, x.dask), name, chunks, dtype=x.dtype)
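# Broadcasting sketch (same rules as np.broadcast_to: new leading dimensions
# are prepended and size-1 dimensions are stretched):
#
#     >>> broadcast_to(from_array(np.arange(3), chunks=3), (2, 3)).compute()  # doctest: +SKIP
#     array([[0, 1, 2],
#            [0, 1, 2]])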
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = 'offset_' + func.__name__
return _offset
fromfunction_names = ('fromfunction-%d' % i for i in count(1))
@wraps(np.fromfunction)
def fromfunction(func, chunks=None, shape=None, dtype=None):
name = next(fromfunction_names)
if chunks:
chunks = normalize_chunks(chunks, shape)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
offsets = list(product(*aggdims))
shapes = list(product(*chunks))
values = [(np.fromfunction, offset_func(func, offset), shape)
for offset, shape in zip(offsets, shapes)]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype=dtype)
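# Sketch of the chunked equivalent of np.fromfunction: each block calls the
# user function with indices offset to that block's position in the global
# array (illustrative values):
#
#     >>> fromfunction(lambda i, j: i + j, chunks=(2, 2),
#     ...              shape=(4, 4), dtype='f8').compute()  # doctest: +SKIP
#     array([[ 0.,  1.,  2.,  3.],
#            [ 1.,  2.,  3.,  4.],
#            [ 2.,  3.,  4.,  5.],
#            [ 3.,  4.,  5.,  6.]])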
@wraps(np.unique)
def unique(x):
name = next(names)
dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
parts = get(merge(dsk, x.dask), list(dsk.keys()))
return np.unique(np.concatenate(parts))
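# Unlike most functions in this module, unique is eager: it computes the
# per-block uniques and returns a concrete numpy array (illustrative sketch):
#
#     >>> unique(from_array(np.array([1, 1, 2, 3, 3]), chunks=2))  # doctest: +SKIP
#     array([1, 2, 3])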
def write_hdf5_chunk(fn, datapath, index, data):
import h5py
with h5py.File(fn) as f:
d = f[datapath]
d[index] = data
| {
"repo_name": "minrk/dask",
"path": "dask/array/core.py",
"copies": "1",
"size": "52615",
"license": "bsd-3-clause",
"hash": -2187182692964355600,
"line_mean": 30.3744782349,
"line_max": 107,
"alpha_frac": 0.5571985175,
"autogenerated": false,
"ratio": 3.3180929557923946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9367030256369641,
"avg_score": 0.0016522433845507197,
"num_lines": 1677
} |
from __future__ import absolute_import, division, print_function
import operator
from toolz import first
import numpy as np
from datashape import dshape, var, DataShape
from dateutil.parser import parse as dt_parse
from datashape.predicates import isscalar, isboolean, isnumeric
from datashape import coretypes as ct, discover, unsigned, promote, optionify
from .core import parenthesize, eval_str
from .expressions import Expr, shape, ElemWise
from ..dispatch import dispatch
from ..compatibility import _strtypes
__all__ = '''BinOp UnaryOp Arithmetic Add Mult Sub Div FloorDiv Pow Mod USub
Relational Eq Ne Ge Lt Le Gt And Or Not'''.split()
def name(o):
if hasattr(o, '_name'):
return o._name
else:
return None
class BinOp(ElemWise):
__slots__ = '_hash', 'lhs', 'rhs'
__inputs__ = 'lhs', 'rhs'
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
lhs = parenthesize(eval_str(self.lhs))
rhs = parenthesize(eval_str(self.rhs))
return '%s %s %s' % (lhs, self.symbol, rhs)
@property
def _name(self):
if not isscalar(self.dshape.measure):
return None
l, r = name(self.lhs), name(self.rhs)
if l and not r:
return l
if r and not l:
return r
if l == r:
return l
@property
def _inputs(self):
result = []
if isinstance(self.lhs, Expr):
result.append(self.lhs)
if isinstance(self.rhs, Expr):
result.append(self.rhs)
return tuple(result)
def maxvar(L):
"""
>>> maxvar([1, 2, var])
Var()
>>> maxvar([1, 2, 3])
3
"""
if var in L:
return var
else:
return max(L)
def maxshape(shapes):
"""
>>> maxshape([(10, 1), (1, 10), ()])
(10, 10)
>>> maxshape([(4, 5), (5,)])
(4, 5)
"""
shapes = [shape for shape in shapes if shape]
if not shapes:
return ()
ndim = max(map(len, shapes))
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
for dims in zip(*shapes):
if len(set(dims) - set([1])) >= 2:
raise ValueError("Shapes don't align, %s" % str(dims))
return tuple(map(maxvar, zip(*shapes)))
class UnaryOp(ElemWise):
__slots__ = '_hash', '_child',
def __init__(self, child):
self._child = child
def __str__(self):
return '%s(%s)' % (self.symbol, eval_str(self._child))
@property
def symbol(self):
return type(self).__name__
@property
def dshape(self):
return DataShape(*(shape(self._child) + (self._dtype,)))
@property
def _name(self):
return self._child._name
class Arithmetic(BinOp):
""" Super class for arithmetic operators like add or mul """
@property
def _dtype(self):
# we can't simply use .schema or .datashape because we may have a bare
# integer, for example
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return promote(lhs, rhs)
@property
def dshape(self):
# TODO: better inference. e.g. int + int -> int
return DataShape(*(maxshape([shape(self.lhs), shape(self.rhs)]) +
(self._dtype,)))
class Add(Arithmetic):
symbol = '+'
op = operator.add
class Mult(Arithmetic):
symbol = '*'
op = operator.mul
class Sub(Arithmetic):
symbol = '-'
op = operator.sub
class Div(Arithmetic):
symbol = '/'
op = operator.truediv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return optionify(lhs, rhs, ct.float64)
class FloorDiv(Arithmetic):
symbol = '//'
op = operator.floordiv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
is_unsigned = lhs in unsigned and rhs in unsigned
max_width = max(lhs.itemsize, rhs.itemsize)
prefix = 'u' if is_unsigned else ''
measure = getattr(ct, '%sint%d' % (prefix, max_width * 8))
return optionify(lhs, rhs, measure)
class Pow(Arithmetic):
symbol = '**'
op = operator.pow
class Mod(Arithmetic):
symbol = '%'
op = operator.mod
class USub(UnaryOp):
op = operator.neg
symbol = '-'
def __str__(self):
return '-%s' % parenthesize(eval_str(self._child))
@property
def _dtype(self):
# TODO: better inference. -uint -> int
return self._child.schema
@dispatch(ct.Option, object)
def scalar_coerce(ds, val):
if val or val == 0:
return scalar_coerce(ds.ty, val)
else:
return None
@dispatch((ct.Record, ct.Mono, ct.Option, DataShape), Expr)
def scalar_coerce(ds, val):
return val
@dispatch(ct.Date, _strtypes)
def scalar_coerce(_, val):
dt = dt_parse(val)
if dt.time():
raise ValueError("Can not coerce %s to type Date, "
"contains time information")
return dt.date()
@dispatch(ct.DateTime, _strtypes)
def scalar_coerce(_, val):
return dt_parse(val)
@dispatch(ct.CType, _strtypes)
def scalar_coerce(dt, val):
return np.asscalar(np.asarray(val, dtype=dt.to_numpy_dtype()))
@dispatch(ct.Record, object)
def scalar_coerce(rec, val):
if len(rec.fields) == 1:
return scalar_coerce(first(rec.types), val)
else:
raise TypeError("Trying to coerce complex datashape\n"
"got dshape: %s\n"
"scalar_coerce only intended for scalar values" % rec)
@dispatch(ct.DataShape, object)
def scalar_coerce(ds, val):
return scalar_coerce(ds.measure, val)
@dispatch(object, object)
def scalar_coerce(dtype, val):
return val
@dispatch(_strtypes, object)
def scalar_coerce(ds, val):
return scalar_coerce(dshape(ds), val)
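# The dispatch chain above normalises bare Python values against a datashape
# before they enter an expression; a couple of illustrative calls:
#
#     >>> scalar_coerce('int32', '1')
#     1
#     >>> scalar_coerce('date', '2010-01-01')
#     datetime.date(2010, 1, 1)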
def _neg(self):
return USub(self)
def _add(self, other):
result = Add(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _radd(self, other):
result = Add(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
def _mul(self, other):
result = Mult(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _rmul(self, other):
result = Mult(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
def _div(self, other):
result = Div(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _rdiv(self, other):
result = Div(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
def _floordiv(self, other):
result = FloorDiv(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _rfloordiv(self, other):
result = FloorDiv(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
def _sub(self, other):
result = Sub(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _rsub(self, other):
result = Sub(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
def _pow(self, other):
result = Pow(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _rpow(self, other):
result = Pow(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
def _mod(self, other):
result = Mod(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _rmod(self, other):
result = Mod(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
class Relational(Arithmetic):
_dtype = ct.bool_
class Eq(Relational):
symbol = '=='
op = operator.eq
class Ne(Relational):
symbol = '!='
op = operator.ne
class Ge(Relational):
symbol = '>='
op = operator.ge
class Le(Relational):
symbol = '<='
op = operator.le
class Gt(Relational):
symbol = '>'
op = operator.gt
class Lt(Relational):
symbol = '<'
op = operator.lt
class And(Arithmetic):
symbol = '&'
op = operator.and_
_dtype = ct.bool_
class Or(Arithmetic):
symbol = '|'
op = operator.or_
_dtype = ct.bool_
class Not(UnaryOp):
symbol = '~'
op = operator.invert
_dtype = ct.bool_
def __str__(self):
return '~%s' % parenthesize(eval_str(self._child))
def _eq(self, other):
result = Eq(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _ne(self, other):
result = Ne(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _lt(self, other):
result = Lt(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _le(self, other):
result = Le(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _gt(self, other):
result = Gt(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _ge(self, other):
result = Ge(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
def _invert(self):
result = Invert(self)
result.dshape # Check that shapes and dtypes match up
return result
def _and(self, other):
result = And(self, other)
result.dshape # Check that shapes and dtypes match up
return result
def _rand(self, other):
result = And(other, self)
result.dshape # Check that shapes and dtypes match up
return result
def _or(self, other):
result = Or(self, other)
result.dshape # Check that shapes and dtypes match up
return result
def _ror(self, other):
result = Or(other, self)
result.dshape # Check that shapes and dtypes match up
return result
def _invert(self):
result = Not(self)
result.dshape # Check that shapes and dtypes match up
return result
Invert = Not
BitAnd = And
BitOr = Or
from .expressions import schema_method_list
schema_method_list.extend([
(isnumeric,
set([_add, _radd, _mul,
_rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub, _rsub, _pow,
_rpow, _mod, _rmod, _neg])),
(isscalar, set([_eq, _ne, _lt, _le, _gt, _ge])),
(isboolean, set([_or, _ror, _and, _rand, _invert])),
])
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/arithmetic.py",
"copies": "1",
"size": "11014",
"license": "bsd-3-clause",
"hash": 2276067550438492200,
"line_mean": 23.0480349345,
"line_max": 78,
"alpha_frac": 0.6247503178,
"autogenerated": false,
"ratio": 3.465701699181875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4590452016981875,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
from toolz import first
import numpy as np
import pandas as pd
from datashape import dshape, var, DataShape
from dateutil.parser import parse as dt_parse
from datashape.predicates import isscalar, isboolean, isnumeric, isdatelike
from datashape import coretypes as ct, discover, unsigned, promote, optionify
from .core import parenthesize, eval_str
from .expressions import Expr, shape, ElemWise
from ..dispatch import dispatch
from ..compatibility import _strtypes
__all__ = '''
BinOp
UnaryOp
Arithmetic
Add
Mult
Repeat
Sub
Div
FloorDiv
Pow
Mod
Interp
USub
Relational
Eq
Ne
Ge
Lt
Le
Gt
And
Or
Not
'''.split()
class BinOp(ElemWise):
__slots__ = '_hash', 'lhs', 'rhs'
__inputs__ = 'lhs', 'rhs'
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
lhs = parenthesize(eval_str(self.lhs))
rhs = parenthesize(eval_str(self.rhs))
return '%s %s %s' % (lhs, self.symbol, rhs)
@property
def _name(self):
if not isscalar(self.dshape.measure):
return None
l = getattr(self.lhs, '_name', None)
r = getattr(self.rhs, '_name', None)
if l is not None and r is None:
return l
elif r is not None and l is None:
return r
elif l == r:
return l
else:
return None
@property
def _inputs(self):
result = []
if isinstance(self.lhs, Expr):
result.append(self.lhs)
if isinstance(self.rhs, Expr):
result.append(self.rhs)
return tuple(result)
def maxvar(L):
"""
>>> maxvar([1, 2, var])
Var()
>>> maxvar([1, 2, 3])
3
"""
if var in L:
return var
else:
return max(L)
def maxshape(shapes):
"""
>>> maxshape([(10, 1), (1, 10), ()])
(10, 10)
>>> maxshape([(4, 5), (5,)])
(4, 5)
"""
shapes = [shape for shape in shapes if shape]
if not shapes:
return ()
ndim = max(map(len, shapes))
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
for dims in zip(*shapes):
if len(set(dims) - set([1])) >= 2:
raise ValueError("Shapes don't align, %s" % str(dims))
return tuple(map(maxvar, zip(*shapes)))
class UnaryOp(ElemWise):
__slots__ = '_hash', '_child',
def __init__(self, child):
self._child = child
def __str__(self):
return '%s(%s)' % (self.symbol, eval_str(self._child))
@property
def symbol(self):
return type(self).__name__
@property
def dshape(self):
return DataShape(*(shape(self._child) + (self._dtype,)))
@property
def _name(self):
return self._child._name
class Arithmetic(BinOp):
""" Super class for arithmetic operators like add or mul """
@property
def _dtype(self):
# we can't simply use .schema or .datashape because we may have a bare
# integer, for example
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return promote(lhs, rhs)
@property
def dshape(self):
# TODO: better inference. e.g. int + int -> int
return DataShape(*(maxshape([shape(self.lhs), shape(self.rhs)]) +
(self._dtype,)))
class Add(Arithmetic):
symbol = '+'
op = operator.add
class Mult(Arithmetic):
symbol = '*'
op = operator.mul
class Repeat(Arithmetic):
# Sequence repeat
symbol = '*'
op = operator.mul
class Sub(Arithmetic):
symbol = '-'
op = operator.sub
class Div(Arithmetic):
symbol = '/'
op = operator.truediv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return optionify(lhs, rhs, ct.float64)
class FloorDiv(Arithmetic):
symbol = '//'
op = operator.floordiv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
is_unsigned = lhs in unsigned and rhs in unsigned
max_width = max(lhs.itemsize, rhs.itemsize)
prefix = 'u' if is_unsigned else ''
measure = getattr(ct, '%sint%d' % (prefix, max_width * 8))
return optionify(lhs, rhs, measure)
class Pow(Arithmetic):
symbol = '**'
op = operator.pow
class Mod(Arithmetic):
symbol = '%'
op = operator.mod
class Interp(Arithmetic):
# String interpolation
symbol = '%'
op = operator.mod
class USub(UnaryOp):
op = operator.neg
symbol = '-'
def __str__(self):
return '-%s' % parenthesize(eval_str(self._child))
@property
def _dtype(self):
# TODO: better inference. -uint -> int
return self._child.schema
@dispatch(ct.Option, object)
def scalar_coerce(ds, val):
if val or val == 0:
return scalar_coerce(ds.ty, val)
else:
return None
@dispatch((ct.Record, ct.Mono, ct.Option, DataShape), Expr)
def scalar_coerce(ds, val):
return val
@dispatch(ct.Date, _strtypes)
def scalar_coerce(_, val):
dt = dt_parse(val)
if dt.time():
raise ValueError("Can not coerce %s to type Date, "
"contains time information")
return dt.date()
@dispatch(ct.DateTime, _strtypes)
def scalar_coerce(_, val):
return pd.Timestamp(val)
@dispatch(ct.CType, _strtypes)
def scalar_coerce(dt, val):
return np.asscalar(np.asarray(val, dtype=dt.to_numpy_dtype()))
@dispatch(ct.Record, object)
def scalar_coerce(rec, val):
if len(rec.fields) == 1:
return scalar_coerce(first(rec.types), val)
else:
raise TypeError("Trying to coerce complex datashape\n"
"got dshape: %s\n"
"scalar_coerce only intended for scalar values" % rec)
@dispatch(ct.DataShape, object)
def scalar_coerce(ds, val):
return scalar_coerce(ds.measure, val)
@dispatch(object, object)
def scalar_coerce(dtype, val):
return val
@dispatch(_strtypes, object)
def scalar_coerce(ds, val):
return scalar_coerce(dshape(ds), val)
def _neg(self):
return USub(self)
def _mkbin(name, cons, private=True, reflected=True):
prefix = '_' if private else ''
def _bin(self, other):
result = cons(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
_bin.__name__ = prefix + name
if reflected:
def _rbin(self, other):
result = cons(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
_rbin.__name__ = prefix + 'r' + name
return _bin, _rbin
return _bin
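# _mkbin builds the operator helpers assigned below: the generated function
# coerces the plain-Python operand against the expression's dshape and wraps
# both sides in the given node type.  For example (illustrative, with `t.x`
# a hypothetical numeric field), the generated `_add(t.x, 1)` returns
# `Add(t.x, 1)`, while the reflected `_radd(t.x, 1)` returns `Add(1, t.x)`.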
_add, _radd = _mkbin('add', Add)
_div, _rdiv = _mkbin('div', Div)
_floordiv, _rfloordiv = _mkbin('floordiv', FloorDiv)
_mod, _rmod = _mkbin('mod', Mod)
_mul, _rmul = _mkbin('mul', Mult)
_pow, _rpow = _mkbin('pow', Pow)
repeat = _mkbin('repeat', Repeat, reflected=False, private=False)
_sub, _rsub = _mkbin('sub', Sub)
interp = _mkbin('interp', Interp, reflected=False, private=False)
class Relational(Arithmetic):
_dtype = ct.bool_
class Eq(Relational):
symbol = '=='
op = operator.eq
class Ne(Relational):
symbol = '!='
op = operator.ne
class Ge(Relational):
symbol = '>='
op = operator.ge
class Le(Relational):
symbol = '<='
op = operator.le
class Gt(Relational):
symbol = '>'
op = operator.gt
class Lt(Relational):
symbol = '<'
op = operator.lt
class And(Arithmetic):
symbol = '&'
op = operator.and_
_dtype = ct.bool_
class Or(Arithmetic):
symbol = '|'
op = operator.or_
_dtype = ct.bool_
class Not(UnaryOp):
symbol = '~'
op = operator.invert
_dtype = ct.bool_
def __str__(self):
return '~%s' % parenthesize(eval_str(self._child))
_and, _rand = _mkbin('and', And)
_eq = _mkbin('eq', Eq, reflected=False)
_ge = _mkbin('ge', Ge, reflected=False)
_gt = _mkbin('gt', Gt, reflected=False)
_le = _mkbin('le', Le, reflected=False)
_lt = _mkbin('lt', Lt, reflected=False)
_ne = _mkbin('ne', Ne, reflected=False)
_or, _ror = _mkbin('or', Or)
def _invert(self):
result = Invert(self)
result.dshape # Check that shapes and dtypes match up
return result
Invert = Not
BitAnd = And
BitOr = Or
from .expressions import schema_method_list
schema_method_list.extend([
(isnumeric,
set([_add, _radd, _mul, _rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub,
_rsub, _pow, _rpow, _mod, _rmod, _neg])),
(isscalar, set([_eq, _ne, _lt, _le, _gt, _ge])),
(isboolean, set([_or, _ror, _and, _rand, _invert])),
(isdatelike, set([_add, _radd, _sub, _rsub])),
])
| {
"repo_name": "caseyclements/blaze",
"path": "blaze/expr/arithmetic.py",
"copies": "5",
"size": "8784",
"license": "bsd-3-clause",
"hash": -535188195921522050,
"line_mean": 20.3722627737,
"line_max": 78,
"alpha_frac": 0.5931238616,
"autogenerated": false,
"ratio": 3.3122171945701355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00038308199611787564,
"num_lines": 411
} |
from __future__ import absolute_import, division, print_function
import operator
from toolz import first
import numpy as np
import pandas as pd
from datashape import dshape, var, DataShape, Option
from dateutil.parser import parse as dt_parse
from datashape.predicates import isscalar, isboolean, isnumeric, isdatelike
from datashape import coretypes as ct, discover, unsigned, promote, optionify
from .core import parenthesize, eval_str
from .expressions import Expr, shape, ElemWise
from ..dispatch import dispatch
from ..compatibility import _strtypes
__all__ = '''
BinOp
UnaryOp
Arithmetic
Add
Mult
Repeat
Sub
Div
FloorDiv
Pow
Mod
Interp
USub
Relational
Eq
Ne
Ge
Lt
Le
Gt
And
Or
Not
'''.split()
class BinOp(ElemWise):
__slots__ = '_hash', 'lhs', 'rhs'
__inputs__ = 'lhs', 'rhs'
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
lhs = parenthesize(eval_str(self.lhs))
rhs = parenthesize(eval_str(self.rhs))
return '%s %s %s' % (lhs, self.symbol, rhs)
@property
def dshape(self):
# TODO: better inference. e.g. int + int -> int
return DataShape(*(maxshape([shape(self.lhs), shape(self.rhs)]) +
(self._dtype,)))
@property
def _name(self):
if not isscalar(self.dshape.measure):
return None
l = getattr(self.lhs, '_name', None)
r = getattr(self.rhs, '_name', None)
if l is not None and r is None:
return l
elif r is not None and l is None:
return r
elif l == r:
return l
else:
return None
@property
def _inputs(self):
result = []
if isinstance(self.lhs, Expr):
result.append(self.lhs)
if isinstance(self.rhs, Expr):
result.append(self.rhs)
return tuple(result)
def maxvar(L):
"""
>>> maxvar([1, 2, var])
Var()
>>> maxvar([1, 2, 3])
3
"""
if var in L:
return var
else:
return max(L)
def maxshape(shapes):
"""
>>> maxshape([(10, 1), (1, 10), ()])
(10, 10)
>>> maxshape([(4, 5), (5,)])
(4, 5)
"""
shapes = [shape for shape in shapes if shape]
if not shapes:
return ()
ndim = max(map(len, shapes))
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
for dims in zip(*shapes):
if len(set(dims) - set([1])) >= 2:
raise ValueError("Shapes don't align, %s" % str(dims))
return tuple(map(maxvar, zip(*shapes)))
class UnaryOp(ElemWise):
__slots__ = '_hash', '_child',
def __init__(self, child):
self._child = child
def __str__(self):
return '%s(%s)' % (self.symbol, eval_str(self._child))
@property
def symbol(self):
return type(self).__name__
@property
def dshape(self):
return DataShape(*(shape(self._child) + (self._dtype,)))
@property
def _name(self):
return self._child._name
class Arithmetic(BinOp):
""" Super class for arithmetic operators like add or mul """
@property
def _dtype(self):
# we can't simply use .schema or .datashape because we may have a bare
# integer, for example
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return promote(lhs, rhs)
class Add(Arithmetic):
symbol = '+'
op = operator.add
class Mult(Arithmetic):
symbol = '*'
op = operator.mul
class Repeat(Arithmetic):
# Sequence repeat
symbol = '*'
op = operator.mul
class Sub(Arithmetic):
symbol = '-'
op = operator.sub
class Div(Arithmetic):
symbol = '/'
op = operator.truediv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return optionify(lhs, rhs, ct.float64)
class FloorDiv(Arithmetic):
symbol = '//'
op = operator.floordiv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
is_unsigned = lhs in unsigned and rhs in unsigned
max_width = max(lhs.itemsize, rhs.itemsize)
prefix = 'u' if is_unsigned else ''
measure = getattr(ct, '%sint%d' % (prefix, max_width * 8))
return optionify(lhs, rhs, measure)
class Pow(Arithmetic):
symbol = '**'
op = operator.pow
class Mod(Arithmetic):
symbol = '%'
op = operator.mod
class Interp(Arithmetic):
# String interpolation
symbol = '%'
op = operator.mod
class USub(UnaryOp):
op = operator.neg
symbol = '-'
def __str__(self):
return '-%s' % parenthesize(eval_str(self._child))
@property
def _dtype(self):
# TODO: better inference. -uint -> int
return self._child.schema
@dispatch(ct.Option, object)
def scalar_coerce(ds, val):
return scalar_coerce(ds.ty, val) if val is not None else None
@dispatch((ct.Record, ct.Mono, ct.Option, DataShape), Expr)
def scalar_coerce(ds, val):
return val
@dispatch(ct.Date, _strtypes)
def scalar_coerce(_, val):
if val == '':
raise TypeError('%r is not a valid date' % val)
dt = dt_parse(val)
if dt.time(): # TODO: doesn't work with python 3.5
raise TypeError(
"Can not coerce %r to type Date, contains time information" % val
)
return dt.date()
@dispatch(ct.DateTime, _strtypes)
def scalar_coerce(_, val):
if val == '':
raise TypeError('%r is not a valid datetime' % val)
return pd.Timestamp(val)
@dispatch(ct.CType, _strtypes)
def scalar_coerce(dt, val):
return np.asscalar(np.asarray(val, dtype=dt.to_numpy_dtype()))
@dispatch(ct.Record, object)
def scalar_coerce(rec, val):
if len(rec.fields) == 1:
return scalar_coerce(first(rec.types), val)
else:
raise TypeError("Trying to coerce complex datashape\n"
"got dshape: %s\n"
"scalar_coerce only intended for scalar values" % rec)
@dispatch(ct.DataShape, object)
def scalar_coerce(ds, val):
return scalar_coerce(ds.measure, val)
@dispatch(object, object)
def scalar_coerce(dtype, val):
return val
@dispatch(_strtypes, object)
def scalar_coerce(ds, val):
return scalar_coerce(dshape(ds), val)
def _neg(self):
return USub(self)
def _mkbin(name, cons, private=True, reflected=True):
prefix = '_' if private else ''
def _bin(self, other):
result = cons(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
_bin.__name__ = prefix + name
if reflected:
def _rbin(self, other):
result = cons(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
_rbin.__name__ = prefix + 'r' + name
return _bin, _rbin
return _bin
_add, _radd = _mkbin('add', Add)
_div, _rdiv = _mkbin('div', Div)
_floordiv, _rfloordiv = _mkbin('floordiv', FloorDiv)
_mod, _rmod = _mkbin('mod', Mod)
_mul, _rmul = _mkbin('mul', Mult)
_pow, _rpow = _mkbin('pow', Pow)
repeat = _mkbin('repeat', Repeat, reflected=False, private=False)
_sub, _rsub = _mkbin('sub', Sub)
interp = _mkbin('interp', Interp, reflected=False, private=False)
class _Optional(Arithmetic):
@property
def _dtype(self):
# we can't simply use .schema or .datashape because we may have a bare
# integer, for example
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
if isinstance(lhs, Option) or isinstance(rhs, Option):
return Option(ct.bool_)
return ct.bool_
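# The result dtype of these operations tracks missing data: if either
# operand's measure is an Option (nullable) type, the comparison or boolean
# result has measure ?bool rather than plain bool.  For example, comparing a
# ?int32 column against a scalar yields Option(bool_), while a non-optional
# int32 comparison stays bool_.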
class Relational(_Optional):
# Leave this to separate relationals from other types of optionals.
pass
class Eq(Relational):
symbol = '=='
op = operator.eq
class Ne(Relational):
symbol = '!='
op = operator.ne
class Ge(Relational):
symbol = '>='
op = operator.ge
class Le(Relational):
symbol = '<='
op = operator.le
class Gt(Relational):
symbol = '>'
op = operator.gt
class Lt(Relational):
symbol = '<'
op = operator.lt
class And(_Optional):
symbol = '&'
op = operator.and_
class Or(_Optional):
symbol = '|'
op = operator.or_
class Not(UnaryOp):
symbol = '~'
op = operator.invert
@property
def _dtype(self):
return self._child.schema
def __str__(self):
return '~%s' % parenthesize(eval_str(self._child))
_and, _rand = _mkbin('and', And)
_eq = _mkbin('eq', Eq, reflected=False)
_ge = _mkbin('ge', Ge, reflected=False)
_gt = _mkbin('gt', Gt, reflected=False)
_le = _mkbin('le', Le, reflected=False)
_lt = _mkbin('lt', Lt, reflected=False)
_ne = _mkbin('ne', Ne, reflected=False)
_or, _ror = _mkbin('or', Or)
def _invert(self):
result = Invert(self)
result.dshape # Check that shapes and dtypes match up
return result
Invert = Not
BitAnd = And
BitOr = Or
from .expressions import schema_method_list
schema_method_list.extend([
(isnumeric,
set([_add, _radd, _mul, _rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub,
_rsub, _pow, _rpow, _mod, _rmod, _neg])),
(isscalar, set([_eq, _ne, _lt, _le, _gt, _ge])),
(isboolean, set([_or, _ror, _and, _rand, _invert])),
(isdatelike, set([_add, _radd, _sub, _rsub])),
])
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/expr/arithmetic.py",
"copies": "1",
"size": "9396",
"license": "bsd-3-clause",
"hash": 6308118187008525000,
"line_mean": 21.0563380282,
"line_max": 78,
"alpha_frac": 0.5966368668,
"autogenerated": false,
"ratio": 3.364124597207304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4460761464007304,
"avg_score": null,
"num_lines": null
} |