from __future__ import absolute_import, division, print_function
import os
import sys
import subprocess
import uuid
import mmap
from tempfile import NamedTemporaryFile
from contextlib import closing
from functools import partial
from distutils.spawn import find_executable
import pandas as pd
from pandas.formats.format import CSVFormatter
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.elements import Executable, ClauseElement
from toolz import merge
from multipledispatch import MDNotImplementedError
from ..append import append
from ..convert import convert
from ..compatibility import StringIO
from ..utils import literal_compile
from .csv import CSV, infer_header
from ..temp import Temp
from .aws import S3
from .sql import getbind
class CopyFromCSV(Executable, ClauseElement):
def __init__(self, element, csv, delimiter=',', header=None, na_value='',
lineterminator='\n', quotechar='"', escapechar='\\',
encoding='utf8', skiprows=0, bind=None, **kwargs):
if not isinstance(element, sa.Table):
raise TypeError('element must be a sqlalchemy.Table instance')
self.element = element
self.csv = csv
self.delimiter = delimiter
self.header = (
header if header is not None else
(csv.has_header
if csv.has_header is not None else infer_header(csv.path))
)
self.na_value = na_value
self.lineterminator = lineterminator
self.quotechar = quotechar
self.escapechar = escapechar
self.encoding = encoding
self.skiprows = int(skiprows or self.header)
self._bind = getbind(element, bind)
for k, v in kwargs.items():
setattr(self, k, v)
@property
def bind(self):
return self._bind
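# --- Illustrative sketch (not part of the original odo source) ---
# How a CopyFromCSV construct might be built and rendered against a specific
# dialect.  The engine URL, table definition and CSV path below are
# hypothetical placeholders and assume the file/database exist.
def _example_copy_from_csv():
    engine = sa.create_engine('postgresql://localhost/example_db')  # assumed URL
    table = sa.Table('example_table', sa.MetaData(),
                     sa.Column('a', sa.Integer),
                     sa.Column('b', sa.String))
    csv = CSV('/tmp/example.csv', has_header=True)  # assumed existing file
    stmt = CopyFromCSV(table, csv, bind=engine)
    # literal_compile inlines the bound parameters; this is what the
    # postgresql branch of append_csv_to_sql_table below feeds to copy_expert.
    return literal_compile(stmt)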
@compiles(CopyFromCSV, 'sqlite')
def compile_from_csv_sqlite(element, compiler, **kwargs):
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
t = element.element
if not element.header:
csv = element.csv
else:
csv = Temp(CSV)('.%s' % uuid.uuid1())
assert csv.has_header, \
'SQLAlchemy element.header is True but CSV inferred no header'
# write to a temporary file after skipping the first line
chunksize = 1 << 24 # 16 MiB
lineterminator = element.lineterminator.encode(element.encoding)
with element.csv.open() as f:
with closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mf:
index = mf.find(lineterminator)
if index == -1:
raise ValueError("'%s' not found" % lineterminator)
mf.seek(index + len(lineterminator))  # use len() so multi-byte terminators such as '\r\n' are skipped in full
with open(csv.path, 'wb') as g:
for chunk in iter(partial(mf.read, chunksize), b''):
g.write(chunk)
fullpath = os.path.abspath(csv.path).encode('unicode-escape').decode()
cmd = ['sqlite3',
'-nullvalue', repr(element.na_value),
'-separator', element.delimiter,
'-cmd', '.import "%s" \'%s\'' % (
# FIXME: format_table(t) is correct, but sqlite will complain
fullpath, compiler.preparer.format_table(t)
),
element.bind.url.database]
stderr = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
).decode(sys.getfilesystemencoding())
if stderr:
raise sa.exc.DatabaseError(' '.join(cmd), [], OSError(stderr))
return ''
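# Worked example (hypothetical paths/table): with the defaults na_value='' and
# delimiter=',', the argv list built above is roughly
#   ['sqlite3', '-nullvalue', "''", '-separator', ',',
#    '-cmd', '.import "/tmp/example.csv" \'example_table\'',
#    '/tmp/example.db']
# i.e. the CSV (with its header line stripped, if it had one) is bulk-loaded
# through the sqlite3 command-line shell rather than through SQLAlchemy itself.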
@compiles(CopyFromCSV, 'mysql')
def compile_from_csv_mysql(element, compiler, **kwargs):
if element.na_value:
raise ValueError(
'MySQL does not support custom NULL values for CSV input'
)
return compiler.process(
sa.text(
"""LOAD DATA {local} INFILE :path
INTO TABLE {table}
CHARACTER SET :encoding
FIELDS
TERMINATED BY :delimiter
ENCLOSED BY :quotechar
ESCAPED BY :escapechar
LINES TERMINATED BY :lineterminator
IGNORE :skiprows LINES
""".format(
local=getattr(element, 'local', ''),
table=compiler.preparer.format_table(element.element)
)
).bindparams(
path=os.path.abspath(element.csv.path),
encoding=element.encoding or element.bind.execute(
'select @@character_set_client'
).scalar(),
delimiter=element.delimiter,
quotechar=element.quotechar,
escapechar=element.escapechar,
lineterminator=element.lineterminator,
skiprows=int(element.header)
),
**kwargs
)
@compiles(CopyFromCSV, 'postgresql')
def compile_from_csv_postgres(element, compiler, **kwargs):
if len(element.escapechar) != 1:
raise ValueError(
'postgres does not allow escape characters longer than 1 byte when '
'bulk loading a CSV file'
)
return compiler.process(
sa.text(
"""
COPY {0} FROM STDIN (
FORMAT CSV,
DELIMITER :delimiter,
NULL :na_value,
QUOTE :quotechar,
ESCAPE :escapechar,
HEADER :header,
ENCODING :encoding
)
""".format(compiler.preparer.format_table(element.element))
).bindparams(
delimiter=element.delimiter,
na_value=element.na_value,
quotechar=element.quotechar,
# use quotechar for escape intentionally because it makes it easier
# to create a csv that pandas and postgres agree on
escapechar=element.quotechar,
header=element.header,
encoding=element.encoding or element.bind.execute(
'show client_encoding'
).scalar()
),
**kwargs
)
try:
import boto
from odo.backends.aws import S3
from redshift_sqlalchemy.dialect import CopyCommand
except ImportError:
pass
else:
@compiles(CopyFromCSV, 'redshift')
def compile_from_csv_redshift(element, compiler, **kwargs):
assert isinstance(element.csv, S3(CSV))
assert element.csv.path.startswith('s3://')
cfg = boto.Config()
aws_access_key_id = cfg.get('Credentials', 'aws_access_key_id')
aws_secret_access_key = cfg.get('Credentials', 'aws_secret_access_key')
compression = getattr(element, 'compression', '').upper() or None
cmd = CopyCommand(
table=element.element,
data_location=element.csv.path,
access_key_id=aws_access_key_id,
secret_access_key=aws_secret_access_key,
format='CSV',
delimiter=element.delimiter,
ignore_header=int(element.header),
empty_as_null=True,
blanks_as_null=False,
compression=compression
)
return compiler.process(cmd)
@append.register(sa.Table, CSV)
def append_csv_to_sql_table(tbl, csv, bind=None, **kwargs):
bind = getbind(tbl, bind)
dialect = bind.dialect.name
# move things to a temporary S3 bucket if we're using redshift and we
# aren't already in S3
if dialect == 'redshift' and not isinstance(csv, S3(CSV)):
csv = convert(Temp(S3(CSV)), csv, **kwargs)
elif dialect != 'redshift' and isinstance(csv, S3(CSV)):
csv = convert(Temp(CSV), csv, has_header=csv.has_header, **kwargs)
elif dialect == 'hive':
from .ssh import SSH
return append(tbl, convert(Temp(SSH(CSV)), csv, **kwargs), **kwargs)
kwargs = merge(csv.dialect, kwargs)
stmt = CopyFromCSV(tbl, csv, bind=bind, **kwargs)
if dialect == 'postgresql':
with bind.begin() as c:
with csv.open() as f:
c.connection.cursor().copy_expert(literal_compile(stmt), f)
else:
with bind.begin() as conn:
conn.execute(stmt)
return tbl
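# --- Illustrative sketch (not part of the original module) ---
# How the append above is typically invoked; the engine URL, table definition
# and CSV path are hypothetical and assume the file and database exist.
def _example_append_csv():
    engine = sa.create_engine('postgresql://localhost/example_db')  # assumed URL
    table = sa.Table('example_table', sa.MetaData(),
                     sa.Column('a', sa.Integer),
                     sa.Column('b', sa.String))
    csv = CSV('/tmp/example.csv', has_header=True)  # assumed existing file
    return append_csv_to_sql_table(table, csv, bind=engine)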
class NanIsNotNullFormatter(CSVFormatter):
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
na_rep = 'nan' if b.dtype.kind == 'f' else self.na_rep
d = b.to_native_types(slicer=slicer,
na_rep=na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
pd.lib.write_csv_rows(self.data,
ix,
self.nlevels,
self.cols,
self.writer)
# The set of SQL dialects that can append a DataFrame to a table from an
# in-memory CSV buffer, without bouncing it to disk first.
DATAFRAME_TO_TABLE_IN_MEMORY_CSV = frozenset({
'postgresql',
})
# TODO: figure out how to dynamically detect this number, users may have
# compiled their sqlite with a different max variable number but this is the
# default.
SQLITE_MAX_VARIABLE_NUMBER = 999
@append.register(sa.Table, pd.DataFrame)
def append_dataframe_to_sql_table(tbl,
df,
bind=None,
dshape=None,
sqlite_max_variable_number=SQLITE_MAX_VARIABLE_NUMBER,
quotechar='"',
**kwargs):
bind = getbind(tbl, bind)
dialect = bind.dialect.name
if dialect == 'sqlite':
name = ('.'.join((tbl.schema, tbl.name))
if tbl.schema is not None else
tbl.name)
df.to_sql(
name,
bind,
index=False,
if_exists='append',
chunksize=sqlite_max_variable_number,
)
return tbl
if dialect in DATAFRAME_TO_TABLE_IN_MEMORY_CSV:
buf = StringIO()
path = None
else:
buf = NamedTemporaryFile(mode='w+')
path = buf.name
with buf:
NanIsNotNullFormatter(
df[[c.name for c in tbl.columns]],
buf,
index=False,
quotechar=quotechar,
doublequote=True,
# use quotechar for escape intentionally because it makes it easier
# to create a csv that pandas and postgres agree on
escapechar=quotechar,
).save()
buf.flush()
buf.seek(0)
return append_csv_to_sql_table(
tbl,
CSV(path,
buffer=buf if path is None else None,
has_header=True,
# use quotechar for escape intentionally because it makes it
# makes it easier to create a csv that pandas and postgres
# agree on
escapechar=quotechar,
quotechar=quotechar),
dshape=dshape,
bind=bind,
# use quotechar for escape intentionally because it makes it easier
# to create a csv that pandas and postgres agree on
escapechar=quotechar,
quotechar=quotechar,
**kwargs
)
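# --- Illustrative sketch (not part of the original module) ---
# Appending an in-memory DataFrame; with a sqlite engine this takes the
# df.to_sql fast path above, while other dialects go through the CSV machinery
# so that quoting and escaping stay consistent between pandas and the database.
# The database path and table definition are hypothetical.
def _example_append_dataframe():
    engine = sa.create_engine('sqlite:///example.db')
    metadata = sa.MetaData()
    table = sa.Table('example_table', metadata,
                     sa.Column('a', sa.Integer),
                     sa.Column('b', sa.String))
    metadata.create_all(engine)
    df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
    return append_dataframe_to_sql_table(table, df, bind=engine)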
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/backends/sql_csv.py",
"copies": "4",
"size": "12066",
"license": "bsd-3-clause",
"hash": -6115847992412246000,
"line_mean": 33.5730659026,
"line_max": 88,
"alpha_frac": 0.5659704956,
"autogenerated": false,
"ratio": 4.3062098501070665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010209119438283606,
"num_lines": 349
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import subprocess
import uuid
import mmap
from contextlib import closing
from functools import partial
from distutils.spawn import find_executable
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.elements import Executable, ClauseElement
from toolz import merge
from multipledispatch import MDNotImplementedError
from ..append import append
from ..convert import convert
from .csv import CSV, infer_header
from ..temp import Temp
from .aws import S3
from .sql import getbind
class CopyFromCSV(Executable, ClauseElement):
def __init__(self, element, csv, delimiter=',', header=None, na_value='',
lineterminator='\n', quotechar='"', escapechar='\\',
encoding='utf8', skiprows=0, bind=None, **kwargs):
if not isinstance(element, sa.Table):
raise TypeError('element must be a sqlalchemy.Table instance')
self.element = element
self.csv = csv
self.delimiter = delimiter
self.header = (
header if header is not None else
(csv.has_header
if csv.has_header is not None else infer_header(csv.path))
)
self.na_value = na_value
self.lineterminator = lineterminator
self.quotechar = quotechar
self.escapechar = escapechar
self.encoding = encoding
self.skiprows = int(skiprows or self.header)
self._bind = getbind(element, bind)
for k, v in kwargs.items():
setattr(self, k, v)
@property
def bind(self):
return self._bind
@compiles(CopyFromCSV, 'sqlite')
def compile_from_csv_sqlite(element, compiler, **kwargs):
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
t = element.element
if not element.header:
csv = element.csv
else:
csv = Temp(CSV)('.%s' % uuid.uuid1())
assert csv.has_header, \
'SQLAlchemy element.header is True but CSV inferred no header'
# write to a temporary file after skipping the first line
chunksize = 1 << 24 # 16 MiB
lineterminator = element.lineterminator.encode(element.encoding)
with open(element.csv.path, 'rb') as f:
with closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mf:
index = mf.find(lineterminator)
if index == -1:
raise ValueError("'%s' not found" % lineterminator)
mf.seek(index + len(lineterminator))  # use len() so multi-byte terminators such as '\r\n' are skipped in full
with open(csv.path, 'wb') as g:
for chunk in iter(partial(mf.read, chunksize), b''):
g.write(chunk)
fullpath = os.path.abspath(csv.path).encode('unicode-escape').decode()
cmd = ['sqlite3',
'-nullvalue', repr(element.na_value),
'-separator', element.delimiter,
'-cmd', '.import "%s" %s' % (
# FIXME: format_table(t) is correct, but sqlite will complain
fullpath, compiler.preparer.format_table(t)
),
element.bind.url.database]
stderr = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE
).decode(sys.getfilesystemencoding())
if stderr:
# TODO: this seems like a lot of rigamarole
try:
raise OSError(stderr)
except OSError as e:
raise sa.exc.DatabaseError(' '.join(cmd), [], e)
return ''
@compiles(CopyFromCSV, 'mysql')
def compile_from_csv_mysql(element, compiler, **kwargs):
if element.na_value:
raise ValueError(
'MySQL does not support custom NULL values for CSV input'
)
return compiler.process(
sa.text(
"""LOAD DATA {local} INFILE :path
INTO TABLE {table}
CHARACTER SET :encoding
FIELDS
TERMINATED BY :delimiter
ENCLOSED BY :quotechar
ESCAPED BY :escapechar
LINES TERMINATED BY :lineterminator
IGNORE :skiprows LINES
""".format(
local=getattr(element, 'local', ''),
table=compiler.preparer.format_table(element.element)
)
).bindparams(
path=os.path.abspath(element.csv.path),
encoding=element.encoding or element.bind.execute(
'select @@character_set_client'
).scalar(),
delimiter=element.delimiter,
quotechar=element.quotechar,
escapechar=element.escapechar,
lineterminator=element.lineterminator,
skiprows=int(element.header)
),
**kwargs
)
@compiles(CopyFromCSV, 'postgresql')
def compile_from_csv_postgres(element, compiler, **kwargs):
if len(element.escapechar) != 1:
raise ValueError(
'postgres does not allow escape characters longer than 1 byte when '
'bulk loading a CSV file'
)
if element.lineterminator != '\n':
raise ValueError(
r'PostgreSQL does not support line terminators other than \n'
)
return compiler.process(
sa.text(
"""
COPY {0} FROM :path (
FORMAT CSV,
DELIMITER :delimiter,
NULL :na_value,
QUOTE :quotechar,
ESCAPE :escapechar,
HEADER :header,
ENCODING :encoding
)
""".format(compiler.preparer.format_table(element.element))
).bindparams(
path=os.path.abspath(element.csv.path),
delimiter=element.delimiter,
na_value=element.na_value,
quotechar=element.quotechar,
escapechar=element.escapechar,
header=element.header,
encoding=element.encoding or element.bind.execute(
'show client_encoding'
).scalar()
),
**kwargs
)
try:
import boto
from odo.backends.aws import S3
from redshift_sqlalchemy.dialect import CopyCommand
except ImportError:
pass
else:
@compiles(CopyFromCSV, 'redshift')
def compile_from_csv_redshift(element, compiler, **kwargs):
assert isinstance(element.csv, S3(CSV))
assert element.csv.path.startswith('s3://')
cfg = boto.Config()
aws_access_key_id = cfg.get('Credentials', 'aws_access_key_id')
aws_secret_access_key = cfg.get('Credentials', 'aws_secret_access_key')
compression = getattr(element, 'compression', '').upper() or None
cmd = CopyCommand(
table=element.element,
data_location=element.csv.path,
access_key_id=aws_access_key_id,
secret_access_key=aws_secret_access_key,
format='CSV',
delimiter=element.delimiter,
ignore_header=int(element.header),
empty_as_null=True,
blanks_as_null=False,
compression=compression
)
return compiler.process(cmd)
@append.register(sa.Table, CSV)
def append_csv_to_sql_table(tbl, csv, bind=None, **kwargs):
bind = getbind(tbl, bind)
dialect = bind.dialect.name
# move things to a temporary S3 bucket if we're using redshift and we
# aren't already in S3
if dialect == 'redshift' and not isinstance(csv, S3(CSV)):
csv = convert(Temp(S3(CSV)), csv, **kwargs)
elif dialect != 'redshift' and isinstance(csv, S3(CSV)):
csv = convert(Temp(CSV), csv, has_header=csv.has_header, **kwargs)
elif dialect == 'hive':
from .ssh import SSH
return append(tbl, convert(Temp(SSH(CSV)), csv, **kwargs), **kwargs)
kwargs = merge(csv.dialect, kwargs)
stmt = CopyFromCSV(tbl, csv, bind=bind, **kwargs)
with bind.begin() as conn:
conn.execute(stmt)
return tbl
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/sql_csv.py",
"copies": "1",
"size": "8019",
"license": "bsd-3-clause",
"hash": 5883079477662075000,
"line_mean": 33.2692307692,
"line_max": 82,
"alpha_frac": 0.595211373,
"autogenerated": false,
"ratio": 4.159232365145228,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010424731531380349,
"num_lines": 234
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import subprocess
from setuptools import setup, find_packages
from codecs import open
from os import path
from huckle import package
from huckle import hutils
if sys.argv[-1] == 'publish':
branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).strip()
if branch.decode('ASCII') != "master":
sys.exit("publishing from a branch other than master is disallowed.")
os.system("rm -rf dist")
os.system("python setup.py sdist")
os.system("twine upload dist/* -r pypi")
os.system("git tag -a %s -m 'version %s'" % ("huckle-" + package.__version__, "huckle-" + package.__version__))
os.system("git push")
os.system("git push --tags")
sys.exit()
if sys.argv[-1] == 'tag':
branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).strip()
if branch.decode('ASCII') != "master":
sys.exit("tagging from a branch other than master is disallowed.")
os.system("git tag -a %s -m 'version %s'" % ("huckle-" + package.__version__, "huckle-" + package.__version__))
sys.exit()
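# Assumed usage of the custom commands above (run from the repository root on
# the master branch; these are the two branches handled above, not additional
# setuptools features):
#   python setup.py tag       # create an annotated git tag for the current version
#   python setup.py publish   # build an sdist, upload via twine, tag and push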
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='huckle',
version=package.__version__,
description='A CLI that can act as an impostor for any CLI expressed through hypertext command line interface (HCLI) semantics.',
long_description=long_description,
url='https://github.com/cometaj2/huckle',
author='Jeff Michaud',
author_email='cometaj2@comcast.net',
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='cli client hypermedia rest generic development',
packages=find_packages(exclude=['__pycache__', 'tests']),
install_requires=[package.dependencies[0],
package.dependencies[1],
package.dependencies[2],
package.dependencies[3]],
package_data={'huckle': ['data/*']},
include_package_data=True,
entry_points={
'console_scripts': [
'huckle=huckle.__main__:main',
],
},
)
| {
"repo_name": "cometaj2/huckle",
"path": "setup.py",
"copies": "1",
"size": "2602",
"license": "mit",
"hash": 7998725660022406000,
"line_mean": 36.1714285714,
"line_max": 133,
"alpha_frac": 0.6299000769,
"autogenerated": false,
"ratio": 3.7874818049490537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4917381881849054,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import tensorflow.compat.v1 as tfv1
from attrdict import AttrDict
from xdg import BaseDirectory as xdg
from src.flags import FLAGS
from .gpu import get_available_gpus
from .logging import log_error
from .text import Alphabet, UTF8Alphabet
from .helpers import parse_file_size
class ConfigSingleton:
_config = None
def __getattr__(self, name):
if not ConfigSingleton._config:
raise RuntimeError("Global configuration not yet initialized.")
if not hasattr(ConfigSingleton._config, name):
raise RuntimeError("Configuration option {} not found in config.".format(name))
return ConfigSingleton._config[name]
Config = ConfigSingleton() # pylint: disable=invalid-name
def initialize_globals():
c = AttrDict()
# Read-buffer
FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)
# Set default dropout rates
if FLAGS.dropout_rate2 < 0:
FLAGS.dropout_rate2 = FLAGS.dropout_rate
if FLAGS.dropout_rate3 < 0:
FLAGS.dropout_rate3 = FLAGS.dropout_rate
if FLAGS.dropout_rate6 < 0:
FLAGS.dropout_rate6 = FLAGS.dropout_rate
# Set default checkpoint dir
if not FLAGS.checkpoint_dir:
FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))
if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:
FLAGS.load_train = 'auto'
if FLAGS.load_evaluate not in ['last', 'best', 'auto']:
FLAGS.load_evaluate = 'auto'
# Set default summary dir
if not FLAGS.summary_dir:
FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))
# Standard session configuration that'll be used for all new sessions.
c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,
inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))
# CPU device
c.cpu_device = '/cpu:0'
# Available GPU devices
c.available_devices = get_available_gpus(c.session_config)
# If there is no GPU available, we fall back to CPU based operation
if not c.available_devices:
c.available_devices = [c.cpu_device]
if FLAGS.utf8:
c.alphabet = UTF8Alphabet()
else:
c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))
# Geometric Constants
# ===================
# For an explanation of the meaning of the geometric constants, please refer to
# doc/Geometry.md
# Number of MFCC features
c.n_input = 26 # TODO: Determine this programmatically from the sample rate
# The number of frames in the context
c.n_context = 9 # TODO: Determine the optimal value using a validation data set
# Number of units in hidden layers
c.n_hidden = FLAGS.n_hidden
c.n_hidden_1 = c.n_hidden
c.n_hidden_2 = c.n_hidden
c.n_hidden_5 = c.n_hidden
# LSTM cell state dimension
c.n_cell_dim = c.n_hidden
# The number of units in the third layer, which feeds in to the LSTM
c.n_hidden_3 = c.n_cell_dim
# Units in the sixth layer = number of characters in the target language plus one
c.n_hidden_6 = c.alphabet.size() + 1 # +1 for CTC blank label
# Size of audio window in samples
if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_len value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_len value or resample your audio accordingly.'
''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)
# Stride for feature computations in samples
if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_step value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_step value or resample your audio accordingly.'
''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)
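    # Worked example (assumed flag defaults): with --feature_win_len 32 (ms)
    # and --audio_sample_rate 16000 (Hz), 32 * 16000 = 512000 is divisible by
    # 1000, so audio_window_samples = 16000 * (32 / 1000) = 512 samples; a
    # --feature_win_step of 20 ms likewise yields 320 samples per step.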
if FLAGS.one_shot_infer:
if not os.path.exists(FLAGS.one_shot_infer):
log_error('Path specified in --one_shot_infer is not a valid file.')
sys.exit(1)
if FLAGS.train_cudnn and FLAGS.load_cudnn:
log_error('Trying to use --train_cudnn, but --load_cudnn '
'was also specified. The --load_cudnn flag is only '
'needed when converting a CuDNN RNN checkpoint to '
'a CPU-capable graph. If your system is capable of '
'using CuDNN RNN, you can just specify the CuDNN RNN '
'checkpoint normally with --save_checkpoint_dir.')
sys.exit(1)
# If separate save and load flags were not specified, default to load and save
# from the same dir.
if not FLAGS.save_checkpoint_dir:
FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir
if not FLAGS.load_checkpoint_dir:
FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir
ConfigSingleton._config = c # pylint: disable=protected-access
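# --- Illustrative sketch (not part of the original module) ---
# Attribute access on the Config singleton is only valid after
# initialize_globals() has populated it; the attributes used below are built
# in the AttrDict above.
def _example_config_usage():
    initialize_globals()
    return Config.n_hidden, Config.available_devices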
| {
"repo_name": "googleinterns/deepspeech-reconstruction",
"path": "src/deepspeech_training/util/config.py",
"copies": "1",
"size": "5748",
"license": "apache-2.0",
"hash": 2776932445715784000,
"line_mean": 37.32,
"line_max": 108,
"alpha_frac": 0.6475295755,
"autogenerated": false,
"ratio": 3.8041032428855064,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49516328183855063,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import threading
import queue
import numpy as np
from util import text_processing
class BatchLoaderClevr:
def __init__(self, imdb, data_params):
self.imdb = imdb
self.data_params = data_params
self.vocab_dict = text_processing.VocabDict(data_params['vocab_question_file'])
self.T_encoder = data_params['T_encoder']
# peek one example to see whether answer and gt_layout are in the data
self.load_answer = ('answer' in self.imdb[0]) and (self.imdb[0]['answer'] is not None)
self.load_gt_layout = ('gt_layout_tokens' in self.imdb[0]) and (self.imdb[0]['gt_layout_tokens'] is not None)
if 'load_gt_layout' in data_params:
self.load_gt_layout = data_params['load_gt_layout']
# the answer dict is always loaded, regardless of self.load_answer
self.answer_dict = text_processing.VocabDict(data_params['vocab_answer_file'])
if not self.load_answer:
print('imdb does not contain answers')
if self.load_gt_layout:
self.T_decoder = data_params['T_decoder']
self.assembler = data_params['assembler']
self.prune_filter_module = (data_params['prune_filter_module']
if 'prune_filter_module' in data_params
else False)
else:
print('imdb does not contain ground-truth layout')
# load one feature map to peek its size
feats = np.load(self.imdb[0]['feature_path'])
self.feat_H, self.feat_W, self.feat_D = feats.shape[1:]
def load_one_batch(self, sample_ids):
actual_batch_size = len(sample_ids)
input_seq_batch = np.zeros((self.T_encoder, actual_batch_size), np.int32)
seq_length_batch = np.zeros(actual_batch_size, np.int32)
image_feat_batch = np.zeros((actual_batch_size, self.feat_H, self.feat_W, self.feat_D), np.float32)
image_path_list = [None]*actual_batch_size
if self.load_answer:
answer_label_batch = np.zeros(actual_batch_size, np.int32)
if self.load_gt_layout:
gt_layout_batch = np.zeros((self.T_decoder, actual_batch_size), np.int32)
for n in range(len(sample_ids)):
iminfo = self.imdb[sample_ids[n]]
question_inds = [self.vocab_dict.word2idx(w) for w in iminfo['question_tokens']]
seq_length = len(question_inds)
input_seq_batch[:seq_length, n] = question_inds
seq_length_batch[n] = seq_length
image_feat_batch[n:n+1] = np.load(iminfo['feature_path'])
image_path_list[n] = iminfo['image_path']
if self.load_answer:
answer_idx = self.answer_dict.word2idx(iminfo['answer'])
answer_label_batch[n] = answer_idx
if self.load_gt_layout:
gt_layout_tokens = iminfo['gt_layout_tokens']
if self.prune_filter_module:
# remove duplicated consecutive modules (only keeping one _Filter)
for n_t in range(len(gt_layout_tokens)-1, 0, -1):
if (gt_layout_tokens[n_t-1] in {'_Filter', '_Find'}
and gt_layout_tokens[n_t] == '_Filter'):
gt_layout_tokens[n_t] = None
gt_layout_tokens = [t for t in gt_layout_tokens if t]
gt_layout_batch[:, n] = self.assembler.module_list2tokens(
gt_layout_tokens, self.T_decoder)
batch = dict(input_seq_batch=input_seq_batch,
seq_length_batch=seq_length_batch,
image_feat_batch=image_feat_batch,
image_path_list=image_path_list)
if self.load_answer:
batch['answer_label_batch'] = answer_label_batch
if self.load_gt_layout:
batch['gt_layout_batch'] = gt_layout_batch
return batch
class DataReader:
def __init__(self, imdb_file, shuffle=True, one_pass=False, prefetch_num=8, **kwargs):
print('Loading imdb from file...', end=''); sys.stdout.flush()
if imdb_file.endswith('.npy'):
imdb = np.load(imdb_file)
else:
raise TypeError('unknown imdb format.')
print('Done')
self.imdb = imdb
self.shuffle = shuffle
self.one_pass = one_pass
self.prefetch_num = prefetch_num
self.data_params = kwargs
# Clevr data loader
self.batch_loader = BatchLoaderClevr(self.imdb, self.data_params)
# Start prefetching thread
self.prefetch_queue = queue.Queue(maxsize=self.prefetch_num)
self.prefetch_thread = threading.Thread(target=_run_prefetch,
args=(self.prefetch_queue, self.batch_loader, self.imdb,
self.shuffle, self.one_pass, self.data_params))
self.prefetch_thread.daemon = True
self.prefetch_thread.start()
def batches(self):
while True:
# Get a batch from the prefetching queue
if self.prefetch_queue.empty():
print('data reader: waiting for data loading (IO is slow)...')
batch = self.prefetch_queue.get(block=True)
if batch is None:
assert(self.one_pass)
print('data reader: one pass finished')
raise StopIteration()
yield batch
def _run_prefetch(prefetch_queue, batch_loader, imdb, shuffle, one_pass, data_params):
num_samples = len(imdb)
batch_size = data_params['batch_size']
n_sample = 0
fetch_order = np.arange(num_samples)
while True:
# Shuffle the sample order for every epoch
if n_sample == 0 and shuffle:
fetch_order = np.random.permutation(num_samples)
# Load batch from file
# note that len(sample_ids) <= batch_size, not necessarily equal
sample_ids = fetch_order[n_sample:n_sample+batch_size]
batch = batch_loader.load_one_batch(sample_ids)
prefetch_queue.put(batch, block=True)
n_sample += len(sample_ids)
if n_sample >= num_samples:
# Put in a None batch to indicate a whole pass is over
if one_pass:
prefetch_queue.put(None, block=True)
n_sample = 0
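# --- Illustrative sketch (not part of the original module) ---
# Driving the reader; the imdb/vocab file names are hypothetical, and
# load_gt_layout is disabled so no assembler needs to be supplied.
def _example_read_batches():
    reader = DataReader('train_imdb.npy',  # hypothetical preprocessed imdb
                        shuffle=True, one_pass=True, batch_size=64,
                        T_encoder=45, load_gt_layout=False,
                        vocab_question_file='vocab_questions.txt',
                        vocab_answer_file='vocab_answers.txt')
    for batch in reader.batches():
        print(batch['input_seq_batch'].shape, batch['image_feat_batch'].shape)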
| {
"repo_name": "ronghanghu/n2nmn",
"path": "util/clevr_train/data_reader.py",
"copies": "1",
"size": "6386",
"license": "bsd-2-clause",
"hash": -6562502287334776000,
"line_mean": 43.6573426573,
"line_max": 117,
"alpha_frac": 0.5878484184,
"autogenerated": false,
"ratio": 3.7498532002348797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4837701618634879,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import threading
import queue
import numpy as np
from util import text_processing
class BatchLoaderVqa:
def __init__(self, imdb, data_params):
self.imdb = imdb
self.data_params = data_params
self.vocab_dict = text_processing.VocabDict(data_params['vocab_question_file'])
self.T_encoder = data_params['T_encoder']
# peek one example to see whether answer and gt_layout are in the data
self.load_answer = ('valid_answers' in self.imdb[0]) and (self.imdb[0]['valid_answers'] is not None)
self.load_gt_layout = ('gt_layout_tokens' in self.imdb[0]) and (self.imdb[0]['gt_layout_tokens'] is not None)
if 'load_gt_layout' in data_params:
self.load_gt_layout = data_params['load_gt_layout']
# decide whether or not to load gt textatt
self.load_gt_txtatt = ('gt_txtatt' in self.imdb[0]) and (self.imdb[0]['gt_txtatt'] is not None)
if 'load_gt_txtatt' in data_params:
self.load_gt_txtatt = data_params['load_gt_txtatt']
# the answer dict is always loaded, regardless of self.load_answer
self.answer_dict = text_processing.VocabDict(data_params['vocab_answer_file'])
self.num_choices = self.answer_dict.num_vocab
if not self.load_answer:
print('imdb does not contain answers')
else:
self.load_binary_labels = ('load_binary_labels' in data_params) \
and data_params['load_binary_labels']
if self.load_binary_labels:
print('loading softmax and binary classification labels.')
else:
print('loading softmax labels (but not binary labels).')
# if 'overriding_layout' is set in data_params, force self.load_gt_layout to True
# and override the ground-truth layout
self.overriding_layout = None
if 'overriding_layout' in data_params:
print('"overriding_layout" key is set in data_params')
print('overriding all layout with:', data_params['overriding_layout'])
self.load_gt_layout = True
self.load_gt_txtatt = False
self.overriding_layout = data_params['overriding_layout']
if self.load_gt_layout:
self.T_decoder = data_params['T_decoder']
self.assembler = data_params['assembler']
# self.prune_filter_module = (data_params['prune_filter_module']
# if 'prune_filter_module' in data_params
# else False)
else:
print('imdb does not contain ground-truth layout, and "overriding_layout" key is not set')
if 'use_count_module' in data_params and data_params['use_count_module']:
print('Use Count module: all "how many" questions will use Count for answer')
self.use_count_module = True
else:
print('Not using Count module')
self.use_count_module = False
# load one feature map to peek its size
feats = np.load(self.imdb[0]['feature_path'])
self.feat_H, self.feat_W, self.feat_D = feats.shape[1:]
def load_one_batch(self, sample_ids):
actual_batch_size = len(sample_ids)
input_seq_batch = np.zeros((self.T_encoder, actual_batch_size), np.int32)
seq_length_batch = np.zeros(actual_batch_size, np.int32)
image_feat_batch = np.zeros((actual_batch_size, self.feat_H, self.feat_W, self.feat_D), np.float32)
image_path_list = [None]*actual_batch_size
qid_list = [None]*actual_batch_size
qstr_list = [None]*actual_batch_size
if self.load_answer:
answer_label_batch = np.zeros(actual_batch_size, np.int32)
if self.load_binary_labels:
answer_binarylabel_batch = \
np.zeros((actual_batch_size, self.num_choices), np.float32)
valid_answers_list = [None]*actual_batch_size
all_answers_list = [None]*actual_batch_size
if self.load_gt_layout:
gt_layout_batch = np.zeros((self.T_decoder, actual_batch_size), np.int32)
if self.load_gt_txtatt:
gt_txtatt_batch = np.zeros((self.T_decoder, self.T_encoder, actual_batch_size, 1), np.bool)
for n in range(len(sample_ids)):
iminfo = self.imdb[sample_ids[n]]
question_inds = [self.vocab_dict.word2idx(w) for w in iminfo['question_tokens']]
seq_length = len(question_inds)
input_seq_batch[:seq_length, n] = question_inds
seq_length_batch[n] = seq_length
image_feat_batch[n:n+1] = np.load(iminfo['feature_path'])
image_path_list[n] = iminfo['image_path']
qid_list[n] = iminfo['question_id']
qstr_list[n] = iminfo['question_str']
if self.load_answer:
valid_answers = iminfo['valid_answers']
valid_answers_list[n] = valid_answers
all_answers = iminfo['valid_answers']
all_answers_list[n] = all_answers
# randomly sample an answer from valid answers
answer = np.random.choice(valid_answers)
answer_idx = self.answer_dict.word2idx(answer)
answer_label_batch[n] = answer_idx
if self.load_binary_labels:
valid_answer_inds = [self.answer_dict.word2idx(a) for a in valid_answers]
answer_binarylabel_batch[n, valid_answer_inds] = 1.
if self.load_gt_layout:
if self.overriding_layout is not None:
gt_layout_tokens = self.overriding_layout
else:
gt_layout_tokens = iminfo['gt_layout_tokens'][:] # make a copy for modificaton
if self.use_count_module:
is_count_question = ('how many' in iminfo['question_str'].lower())
if is_count_question: # overwrite describe with count
assert(gt_layout_tokens[-1] == '_Describe')
gt_layout_tokens[-1] = '_Count'
# print('_Describe replaced by _Count in ' + iminfo['question_str'])
# if self.prune_filter_module:
# # remove duplicated consecutive modules (only keeping one _Filter)
# for n_t in range(len(gt_layout_tokens)-1, 0, -1):
# if (gt_layout_tokens[n_t-1] in {'_Filter', '_Find'}
# and gt_layout_tokens[n_t] == '_Filter'):
# gt_layout_tokens[n_t] = None
# gt_layout_tokens = [t for t in gt_layout_tokens if t]
gt_layout_batch[:, n] = self.assembler.module_list2tokens(
gt_layout_tokens, self.T_decoder)
if self.load_gt_txtatt:
gt_txtatt = iminfo['gt_txtatt']
for t_decoder, ind in enumerate(gt_txtatt):
if ind is not None:
t_begin, t_end = ind
gt_txtatt_batch[t_decoder, t_begin:t_end, n, 0] = True
batch = dict(input_seq_batch=input_seq_batch,
seq_length_batch=seq_length_batch,
image_feat_batch=image_feat_batch,
image_path_list=image_path_list,
qid_list=qid_list, qstr_list=qstr_list)
if self.load_answer:
batch['answer_label_batch'] = answer_label_batch
batch['valid_answers_list'] = valid_answers_list
batch['all_answers_list'] = all_answers_list
if self.load_binary_labels:
batch['answer_binarylabel_batch'] = answer_binarylabel_batch
if self.load_gt_layout:
batch['gt_layout_batch'] = gt_layout_batch
if self.load_gt_txtatt:
batch['gt_txtatt_batch'] = gt_txtatt_batch
return batch
class DataReader:
def __init__(self, imdb_file, shuffle=True, one_pass=False, prefetch_num=8, **kwargs):
print('Loading imdb from file...', end=''); sys.stdout.flush()
if imdb_file.endswith('.npy'):
imdb = np.load(imdb_file)
else:
raise TypeError('unknown imdb format.')
print('Done')
self.imdb = imdb
self.shuffle = shuffle
self.one_pass = one_pass
self.prefetch_num = prefetch_num
self.data_params = kwargs
# Vqa data loader
self.batch_loader = BatchLoaderVqa(self.imdb, self.data_params)
# Start prefetching thread
self.prefetch_queue = queue.Queue(maxsize=self.prefetch_num)
self.prefetch_thread = threading.Thread(target=_run_prefetch,
args=(self.prefetch_queue, self.batch_loader, self.imdb,
self.shuffle, self.one_pass, self.data_params))
self.prefetch_thread.daemon = True
self.prefetch_thread.start()
def batches(self):
while True:
# Get a batch from the prefetching queue
if self.prefetch_queue.empty():
print('data reader: waiting for data loading (IO is slow)...')
batch = self.prefetch_queue.get(block=True)
if batch is None:
assert(self.one_pass)
print('data reader: one pass finished')
raise StopIteration()
yield batch
def _run_prefetch(prefetch_queue, batch_loader, imdb, shuffle, one_pass, data_params):
num_samples = len(imdb)
batch_size = data_params['batch_size']
n_sample = 0
fetch_order = np.arange(num_samples)
while True:
# Shuffle the sample order for every epoch
if n_sample == 0 and shuffle:
fetch_order = np.random.permutation(num_samples)
# Load batch from file
# note that len(sample_ids) <= batch_size, not necessarily equal
sample_ids = fetch_order[n_sample:n_sample+batch_size]
batch = batch_loader.load_one_batch(sample_ids)
prefetch_queue.put(batch, block=True)
n_sample += len(sample_ids)
if n_sample >= num_samples:
# Put in a None batch to indicate a whole pass is over
if one_pass:
prefetch_queue.put(None, block=True)
n_sample = 0
| {
"repo_name": "ronghanghu/n2nmn",
"path": "util/vqa_train/data_reader.py",
"copies": "1",
"size": "10411",
"license": "bsd-2-clause",
"hash": -869276417773364200,
"line_mean": 47.1990740741,
"line_max": 117,
"alpha_frac": 0.5756411488,
"autogenerated": false,
"ratio": 3.807973664959766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4883614813759766,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import time
import timeit
import math
import numpy as np
import PIL.Image as Image
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from scipy.stats import kendalltau
from .utils import tile_raster_images, image_from_nparray
from .utils import tile_tensor4_from_list
class Trainer(object):
"""
Arguments
---------
model: a model to proceed training and testing. type = model instance.
train_config: A dictionary containing:
'batch_size': number of data samples in a batch
'epochs': maximum number of training epochs
'test_freq': test the trained model every test_freq epochs
'save_freq': save recorded data every save_freq epochs
'regular_snap_freq': save a model snapshot every regular_snap_freq epochs
'n_imgs_to_record': number of images to record
'prefix': prefix for the filenames of recorded data
snap_path: path to save snapshot file.
output_path: path to save output data.
"""
def __init__(self, train_config, snap_path=None, output_path=None):
self.test_freq = train_config.get('test_freq', None)
assert self.test_freq is not None
self.save_freq = train_config.get('save_freq', None)
if self.save_freq is None:
self.save_freq = self.test_freq
self.regular_snap_freq = train_config.get('regular_snap_freq', 40)
self.n_imgs_to_record = train_config.get('n_imgs_to_record', 20)
self.prefix = train_config.get('prefix', '')
self.set_path(snap_path, output_path)
def set_path(self, snap_path, output_path=None):
if snap_path is not None:
if not os.path.isdir(snap_path):
os.makedirs(snap_path)
if output_path is not None:
if not os.path.isdir(output_path):
os.makedirs(output_path)
else:
output_path = snap_path
self.snap_path = snap_path
self.output_path = output_path
def training_routine(self, model, get_train_outputs, rec_train,
get_test_outputs, rec_test,
train_batch_size, test_batch_size,
train_data, test_data,
epochs, prefix2='', check_mos_corr=False):
"""
Actual training routine.
@type model: .models.model_basis.ModelBasis
@type rec_train: .models.model_record.Record
@type rec_test: .models.model_record.Record
@type train_data: .data_load.dataset.Dataset
@type test_data: .data_load.dataset.Dataset
"""
# check validity
assert self.snap_path is not None
# get numbers of training and Testing batches
n_train_imgs = train_data.n_data
n_test_imgs = test_data.n_data
n_train_batches = int(n_train_imgs / train_batch_size)
n_test_batches = int(n_test_imgs / test_batch_size)
assert n_train_batches > 0, 'n_train_batches = %d' % (n_train_batches)
assert n_test_batches > 0, 'n_test_batches = %d' % (n_test_batches)
# check n_imgs_to_record
n_valid_rec_batches = self.n_imgs_to_record // test_batch_size + 1
if n_valid_rec_batches > n_test_batches:
n_valid_rec_batches = n_test_batches
n_imgs_to_record = n_valid_rec_batches * test_batch_size
# get numbers of data and images to monitor and write
until_loss, until_im_info, until_img = rec_test.get_until_indices(1)
# snapshot file names
snapshot_file_latest = os.path.join(
self.snap_path, self.prefix + prefix2 + 'snapshot_lastest.npy')
snapshot_file_best = os.path.join(
self.snap_path, self.prefix + prefix2 + 'snapshot_best.npy')
snapshot_file_best_srcc = os.path.join(
self.snap_path, self.prefix + prefix2 + 'snapshot_best_srcc.npy')
snapshot_file_regular = os.path.join(
self.snap_path, self.prefix + prefix2 + 'snapshot_{:03d}.npy')
snapshot_file_fin = os.path.join(
self.snap_path, self.prefix + prefix2 + 'snapshot.npy')
# log file names
log_file = os.path.join(
self.snap_path, prefix2 + 'log.txt')
log_test_file = os.path.join(
self.output_path, prefix2 + 'log_test.txt')
# Show information
print('\nTrain', end='')
if train_data.imagewise:
print(' imagewise', end='')
else:
print(' patchwise', end='')
print(' / Test', end='')
if test_data.imagewise:
print(' imagewise', end='')
else:
print(' patchwise', end='')
print(' (%d epochs).' % (epochs))
print('Save a snapshot every %d epochs,' % self.save_freq, end='')
print(' and test the model every %d epochs.' % self.test_freq)
print(' - Regular snapshot: every %d epochs' % self.regular_snap_freq)
print(' - Snapshot path: %s' % self.snap_path)
print(' - Batch size: %d (train) / %d (test)' % (
train_batch_size, test_batch_size))
print(' - Training batches: %d (%d images)' % (
n_train_batches, n_train_imgs))
print(' - Testing batches: %d (%d images)' % (
n_test_batches, n_test_imgs), end='')
print(' / Missed images: %d' % (
n_test_imgs - n_test_batches * test_batch_size))
print(' - Monitor data: %s' % (', '.join(rec_train.data_keys)))
print(' - Monitor images: %s' % (', '.join(rec_test.data_keys)))
print(' - Monitor im. data: %s' % (', '.join(rec_test.im_data_keys)))
print(' - Num of rec. images: %d (%d x %d batches)' % (
n_imgs_to_record, test_batch_size, n_valid_rec_batches))
# get MOS list
if check_mos_corr:
# if check_mos_corr is true, the first value of
# rec_im_data must be the predicted MOS (mos_p).
assert rec_test.im_data_keys[0] == 'mos_p'
assert test_data.exist_score
n_valid_test_imgs = n_test_batches * test_batch_size
test_score_list = test_data.score_data[:n_valid_test_imgs]
mos_p_list = np.zeros(n_valid_test_imgs, dtype='float32')
print(' - Check SRCC/PLCC using %d images' % (n_valid_test_imgs))
start_time = timeit.default_timer()
prev_time = start_time
best_test_loss = np.inf
# write current time in log file
cur_time = 'Started at %s\n' % (time.strftime('%X %x'))
key_str = 'cost, ' + ", ".join(rec_train.data_keys) + '\n'
with open(log_file, 'a') as f_hist:
f_hist.write(cur_time)
f_hist.write(key_str)
key_str = 'cost, ' + ", ".join(rec_train.data_keys)
key_str += ', SRCC, PLCC\n' if check_mos_corr else '\n'
with open(log_test_file, 'a') as f_hist:
f_hist.write(cur_time)
f_hist.write(key_str)
best_score_set = (0., 0., -1) if check_mos_corr else (np.inf, 0., -1)
# go through training epochs
for epoch in range(epochs):
# train model
losses = np.zeros(rec_train.num_data + 1, dtype='float32')
for batch_idx in range(n_train_batches):
# get training loss
losses += get_train_outputs()
losses /= n_train_batches
# write log
with open(log_file, 'a') as f_hist:
data = '%d' % (epoch + 1)
for idx in range(-1, rec_train.num_data):
data += '\t%.6f' % (losses[idx + 1])
data += '\n'
f_hist.write(data)
# show information
end_time = timeit.default_timer()
pr_str = ' {:3d}, cost {:.3f}, '.format(epoch + 1, losses[0])
for idx, key in enumerate(rec_train.data_keys):
pr_str += '{:s} {:.3f}, '.format(key, losses[idx + 1])
minutes, seconds = divmod(end_time - prev_time, 60)
pr_str += 'time {:02.0f}:{:05.2f}\n'.format(minutes, seconds)
sys.stdout.write(pr_str)
sys.stdout.flush()
prev_time = end_time
if (epoch + 1) % self.regular_snap_freq == 0:
model.save(snapshot_file_regular.format(epoch + 1))
##################################################################
# test the trained model and save a snapshot
# For every safe_freq and test_freq
test_model = (epoch + 1) % self.test_freq == 0
save_data = (epoch + 1) % self.save_freq == 0
if test_model or save_data:
if save_data:
# make output folder
numstr = '{:03d}'.format(epoch + 1)
out_path = os.path.join(
self.output_path, prefix2 + numstr + '/')
if not os.path.isdir(out_path):
os.makedirs(out_path)
im_data = np.zeros(
(rec_test.num_im_data, n_imgs_to_record),
dtype='float32')
losses = np.zeros(rec_test.num_data + 1, dtype='float32')
for test_bat_idx in range(0, n_test_batches):
# get testing loss
outputs = get_test_outputs()
losses += outputs[:until_loss]
cur_im_data = outputs[until_loss:until_im_info]
cur_images = outputs[until_im_info:until_img]
# get predicted mos
if check_mos_corr:
mos_p = cur_im_data[0]
idx_from = test_bat_idx * test_batch_size
idx_to = (test_bat_idx + 1) * test_batch_size
mos_p_list[idx_from:idx_to] = mos_p
# write image data
if (save_data and rec_test.num_im_data > 0 and
test_bat_idx < n_valid_rec_batches):
idx_from = test_bat_idx * test_batch_size
idx_to = (test_bat_idx + 1) * test_batch_size
im_data[:, idx_from:idx_to] = cur_im_data
# write images
if (save_data and rec_test.num_imgs > 0 and
test_bat_idx < n_valid_rec_batches):
if test_data.imagewise:
rec_info = test_data.get_current_recon_info()
draw_tiled_images(
cur_images, rec_test.rec_imgs, test_bat_idx,
out_path,
rec_info['bat2img_idx_set'],
rec_info['npat_img_list'],
rec_info['filt_idx_list'],
test_data.patch_size,
test_data.patch_step)
else:
draw_images(
cur_images, rec_test.rec_imgs, test_bat_idx,
test_batch_size, out_path)
losses /= n_test_batches
# get SRCC and PLCC
if check_mos_corr:
rho_s, _ = spearmanr(test_score_list, mos_p_list)
rho_p, _ = pearsonr(test_score_list, mos_p_list)
if math.isnan(rho_s) or math.isnan(rho_p):
print('@ Stop iteration! (NaN)')
best_score_set = (0, 0, epoch)
break
else:
if rho_s > best_score_set[0]:
best_score_set = (rho_s, rho_p, epoch)
model.save(snapshot_file_best_srcc)
else:
if losses[0] < best_score_set[0]:
if rec_test.num_data >= 1:
best_score_set = (losses[0], losses[1], epoch)
else:
best_score_set = (losses[0], 0, epoch)
# save the latest snapshot
model.save(snapshot_file_latest)
# save the best snapshot
if losses[0] < best_test_loss:
best_test_loss = losses[0]
print(' # BEST', end=' ')
model.save(snapshot_file_best)
# For every save_freq
if save_data:
# write image data
if rec_test.num_im_data > 0:
with open(out_path + 'info.txt', 'w') as f:
# header
data = 'epoch: %s (%s)\n' % (
numstr, ', '.join(rec_test.im_data_keys))
f.write(data)
for idx in range(n_imgs_to_record):
imidx = idx
data = '%d' % idx
for ii in range(rec_test.num_im_data):
data += '\t%.6f' % (im_data[ii][imidx])
data += '\n'
f.write(data)
# write mos
if check_mos_corr:
with open(out_path + 'mos_res.txt', 'w') as f:
# header
data = 'epoch: %s (mos_p, mos)\n' % (numstr)
f.write(data)
for idx in range(n_valid_test_imgs):
data = '{:.6f}\t{:.6f}\n'.format(
mos_p_list[idx], test_score_list[idx])
f.write(data)
data = 'SRCC: {:.4f}, PLCC: {:.4f}\n'.format(
rho_s, rho_p)
f.write(data)
# write kernel images
draw_kernels(rec_test.rec_kernels, self.output_path,
prefix2, '_' + numstr)
# write log
with open(log_test_file, 'a') as f_hist:
data = '{:d}'.format(epoch + 1)
for idx in range(-1, rec_test.num_data):
data += '\t{:.6f}'.format(losses[idx + 1])
if check_mos_corr:
data += '\t{:.4f}\t{:.4f}'.format(rho_s, rho_p)
data += '\n'
f_hist.write(data)
# show information
end_time = timeit.default_timer()
pr_str = ' * vcost {:.3f}, '.format(losses[0])
for idx, key in enumerate(rec_train.data_keys):
pr_str += '{:s} {:.3f}, '.format(key, losses[idx + 1])
if check_mos_corr:
pr_str += 'SRCC {:.3f}, PLCC {:.3f}, '.format(rho_s, rho_p)
minutes, seconds = divmod(end_time - prev_time, 60)
pr_str += 'time {:02.0f}:{:05.2f}\n'.format(minutes, seconds)
sys.stdout.write(pr_str)
sys.stdout.flush()
prev_time = end_time
end_time = timeit.default_timer()
total_time = end_time - start_time
print(' - Train ran for %.2fm' % ((total_time) / 60.))
print(' - Finished at %s' % (time.strftime('%X %x')))
if best_score_set[0] != 0:
model.save(snapshot_file_fin)
return best_score_set
def testing_routine(self, get_test_outputs, rec_test,
test_batch_size, test_data, prefix2='',
check_mos_corr=False):
"""Actual testing routine: group patches for each image
@type rec_test: .models.model_record.Record
"""
# get numbers of training and Testing batches
n_test_imgs = test_data.n_images
n_test_batches = int(n_test_imgs / test_batch_size)
assert n_test_batches > 0
n_valid_test_imgs = n_test_batches * test_batch_size
if self.n_imgs_to_record == 'all':
n_valid_rec_batches = n_test_batches  # record every test batch
n_imgs_to_record = n_valid_test_imgs
else:
n_valid_rec_batches = self.n_imgs_to_record // test_batch_size + 1
if n_valid_rec_batches > n_test_batches:
n_valid_rec_batches = n_test_batches
n_imgs_to_record = n_valid_rec_batches * test_batch_size
# get numbers of data and images to monitor and write
until_loss = rec_test.num_data + 1
until_im_info = until_loss + rec_test.num_im_data
until_img = until_im_info + rec_test.num_imgs
# Show information
print('\nTest the model')
if test_data.imagewise:
print(' (imagewise)')
else:
print(' (patchwise)')
print(' - Num of images in a batch: %d' % (test_batch_size))
print(' - Testing batches: %d (%d images)' % (
n_test_batches, n_test_imgs))
print(' - Missed images in validation: %d' % (
n_test_imgs - n_test_batches * test_batch_size))
print(' - Image recording batches: %d (%d images)' % (
n_valid_rec_batches, n_imgs_to_record))
print(' - Monitor data: %s' % (', '.join(rec_test.data_keys)))
print(' - Monitor images: %s' % (', '.join(rec_test.data_keys)))
print(' - Monitor im. data: %s' % (', '.join(rec_test.im_data_keys)))
# get MOS list
if check_mos_corr:
# if check_mos_corr is true, the first value of
# rec_im_data must be the predicted MOS (mos_p).
assert rec_test.im_data_keys[0] == 'mos_p'
assert test_data.exist_score
test_score_list = test_data.score_data[:n_valid_test_imgs]
mos_p_list = np.zeros(n_valid_test_imgs, dtype='float32')
print(' - Check SRCC/PLCC using %d images' % (n_valid_test_imgs))
start_time = timeit.default_timer()
prev_time = start_time
# write current time in log file
cur_time = 'Started at %s\n' % (time.strftime('%X %x'))
log_file = os.path.join(self.output_path, prefix2 + 'log_test.txt')
with open(log_file, 'a') as f_hist:
f_hist.write(cur_time)
out_path = os.path.join(self.output_path, prefix2 + '/')
if not os.path.isdir(out_path):
os.makedirs(out_path)
im_data = np.zeros(
(rec_test.num_im_data, n_valid_test_imgs), dtype='float32')
best_score_set = (0., 0.) if check_mos_corr else (np.inf, np.inf)
losses = np.zeros(rec_test.num_data + 1, dtype='float32')
for test_bat_idx in range(0, n_test_batches):
# get testing loss
outputs = get_test_outputs()
losses += outputs[:until_loss]
cur_im_data = outputs[until_loss:until_im_info]
cur_images = outputs[until_im_info:until_img]
# get predicted mos
if check_mos_corr:
mos_p = cur_im_data[0]
idx_from = test_bat_idx * test_batch_size
idx_to = (test_bat_idx + 1) * test_batch_size
mos_p_list[idx_from:idx_to] = mos_p
# write image data
if rec_test.num_im_data > 0:
idx_from = test_bat_idx * test_batch_size
idx_to = (test_bat_idx + 1) * test_batch_size
im_data[:, idx_from:idx_to] = cur_im_data
# write images
if rec_test.num_imgs > 0 and test_bat_idx < n_valid_rec_batches:
if test_data.imagewise:
rec_info = test_data.get_current_recon_info()
draw_tiled_images(
cur_images, rec_test.rec_imgs, test_bat_idx,
out_path,
rec_info['bat2img_idx_set'],
rec_info['npat_img_list'],
rec_info['filt_idx_list'],
test_data.patch_size,
test_data.patch_step)
else:
draw_images(
cur_images, rec_test.rec_imgs, test_bat_idx,
test_batch_size, out_path)
rec_info = test_data.get_current_recon_info()
draw_tiled_images(
cur_images, rec_test.rec_imgs, test_bat_idx, out_path,
rec_info['bat2img_idx_set'],
rec_info['npat_img_list'],
rec_info['filt_idx_list'],
test_data.patch_size,
test_data.patch_step)
losses /= n_test_batches
# get SRCC and PLCC
if check_mos_corr:
rho_s, _ = spearmanr(test_score_list, mos_p_list)
rho_p, _ = pearsonr(test_score_list, mos_p_list)
tau, _ = kendalltau(test_score_list, mos_p_list)
rmse = np.sqrt(((test_score_list - mos_p_list) ** 2).mean())
best_score_set = (rho_s, rho_p)
else:
if rec_test.num_data >= 1:
best_score_set = (losses[0], losses[1])
else:
best_score_set = (losses[0], 0)
# write image data
if rec_test.num_im_data > 0:
with open(out_path + 'info.txt', 'w') as f:
# header
data = 'imidx, %s\n' % (
', '.join(rec_test.im_data_keys))
f.write(data)
for idx in range(n_valid_test_imgs):
imidx = idx
data = '%d' % idx
for ii in range(rec_test.num_im_data):
data += '\t%.6f' % (im_data[ii][imidx])
data += '\n'
f.write(data)
# write mos
if check_mos_corr:
with open(out_path + 'mos_res.txt', 'w') as f:
# header
data = 'mos_p, mos\n'
f.write(data)
for idx in range(n_valid_test_imgs):
data = '{:.6f}\t{:.6f}\n'.format(
mos_p_list[idx], test_score_list[idx])
f.write(data)
data = 'SRCC: {:.4f}, PLCC: {:.4f}'.format(rho_s, rho_p)
data += ', KRCC: {:.4f}, RMSE: {:.4f}\n'.format(tau, rmse)
f.write(data)
# write kernel images
draw_kernels(rec_test.rec_kernels, self.output_path, prefix2)
# show information
end_time = timeit.default_timer()
pr_str = ' * vcost {:.3f}, '.format(losses[0])
for idx, key in enumerate(rec_test.data_keys):
pr_str += '{:s} {:.3f}, '.format(key, losses[idx + 1])
if check_mos_corr:
pr_str += 'SRCC {:.3f}, PLCC {:.3f}, '.format(rho_s, rho_p)
pr_str += 'KRCC {:.3f}, RMSE {:.3f}, '.format(tau, rmse)
minutes, seconds = divmod(end_time - prev_time, 60)
pr_str += 'time {:02.0f}:{:05.2f}\n'.format(minutes, seconds)
sys.stdout.write(pr_str)
sys.stdout.flush()
prev_time = end_time
end_time = timeit.default_timer()
total_time = end_time - start_time
print(' - Test ran for %.2fm' % ((total_time) / 60.))
print(' - Finished at %s' % (time.strftime('%X %x')))
return best_score_set
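# --- Illustrative sketch (not part of the original module) ---
# Constructing a Trainer from the configuration keys read in __init__; all
# paths and values below are hypothetical placeholders.  Batch sizes, epoch
# counts and the data/model objects are passed to training_routine /
# testing_routine directly rather than through train_config.
def _example_make_trainer():
    train_config = {
        'test_freq': 5,            # test the model every 5 epochs
        'save_freq': 10,           # save recorded data every 10 epochs
        'regular_snap_freq': 40,   # regular snapshot interval
        'n_imgs_to_record': 20,
        'prefix': 'biecon_',
    }
    return Trainer(train_config, snap_path='snapshots/', output_path='outputs/')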
def draw_kernels(kernels, out_path, prefix='', suffix=''):
if not os.path.isdir(out_path):
os.makedirs(out_path)
for idx in range(len(kernels)):
kernel = kernels[idx].get_value(borrow=True)
name = kernels[idx].name.replace('/', '_')
assert len(kernel.shape) == 4
(nkern, nfeat, kern_sz0, kern_sz1) = kernel.shape
tile = int(np.ceil(np.sqrt(nkern)))
imgshape = ((kern_sz0 + 1) * tile - 1, (kern_sz1 + 1) * tile - 1)
tot_kern_array = np.zeros((nfeat, imgshape[0] * imgshape[1]))
feat_tile = int(np.ceil(np.sqrt(nfeat)))
for fidx in range(nfeat):
kern_array = tile_raster_images(
X=kernel[:, fidx, :, :],
img_shape=(kern_sz0, kern_sz1),
tile_shape=(tile, tile),
tile_spacing=(1, 1))
tot_kern_array[fidx] = kern_array.flatten()
tot_kern_image = Image.fromarray(tile_raster_images(
X=tot_kern_array,
img_shape=imgshape,
tile_shape=(feat_tile, feat_tile),
tile_spacing=(2, 2)))
img_name = '%s%s%s.png' % (prefix, name, suffix)
tot_kern_image.save(os.path.join(out_path, img_name))
def draw_tiled_images(images, img_info_dict, bat_idx, out_path,
bat2img_idx_set, npat_img_list, filt_idx_list=None,
patch_size=None, patch_step=None):
n_batch_imgs = len(npat_img_list)
for ii, key in enumerate(img_info_dict):
for idx in range(n_batch_imgs):
idx_from, idx_to = bat2img_idx_set[idx]
cur_img = images[ii][idx_from: idx_to]
caxis = img_info_dict[key].get('caxis', None)
scale = img_info_dict[key].get('scale', None)
if scale:
tile_spacing = (
int(-(patch_size[0] - patch_step[0]) * scale),
int(-(patch_size[1] - patch_step[1]) * scale))
else:
tile_spacing = (0, 0)
nch = int(cur_img.shape[1])
if nch == 1 or nch == 3:
tiled_array = tile_tensor4_from_list(
X=cur_img,
tile_shape=npat_img_list[idx][1:],
idx_list=filt_idx_list[idx],
tile_spacing=tile_spacing,
caxis=caxis)
img = Image.fromarray(tiled_array.astype(np.uint8))
img_name = '%d_%s.png' % (bat_idx * n_batch_imgs + idx, key)
img.save(os.path.join(out_path, img_name))
else:
for ch_idx in range(nch):
tiled_array = tile_tensor4_from_list(
X=cur_img[:, ch_idx, :, :],
tile_shape=npat_img_list[idx][1:],
idx_list=filt_idx_list[idx],
tile_spacing=tile_spacing,
caxis=caxis)
img = Image.fromarray(tiled_array.astype(np.uint8))
img_name = '%d_%s_%02d.png' % (
bat_idx * n_batch_imgs + idx, key, ch_idx)
img.save(os.path.join(out_path, img_name))
def draw_images(images, img_info_dict, bat_idx, n_batch_imgs, out_path):
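    # Save each image of the batch as '<global image index>_<key>.png';
    # images whose channel count is neither 1 nor 3 are written one PNG
    # per channel with a '_<channel>' suffix.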
for ii, key in enumerate(img_info_dict):
for idx in range(n_batch_imgs):
cur_img = images[ii][idx]
caxis = img_info_dict[key].get('caxis', None)
nch = int(cur_img.shape[0])
if nch == 1 or nch == 3:
img = image_from_nparray(
np.transpose(cur_img, (1, 2, 0)), caxis=caxis)
img_name = '%d_%s.png' % (bat_idx * n_batch_imgs + idx, key)
img.save(os.path.join(out_path, img_name))
else:
for ch_idx in range(nch):
img = image_from_nparray(
cur_img[ch_idx, :, :], caxis=caxis)
img_name = '%d_%s_%02d.png' % (
bat_idx * n_batch_imgs + idx, key, ch_idx)
img.save(os.path.join(out_path, img_name))
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/trainer.py",
"copies": "1",
"size": "27661",
"license": "mit",
"hash": 5576336438958596000,
"line_mean": 41.4248466258,
"line_max": 79,
"alpha_frac": 0.4845811793,
"autogenerated": false,
"ratio": 3.6073291601460618,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9591910339446061,
"avg_score": 0,
"num_lines": 652
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import unittest
from pathlib import Path
import numpy as np
from sprocket.util.hdf5 import HDF5
dirpath = os.path.dirname(os.path.realpath(__file__))
listf = os.path.join(dirpath, 'data/test.h5')
class hdf5FunctionsTest(unittest.TestCase):
def test_HDF5(self):
data1d = np.random.rand(100)
data1d2 = np.random.rand(50)
data2d = np.random.rand(100).reshape(50, 2)
# write test
path = os.path.join(dirpath, 'data/test.h5')
h5 = HDF5(path, 'w')
h5.save(data1d, '1d')
h5.save(data2d, '2d')
h5.close()
# open test
tmph5 = HDF5(path, 'r')
tmp1d = tmph5.read(ext='1d')
tmp2d = tmph5.read(ext='2d')
tmph5.close()
assert np.allclose(tmp1d, data1d)
assert np.allclose(tmp2d, data2d)
# open test with 'with' statement
with HDF5(path, 'r') as h5_with:
assert np.allclose(h5_with.read('1d'), data1d)
# read/write and replace test
h5 = HDF5(path, 'a')
tmp1d = h5.read(ext='1d')
h5.save(data1d2, '1d')
tmp1d2 = h5.read(ext='1d')
h5.close()
assert np.allclose(tmp1d, data1d)
assert np.allclose(tmp1d2, data1d2)
# remove files
os.remove(path)
def test_HDF5_current_dir(self):
listf_current = os.path.split(listf)[-1]
data1d = np.random.rand(50)
try:
h5_write = HDF5(Path(listf_current) if sys.version_info >= (3,6) else listf_current, 'w')
h5_write.save(data1d, '1d')
h5_write.close()
h5_read = HDF5(os.curdir + os.sep + listf_current, 'r')
read_data1d = h5_read.read(ext='1d')
h5_read.close()
assert np.allclose(data1d, read_data1d)
except: # pragma: no cover
raise
finally:
os.remove(listf_current)
| {
"repo_name": "k2kobayashi/sprocket",
"path": "sprocket/util/tests/test_hdf5.py",
"copies": "1",
"size": "1977",
"license": "mit",
"hash": -9121286587846112000,
"line_mean": 27.652173913,
"line_max": 101,
"alpha_frac": 0.5700556399,
"autogenerated": false,
"ratio": 2.9729323308270676,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4042987970727068,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import unittest
import tempfile
from itertools import product as it_product
import blaze
from blaze.datadescriptor import dd_as_py
blaze.set_strategy('jit')
import numpy as np
from numpy.testing import assert_allclose
def _clean_disk_arrays():
try:
from shutil import rmtree
rmtree(tmpdir)
except Exception as e:
print('Error cleaning up temp dir %s:\n%s' % (tmpdir, e))
def _mk_dir():
global tmpdir
tmpdir = tempfile.mkdtemp(prefix='blztmp')
def _store(name):
return blaze.io.Storage(os.path.join(tmpdir, name + '.blz'))
def _addition(a, b):
return (a+b)
def _expression(a, b):
return (a+b)*(a+b)
#------------------------------------------------------------------------
# Test Generation
#------------------------------------------------------------------------
def _add_tests():
_pair = ['mem', 'dsk']
frame = sys._getframe(1)
for expr, ltr in zip([_addition, _expression], ['R', 'Q']):
for i in it_product(_pair, _pair, _pair):
args = i + (ltr,)
f = _build_tst(expr, *args)
f.__name__ = 'test_{1}_{2}_to_{3}{0}'.format(f.__name__, *args)
frame.f_locals[f.__name__] = f
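# For illustration: with kernel=_addition (letter 'R') and storages
# ('dsk', 'mem', 'mem'), the generated test name is
# 'test_dsk_mem_to_memfunction' -- the original f.__name__ ('function')
# is kept as the suffix.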
def _build_tst(kernel, storage1, storage2, storage3, R):
def function(self):
A = getattr(self, storage1 + 'A')
B = getattr(self, storage2 + 'B')
Rd = kernel(A, B)
self.assert_(isinstance(Rd, blaze.Array))
self.assert_(Rd._data.capabilities.deferred)
p = _store(storage3 + 'Rd') if storage3 == 'dsk' else None
try:
Rc = blaze.eval(Rd, storage=p)
self.assert_(isinstance(Rc, blaze.Array))
npy_data = getattr(self, 'npy' + R)
assert_allclose(np.array(dd_as_py(Rc._data)), npy_data)
if storage3 == 'dsk':
self.assert_(Rc._data.capabilities.persistent)
else:
self.assert_(not Rc._data.capabilities.persistent)
finally:
try:
if p is not None:
blaze.drop(p)
except:
pass # show the real error...
return function
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
class TestEvalScalar(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.npyA = np.array(10)
cls.npyB = np.arange(0.0, 100.0)
cls.npyR = _addition(cls.npyA, cls.npyB)
cls.npyQ = _expression(cls.npyA, cls.npyB)
cls.memA = blaze.array(cls.npyA)
cls.memB = blaze.array(cls.npyB)
_mk_dir()
cls.dskA = blaze.array(cls.npyA, storage=_store('dskA'))
cls.dskB = blaze.array(cls.npyB, storage=_store('dskB'))
@classmethod
def tearDownClass(cls):
_clean_disk_arrays()
del(cls.npyA)
del(cls.npyB)
del(cls.npyR)
del(cls.memA)
del(cls.memB)
del(cls.dskA)
del(cls.dskB)
# add all tests for all permutations
# TODO: Enable. Currently segfaults
# _add_tests()
class TestEval1D(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.npyA = np.arange(0.0, 100.0)
cls.npyB = np.arange(0.0, 100.0)
cls.npyR = _addition(cls.npyA, cls.npyB)
cls.npyQ = _expression(cls.npyA, cls.npyB)
cls.memA = blaze.array(cls.npyA)
cls.memB = blaze.array(cls.npyB)
_mk_dir()
cls.dskA = blaze.array(cls.npyA, storage=_store('dskA'))
cls.dskB = blaze.array(cls.npyB, storage=_store('dskB'))
@classmethod
def tearDownClass(cls):
_clean_disk_arrays()
del(cls.npyA)
del(cls.npyB)
del(cls.npyR)
del(cls.memA)
del(cls.memB)
del(cls.dskA)
del(cls.dskB)
# add all tests for all permutations
_add_tests()
class TestEval2D(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.npyA = np.arange(0.0, 100.0).reshape(20, 5)
cls.npyB = np.arange(0.0, 100.0).reshape(20, 5)
cls.npyR = _addition(cls.npyA, cls.npyB)
cls.npyQ = _expression(cls.npyA, cls.npyB)
cls.memA = blaze.array(cls.npyA)
cls.memB = blaze.array(cls.npyB)
_mk_dir()
cls.dskA = blaze.array(cls.npyA, storage=_store('dskA'))
cls.dskB = blaze.array(cls.npyB, storage=_store('dskB'))
@classmethod
def tearDownClass(cls):
_clean_disk_arrays()
del(cls.npyA)
del(cls.npyB)
del(cls.npyR)
del(cls.memA)
del(cls.memB)
del(cls.dskA)
del(cls.dskB)
# add all tests for all permutations
_add_tests()
class TestStrategy(unittest.TestCase):
def test_strategy(self):
current = blaze.current_strategy()
with blaze.strategy('blah'):
self.assertEqual(blaze.current_strategy(), 'blah')
self.assertEqual(blaze.current_strategy(), current)
def isolated():
npyA = np.arange(0.0, 100.0).reshape(20, 5)
npyB = np.arange(0.0, 100.0).reshape(20, 5)
memA = blaze.array(npyA)
memB = blaze.array(npyB)
_mk_dir()
dskA = blaze.array(npyA, storage=_store('dskA'))
dskB = blaze.array(npyB, storage=_store('dskB'))
expr = memA + memA
#expr = memA + dskA
#expr = dskA + dskA
print(memA.dshape, expr.dshape)
p = _store('dskRd')
result = blaze.eval(expr, storage=p)
#print(result)
result2 = blaze.eval(result == npyA + npyA)
assert np.all(result2)
if __name__ == '__main__':
#TestEval2D.setUpClass()
#TestEval2D('test_dsk_mem_to_memfunction').debug()
unittest.main(verbosity=2)
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/tests/test_eval.py",
"copies": "2",
"size": "5802",
"license": "bsd-3-clause",
"hash": -3369850830092426000,
"line_mean": 26.7607655502,
"line_max": 75,
"alpha_frac": 0.5501551189,
"autogenerated": false,
"ratio": 3.1584104518236256,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4708565570723625,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from cffi import FFI
include_dirs = [os.path.join("extras", "libargon2", "include")]
use_system_argon2 = os.environ.get("ARGON2_CFFI_USE_SYSTEM", "0") == "1"
if use_system_argon2:
include_dirs = []
# Add vendored integer types headers.
if "win32" in str(sys.platform).lower():
int_base = os.path.join("extras", "msinttypes")
inttypes = os.path.join(int_base, "inttypes")
stdint = os.path.join(int_base, "stdint")
vi = sys.version_info[0:2]
if vi in [(2, 6), (2, 7)]:
# VS 2008 needs both.
include_dirs += [inttypes, stdint]
elif vi in [(3, 3), (3, 4)]:
# VS 2010 needs only inttypes.h
include_dirs += [inttypes]
ffi = FFI()
ffi.set_source(
"_ffi",
"#include <argon2.h>",
include_dirs=include_dirs,
libraries=["argon2"],
)
ffi.cdef(
"""\
typedef enum Argon2_type {
Argon2_d = ...,
Argon2_i = ...,
Argon2_id = ...,
} argon2_type;
typedef enum Argon2_version {
ARGON2_VERSION_10 = ...,
ARGON2_VERSION_13 = ...,
ARGON2_VERSION_NUMBER = ...
} argon2_version;
int argon2_hash(const uint32_t t_cost, const uint32_t m_cost,
const uint32_t parallelism, const void *pwd,
const size_t pwdlen, const void *salt,
const size_t saltlen, void *hash,
const size_t hashlen, char *encoded,
const size_t encodedlen, argon2_type type,
const uint32_t version);
int argon2_verify(const char *encoded, const void *pwd,
const size_t pwdlen, argon2_type type);
const char *argon2_error_message(int error_code);
typedef int (*allocate_fptr)(uint8_t **memory, size_t bytes_to_allocate);
typedef void (*deallocate_fptr)(uint8_t *memory, size_t bytes_to_allocate);
typedef struct Argon2_Context {
uint8_t *out; /* output array */
uint32_t outlen; /* digest length */
uint8_t *pwd; /* password array */
uint32_t pwdlen; /* password length */
uint8_t *salt; /* salt array */
uint32_t saltlen; /* salt length */
uint8_t *secret; /* key array */
uint32_t secretlen; /* key length */
uint8_t *ad; /* associated data array */
uint32_t adlen; /* associated data length */
uint32_t t_cost; /* number of passes */
uint32_t m_cost; /* amount of memory requested (KB) */
uint32_t lanes; /* number of lanes */
uint32_t threads; /* maximum number of threads */
uint32_t version; /* version number */
allocate_fptr allocate_cbk; /* pointer to memory allocator */
deallocate_fptr free_cbk; /* pointer to memory deallocator */
uint32_t flags; /* array of bool options */
} argon2_context;
int argon2_ctx(argon2_context *context, argon2_type type);
/* Error codes */
typedef enum Argon2_ErrorCodes {
ARGON2_OK = ...,
ARGON2_OUTPUT_PTR_NULL = ...,
ARGON2_OUTPUT_TOO_SHORT = ...,
ARGON2_OUTPUT_TOO_LONG = ...,
ARGON2_PWD_TOO_SHORT = ...,
ARGON2_PWD_TOO_LONG = ...,
ARGON2_SALT_TOO_SHORT = ...,
ARGON2_SALT_TOO_LONG = ...,
ARGON2_AD_TOO_SHORT = ...,
ARGON2_AD_TOO_LONG = ...,
ARGON2_SECRET_TOO_SHORT = ...,
ARGON2_SECRET_TOO_LONG = ...,
ARGON2_TIME_TOO_SMALL = ...,
ARGON2_TIME_TOO_LARGE = ...,
ARGON2_MEMORY_TOO_LITTLE = ...,
ARGON2_MEMORY_TOO_MUCH = ...,
ARGON2_LANES_TOO_FEW = ...,
ARGON2_LANES_TOO_MANY = ...,
ARGON2_PWD_PTR_MISMATCH = ..., /* NULL ptr with non-zero length */
ARGON2_SALT_PTR_MISMATCH = ..., /* NULL ptr with non-zero length */
ARGON2_SECRET_PTR_MISMATCH = ..., /* NULL ptr with non-zero length */
ARGON2_AD_PTR_MISMATCH = ..., /* NULL ptr with non-zero length */
ARGON2_MEMORY_ALLOCATION_ERROR = ...,
ARGON2_FREE_MEMORY_CBK_NULL = ...,
ARGON2_ALLOCATE_MEMORY_CBK_NULL = ...,
ARGON2_INCORRECT_PARAMETER = ...,
ARGON2_INCORRECT_TYPE = ...,
ARGON2_OUT_PTR_MISMATCH = ...,
ARGON2_THREADS_TOO_FEW = ...,
ARGON2_THREADS_TOO_MANY = ...,
ARGON2_MISSING_ARGS = ...,
ARGON2_ENCODING_FAIL = ...,
ARGON2_DECODING_FAIL = ...,
ARGON2_THREAD_FAIL = ...,
ARGON2_DECODING_LENGTH_FAIL= ...,
ARGON2_VERIFY_MISMATCH = ...,
} argon2_error_codes;
#define ARGON2_FLAG_CLEAR_PASSWORD ...
#define ARGON2_FLAG_CLEAR_SECRET ...
#define ARGON2_DEFAULT_FLAGS ...
#define ARGON2_MIN_LANES ...
#define ARGON2_MAX_LANES ...
#define ARGON2_MIN_THREADS ...
#define ARGON2_MAX_THREADS ...
#define ARGON2_SYNC_POINTS ...
#define ARGON2_MIN_OUTLEN ...
#define ARGON2_MAX_OUTLEN ...
#define ARGON2_MIN_MEMORY ...
#define ARGON2_MAX_MEMORY_BITS ...
#define ARGON2_MAX_MEMORY ...
#define ARGON2_MIN_TIME ...
#define ARGON2_MAX_TIME ...
#define ARGON2_MIN_PWD_LENGTH ...
#define ARGON2_MAX_PWD_LENGTH ...
#define ARGON2_MIN_AD_LENGTH ...
#define ARGON2_MAX_AD_LENGTH ...
#define ARGON2_MIN_SALT_LENGTH ...
#define ARGON2_MAX_SALT_LENGTH ...
#define ARGON2_MIN_SECRET ...
#define ARGON2_MAX_SECRET ...
uint32_t argon2_encodedlen(uint32_t t_cost, uint32_t m_cost,
uint32_t parallelism, uint32_t saltlen,
uint32_t hashlen, argon2_type type);
"""
)
if __name__ == "__main__":
ffi.compile()
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/argon2/_ffi_build.py",
"copies": "2",
"size": "5277",
"license": "mit",
"hash": -6346051924984462000,
"line_mean": 26.6282722513,
"line_max": 75,
"alpha_frac": 0.6109531931,
"autogenerated": false,
"ratio": 2.961279461279461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9572232654379461,
"avg_score": 0,
"num_lines": 191
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from conda.base.context import context
from conda.cli.conda_argparse import ArgumentParser
from conda.cli.main import init_loggers
from conda.gateways.logging import initialize_logging
try:
from conda.exceptions import conda_exception_handler
except ImportError as e:
if 'CONDA_DEFAULT_ENV' in os.environ:
sys.stderr.write("""
There was an error importing conda.
It appears this was caused by installing conda-env into a conda
environment. Like conda, conda-env needs to be installed into your
base conda/Anaconda environment.
Please deactivate your current environment, then re-install conda-env
using this command:
conda install -c conda conda-env
If you are seeing this error and have not installed conda-env into an
environment, please open a bug report at:
https://github.com/conda/conda-env
""".lstrip())
sys.exit(-1)
else:
raise e
from . import main_attach
from . import main_create
from . import main_export
from . import main_list
from . import main_remove
from . import main_upload
from . import main_update
# TODO: This belongs in a helper library somewhere
# Note: This only works with `conda-env` as a sub-command. If this gets
# merged into conda-env, this needs to be adjusted.
def show_help_on_empty_command():
if len(sys.argv) == 1: # sys.argv == ['/path/to/bin/conda-env']
sys.argv.append('--help')
def create_parser():
p = ArgumentParser()
sub_parsers = p.add_subparsers()
main_attach.configure_parser(sub_parsers)
main_create.configure_parser(sub_parsers)
main_export.configure_parser(sub_parsers)
main_list.configure_parser(sub_parsers)
main_remove.configure_parser(sub_parsers)
main_upload.configure_parser(sub_parsers)
main_update.configure_parser(sub_parsers)
show_help_on_empty_command()
return p
def do_call(args, parser):
relative_mod, func_name = args.func.rsplit('.', 1)
# func_name should always be 'execute'
from importlib import import_module
module = import_module(relative_mod, __name__.rsplit('.', 1)[0])
exit_code = getattr(module, func_name)(args, parser)
return exit_code
def main():
initialize_logging()
parser = create_parser()
args = parser.parse_args()
os.environ["CONDA_AUTO_UPDATE_CONDA"] = "false"
context.__init__(argparse_args=args)
init_loggers(context)
return conda_exception_handler(do_call, args, parser)
if __name__ == '__main__':
sys.exit(main())
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda_env/cli/main.py",
"copies": "1",
"size": "2566",
"license": "apache-2.0",
"hash": 527158863517042600,
"line_mean": 27.8314606742,
"line_max": 72,
"alpha_frac": 0.7116134061,
"autogenerated": false,
"ratio": 3.544198895027624,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9755812301127624,
"avg_score": 0,
"num_lines": 89
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from ginga import cmap as ginga_cmap
from qtpy import QtGui, QtWidgets
from glue.config import viewer_tool
from glue.viewers.common.qt.tool import CheckableTool, Tool
from glue.plugins.ginga_viewer.qt.utils import cmap2pixmap, ginga_graphic_to_roi
from glue.utils import nonpartial
from glue.plugins.tools.spectrum_tool.qt import SpectrumTool
from glue.plugins.tools.pv_slicer.qt import PVSlicerMode
# Find out the location of the ginga module so we can use some of its icons
GINGA_HOME = os.path.split(sys.modules['ginga'].__file__)[0]
GINGA_ICON_DIR = os.path.join(GINGA_HOME, 'icons')
@viewer_tool
class RectangleROIMode(CheckableTool):
tool_id = 'ginga:rectangle'
icon = 'glue_square'
tooltip = 'Rectangle'
def activate(self):
self.viewer._set_roi_mode('rectangle', True)
def deactivate(self):
self.viewer._set_roi_mode('rectangle', False)
@viewer_tool
class CircleROIMode(CheckableTool):
tool_id = 'ginga:circle'
icon = 'glue_circle'
tooltip = 'select:circle'
def activate(self):
self.viewer._set_roi_mode('circle', True)
def deactivate(self):
self.viewer._set_roi_mode('circle', False)
@viewer_tool
class PolygonROIMode(CheckableTool):
tool_id = 'ginga:polygon'
icon = 'glue_lasso'
tooltip = 'select:polygon'
def activate(self):
self.viewer._set_roi_mode('polygon', True)
def deactivate(self):
self.viewer._set_roi_mode('polygon', False)
@viewer_tool
class PanMode(CheckableTool):
tool_id = 'ginga:pan'
icon = 'glue_move'
tooltip = 'Pan'
def activate(self):
self.viewer.mode_cb('pan', True)
def deactivate(self):
self.viewer.mode_cb('pan', False)
@viewer_tool
class FreePanMode(CheckableTool):
tool_id = 'ginga:freepan'
icon = os.path.join(GINGA_ICON_DIR, 'hand_48.png')
tooltip = 'Free Pan'
def activate(self):
self.viewer.mode_cb('freepan', True)
def deactivate(self):
self.viewer.mode_cb('freepan', False)
@viewer_tool
class RotateMode(CheckableTool):
tool_id = 'ginga:rotate'
icon = os.path.join(GINGA_ICON_DIR, 'rotate_48.png')
tooltip = 'Rotate'
def activate(self):
self.viewer.mode_cb('rotate', True)
def deactivate(self):
self.viewer.mode_cb('rotate', False)
@viewer_tool
class ContrastMode(CheckableTool):
tool_id = 'ginga:contrast'
icon = 'glue_contrast'
    tooltip = 'Contrast'
def activate(self):
self.viewer.mode_cb('contrast', True)
def deactivate(self):
self.viewer.mode_cb('contrast', False)
@viewer_tool
class CutsMode(CheckableTool):
tool_id = 'ginga:cuts'
icon = os.path.join(GINGA_ICON_DIR, 'cuts_48.png')
tooltip = 'Cuts'
def activate(self):
self.viewer.mode_cb('cuts', True)
def deactivate(self):
self.viewer.mode_cb('cuts', False)
class ColormapAction(QtWidgets.QAction):
def __init__(self, label, cmap, parent):
super(ColormapAction, self).__init__(label, parent)
self.cmap = cmap
pm = cmap2pixmap(cmap)
self.setIcon(QtGui.QIcon(pm))
@viewer_tool
class ColormapMode(Tool):
icon = 'glue_rainbow'
tool_id = 'ginga:colormap'
action_text = 'Set color scale'
tool_tip = 'Set color scale'
def menu_actions(self):
acts = []
for label in ginga_cmap.get_names():
cmap = ginga_cmap.get_cmap(label)
a = ColormapAction(label, cmap, self.viewer)
a.triggered.connect(nonpartial(self.viewer.client.set_cmap, cmap))
acts.append(a)
return acts
class GingaMode(CheckableTool):
label = None
icon = None
shape = 'polygon'
color = 'red'
linestyle = 'solid'
def __init__(self, viewer):
super(CheckableTool, self).__init__(viewer)
self.parent_canvas = self.viewer.canvas
self._shape_tag = None
self.parent_canvas.add_callback('draw-event', self._extract_callback)
self.parent_canvas.add_callback('draw-down', self._clear_shape_cb)
def _set_path_mode(self, enable):
self.parent_canvas.enable_draw(True)
self.parent_canvas.draw_context = self
self.parent_canvas.set_drawtype(self.shape, color=self.color,
linestyle=self.linestyle)
bm = self.parent_canvas.get_bindmap()
bm.set_mode('draw', mode_type='locked')
def _clear_shape_cb(self, *args):
try:
self.parent_canvas.deleteObjectByTag(self._shape_tag)
except:
pass
_clear_path = _clear_shape_cb
@viewer_tool
class GingaPVSlicerMode(GingaMode):
icon = 'glue_slice'
tool_id = 'ginga:slicer'
action_text = 'Slice Extraction'
tool_tip = 'Extract a slice from an arbitrary path'
shape = 'path'
def _extract_callback(self, canvas, tag):
if self.parent_canvas.draw_context is not self:
return
self._shape_tag = tag
obj = self.parent_canvas.getObjectByTag(tag)
vx, vy = zip(*obj.points)
return self._build_from_vertices(vx, vy)
_build_from_vertices = PVSlicerMode._build_from_vertices
@viewer_tool
class GingaSpectrumMode(GingaMode, SpectrumTool):
icon = 'glue_spectrum'
tool_id = 'ginga:spectrum'
action_text = 'Spectrum'
tool_tip = 'Extract a spectrum from the selection'
shape = 'rectangle'
def __init__(self, widget=None):
GingaMode.__init__(self, widget)
SpectrumTool.__init__(self, widget, self)
self._release_callback = self._update_profile
self._move_callback = self._move_profile
def _extract_callback(self, canvas, tag):
if self.parent_canvas.draw_context is not self:
return
self._shape_tag = tag
obj = self.parent_canvas.getObjectByTag(tag)
roi = ginga_graphic_to_roi(obj)
return self._update_from_roi(roi)
def clear(self):
pass
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/ginga_viewer/qt/mouse_modes.py",
"copies": "1",
"size": "6049",
"license": "bsd-3-clause",
"hash": -4377492188591445000,
"line_mean": 23.8930041152,
"line_max": 80,
"alpha_frac": 0.639940486,
"autogenerated": false,
"ratio": 3.34383637368712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.448377685968712,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from pudb.py3compat import PY3
if PY3:
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
# minor LGPL violation: stolen from python-xdg
_home = os.environ.get('HOME', None)
xdg_data_home = os.environ.get('XDG_DATA_HOME',
os.path.join(_home, '.local', 'share') if _home else None)
xdg_config_home = os.environ.get('XDG_CONFIG_HOME',
os.path.join(_home, '.config') if _home else None)
xdg_config_dirs = ([xdg_config_home] if xdg_config_home else []) + \
    os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg').split(':')
def get_save_config_path(*resource):
if xdg_config_home is None:
return None
if not resource:
resource = [XDG_CONF_RESOURCE]
resource = os.path.join(*resource)
assert not resource.startswith('/')
path = os.path.join(xdg_config_home, resource)
if not os.path.isdir(path):
os.makedirs(path, 448) # 0o700
return path
# end LGPL violation
CONF_SECTION = "pudb"
XDG_CONF_RESOURCE = "pudb"
CONF_FILE_NAME = "pudb.cfg"
SAVED_BREAKPOINTS_FILE_NAME = "saved-breakpoints-%d.%d" % sys.version_info[:2]
BREAKPOINTS_FILE_NAME = "breakpoints-%d.%d" % sys.version_info[:2]
def load_config():
from os.path import join, isdir
cparser = ConfigParser()
conf_dict = {}
try:
cparser.read([
join(cdir, XDG_CONF_RESOURCE, CONF_FILE_NAME)
for cdir in xdg_config_dirs if isdir(cdir)])
if cparser.has_section(CONF_SECTION):
conf_dict.update(dict(cparser.items(CONF_SECTION)))
except:
pass
conf_dict.setdefault("shell", "internal")
conf_dict.setdefault("theme", "classic")
conf_dict.setdefault("line_numbers", False)
conf_dict.setdefault("seen_welcome", "a")
conf_dict.setdefault("sidebar_width", 0.5)
conf_dict.setdefault("variables_weight", 1)
conf_dict.setdefault("stack_weight", 1)
conf_dict.setdefault("breakpoints_weight", 1)
conf_dict.setdefault("current_stack_frame", "top")
conf_dict.setdefault("stringifier", "type")
conf_dict.setdefault("custom_theme", "")
conf_dict.setdefault("custom_stringifier", "")
conf_dict.setdefault("wrap_variables", True)
conf_dict.setdefault("display", "auto")
conf_dict.setdefault("prompt_on_quit", True)
def normalize_bool_inplace(name):
try:
if conf_dict[name].lower() in ["0", "false", "off"]:
conf_dict[name] = False
else:
conf_dict[name] = True
except:
pass
normalize_bool_inplace("line_numbers")
normalize_bool_inplace("wrap_variables")
normalize_bool_inplace("prompt_on_quit")
return conf_dict
def save_config(conf_dict):
from os.path import join
cparser = ConfigParser()
cparser.add_section(CONF_SECTION)
for key in sorted(conf_dict):
cparser.set(CONF_SECTION, key, str(conf_dict[key]))
try:
save_path = get_save_config_path()
if not save_path:
return
outf = open(join(save_path, CONF_FILE_NAME), "w")
cparser.write(outf)
outf.close()
except:
pass
def edit_config(ui, conf_dict):
import urwid
old_conf_dict = conf_dict.copy()
def _update_theme():
ui.setup_palette(ui.screen)
ui.screen.clear()
def _update_line_numbers():
for sl in ui.source:
sl._invalidate()
def _update_prompt_on_quit():
pass
def _update_current_stack_frame():
ui.update_stack()
def _update_stringifier():
import pudb.var_view
pudb.var_view.custom_stringifier_dict = {}
ui.update_var_view()
def _update_wrap_variables():
ui.update_var_view()
def _update_config(check_box, new_state, option_newvalue):
option, newvalue = option_newvalue
new_conf_dict = {option: newvalue}
if option == "theme":
# only activate if the new state of the radio button is 'on'
if new_state:
if newvalue is None:
# Select the custom theme entry dialog
lb.set_focus(lb_contents.index(theme_edit_list_item))
return
conf_dict.update(theme=newvalue)
_update_theme()
elif option == "line_numbers":
new_conf_dict["line_numbers"] = not check_box.get_state()
conf_dict.update(new_conf_dict)
_update_line_numbers()
elif option == "prompt_on_quit":
new_conf_dict["prompt_on_quit"] = not check_box.get_state()
conf_dict.update(new_conf_dict)
_update_prompt_on_quit()
elif option == "current_stack_frame":
# only activate if the new state of the radio button is 'on'
if new_state:
conf_dict.update(new_conf_dict)
_update_current_stack_frame()
elif option == "stringifier":
# only activate if the new state of the radio button is 'on'
if new_state:
if newvalue is None:
lb.set_focus(lb_contents.index(stringifier_edit_list_item))
return
conf_dict.update(stringifier=newvalue)
_update_stringifier()
elif option == "wrap_variables":
new_conf_dict["wrap_variables"] = not check_box.get_state()
conf_dict.update(new_conf_dict)
_update_wrap_variables()
heading = urwid.Text("This is the preferences screen for PuDB. "
"Hit Ctrl-P at any time to get back to it.\n\n"
"Configuration settings are saved in "
"$HOME/.config/pudb or $XDG_CONFIG_HOME/pudb "
"environment variable. If both variables are not set "
"configurations settings will not be saved.\n")
cb_line_numbers = urwid.CheckBox("Show Line Numbers",
bool(conf_dict["line_numbers"]), on_state_change=_update_config,
user_data=("line_numbers", None))
cb_prompt_on_quit = urwid.CheckBox("Prompt before quitting",
bool(conf_dict["prompt_on_quit"]), on_state_change=_update_config,
user_data=("prompt_on_quit", None))
# {{{ shells
shell_info = urwid.Text("This is the shell that will be "
"used when you hit '!'.\n")
shells = ["internal", "classic", "ipython", "bpython", "ptpython"]
shell_rb_group = []
shell_rbs = [
urwid.RadioButton(shell_rb_group, name,
conf_dict["shell"] == name)
for name in shells]
# }}}
# {{{ themes
from pudb.theme import THEMES
known_theme = conf_dict["theme"] in THEMES
theme_rb_group = []
theme_edit = urwid.Edit(edit_text=conf_dict["custom_theme"])
theme_edit_list_item = urwid.AttrMap(theme_edit, "value")
theme_rbs = [
urwid.RadioButton(theme_rb_group, name,
conf_dict["theme"] == name, on_state_change=_update_config,
user_data=("theme", name))
for name in THEMES]+[
urwid.RadioButton(theme_rb_group, "Custom:",
not known_theme, on_state_change=_update_config,
user_data=("theme", None)),
theme_edit_list_item,
urwid.Text("\nTo use a custom theme, see example-theme.py in the "
"pudb distribution. Enter the full path to a file like it in "
"the box above. '~' will be expanded to your home directory. "
"Note that a custom theme will not be applied until you close "
"this dialog."),
]
# }}}
# {{{ stack
stack_rb_group = []
stack_opts = ["top", "bottom"]
stack_info = urwid.Text("Show the current stack frame at the\n")
stack_rbs = [
urwid.RadioButton(stack_rb_group, name,
conf_dict["current_stack_frame"] == name,
on_state_change=_update_config,
user_data=("current_stack_frame", name))
for name in stack_opts
]
# }}}
# {{{ stringifier
stringifier_opts = ["type", "str", "repr"]
known_stringifier = conf_dict["stringifier"] in stringifier_opts
stringifier_rb_group = []
stringifier_edit = urwid.Edit(edit_text=conf_dict["custom_stringifier"])
stringifier_info = urwid.Text("This is the default function that will be "
"called on variables in the variables list. Note that you can change "
"this on a per-variable basis by selecting a variable and hitting Enter "
"or by typing t/s/r. Note that str and repr will be slower than type "
"and have the potential to crash PuDB.\n")
stringifier_edit_list_item = urwid.AttrMap(stringifier_edit, "value")
stringifier_rbs = [
urwid.RadioButton(stringifier_rb_group, name,
conf_dict["stringifier"] == name,
on_state_change=_update_config,
user_data=("stringifier", name))
for name in stringifier_opts
]+[
urwid.RadioButton(stringifier_rb_group, "Custom:",
not known_stringifier, on_state_change=_update_config,
user_data=("stringifier", None)),
stringifier_edit_list_item,
urwid.Text("\nTo use a custom stringifier, see "
"example-stringifier.py in the pudb distribution. Enter the "
"full path to a file like it in the box above. "
"'~' will be expanded to your home directory. "
"The file should contain a function called pudb_stringifier() "
"at the module level, which should take a single argument and "
"return the desired string form of the object passed to it. "
"Note that if you choose a custom stringifier, the variables "
"view will not be updated until you close this dialog."),
]
# }}}
# {{{ wrap variables
cb_wrap_variables = urwid.CheckBox("Wrap variables",
bool(conf_dict["wrap_variables"]), on_state_change=_update_config,
user_data=("wrap_variables", None))
wrap_variables_info = urwid.Text("\nNote that you can change this option on "
"a per-variable basis by selecting the "
"variable and pressing 'w'.")
# }}}
# {{{ display
display_info = urwid.Text("What driver is used to talk to your terminal. "
"'raw' has the most features (colors and highlighting), "
"but is only correct for "
"XTerm and terminals like it. 'curses' "
"has fewer "
"features, but it will work with just about any terminal. 'auto' "
"will attempt to pick between the two based on availability and "
"the $TERM environment variable.\n\n"
"Changing this setting requires a restart of PuDB.")
displays = ["auto", "raw", "curses"]
display_rb_group = []
display_rbs = [
urwid.RadioButton(display_rb_group, name,
conf_dict["display"] == name)
for name in displays]
# }}}
lb_contents = (
[heading]
+ [urwid.AttrMap(urwid.Text("Line Numbers:\n"), "group head")]
+ [cb_line_numbers]
+ [urwid.AttrMap(urwid.Text("\nPrompt on quit:\n"), "group head")]
+ [cb_prompt_on_quit]
+ [urwid.AttrMap(urwid.Text("\nShell:\n"), "group head")]
+ [shell_info]
+ shell_rbs
+ [urwid.AttrMap(urwid.Text("\nTheme:\n"), "group head")]
+ theme_rbs
+ [urwid.AttrMap(urwid.Text("\nStack Order:\n"), "group head")]
+ [stack_info]
+ stack_rbs
+ [urwid.AttrMap(urwid.Text("\nVariable Stringifier:\n"), "group head")]
+ [stringifier_info]
+ stringifier_rbs
+ [urwid.AttrMap(urwid.Text("\nWrap Variables:\n"), "group head")]
+ [cb_wrap_variables]
+ [wrap_variables_info]
+ [urwid.AttrMap(urwid.Text("\nDisplay driver:\n"), "group head")]
+ [display_info]
+ display_rbs
)
lb = urwid.ListBox(urwid.SimpleListWalker(lb_contents))
if ui.dialog(lb, [
("OK", True),
("Cancel", False),
],
title="Edit Preferences"):
# Only update the settings here that instant-apply (above) doesn't take
# care of.
# if we had a custom theme, it wasn't updated live
if theme_rb_group[-1].state:
newvalue = theme_edit.get_edit_text()
conf_dict.update(theme=newvalue, custom_theme=newvalue)
_update_theme()
# Ditto for custom stringifiers
if stringifier_rb_group[-1].state:
newvalue = stringifier_edit.get_edit_text()
conf_dict.update(stringifier=newvalue, custom_stringifier=newvalue)
_update_stringifier()
for shell, shell_rb in zip(shells, shell_rbs):
if shell_rb.get_state():
conf_dict["shell"] = shell
for display, display_rb in zip(displays, display_rbs):
if display_rb.get_state():
conf_dict["display"] = display
else: # The user chose cancel, revert changes
conf_dict.update(old_conf_dict)
_update_theme()
# _update_line_numbers() is equivalent to _update_theme()
_update_current_stack_frame()
_update_stringifier()
# {{{ breakpoint saving
def parse_breakpoints(lines):
# b [ (filename:lineno | function) [, "condition"] ]
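    # For example, a saved-breakpoints file may contain lines such as
    #   b /home/user/project/app.py:42
    #   b /home/user/project/app.py:57, x > 5
    # (the paths above are hypothetical); each valid line becomes a
    # (filename, lineno, False, cond, funcname) tuple below.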
breakpoints = []
for arg in lines:
if not arg:
continue
arg = arg[1:]
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
colon = arg.rfind(':')
funcname = None
if colon > 0:
filename = arg[:colon].strip()
from pudb.lowlevel import lookup_module
f = lookup_module(filename)
if not f:
continue
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError:
continue
else:
continue
from pudb.lowlevel import get_breakpoint_invalid_reason
if get_breakpoint_invalid_reason(filename, lineno) is None:
breakpoints.append((filename, lineno, False, cond, funcname))
return breakpoints
def get_breakpoints_file_name():
from os.path import join
save_path = get_save_config_path()
if not save_path:
return None
else:
return join(save_path, SAVED_BREAKPOINTS_FILE_NAME)
def load_breakpoints():
from os.path import join, isdir
file_names = [
join(cdir, XDG_CONF_RESOURCE, name)
for cdir in xdg_config_dirs if isdir(cdir)
for name in [SAVED_BREAKPOINTS_FILE_NAME, BREAKPOINTS_FILE_NAME]
]
lines = []
for fname in file_names:
try:
rcFile = open(fname)
except IOError:
pass
else:
lines.extend([l.strip() for l in rcFile.readlines()])
rcFile.close()
return parse_breakpoints(lines)
def save_breakpoints(bp_list):
"""
:arg bp_list: a list of tuples `(file_name, line)`
"""
save_path = get_breakpoints_file_name()
if not save_path:
return
histfile = open(get_breakpoints_file_name(), 'w')
bp_list = set([(bp.file, bp.line, bp.cond) for bp in bp_list])
for bp in bp_list:
line = "b %s:%d" % (bp[0], bp[1])
if bp[2]:
line += ", %s" % bp[2]
line += "\n"
histfile.write(line)
histfile.close()
# }}}
# vim:foldmethod=marker
| {
"repo_name": "albfan/pudb",
"path": "pudb/settings.py",
"copies": "1",
"size": "16295",
"license": "mit",
"hash": 7966609117641069000,
"line_mean": 31.2673267327,
"line_max": 84,
"alpha_frac": 0.5655108929,
"autogenerated": false,
"ratio": 3.8179475164011247,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48834584093011246,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from setuptools import setup, Extension
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
except ImportError:
print("Could not import Cython. Install `cython` and rerun.")
sys.exit(1)
extra_compile_args = []
extra_link_args = []
glfw3_lib = 'glfw3'
mcpp_lib = 'mcpp'
tess2_lib = 'tess2'
platform = sys.platform.lower()
if 'darwin' in platform or 'linux' in platform:
extra_compile_args.extend(['-I/usr/local/include', '-I/opt/local/include'])
extra_link_args.extend(['-L/usr/local/lib', '-L/opt/local/lib'])
if 'win32' in platform:
glfw3_lib = 'glfw3dll'
glfw3_root = os.environ.get('GLFW_ROOT')
if glfw3_root is not None:
extra_compile_args.append('-I%s' % (os.path.join(glfw3_root, 'include'),))
extra_link_args.append('/LIBPATH:%s' % (os.path.join(glfw3_root, 'lib-vc2012'),))
mcpp_root = os.environ.get('MCPP_ROOT')
if mcpp_root is not None:
extra_compile_args.append('-I%s' % (os.path.join(mcpp_root, 'src'),))
extra_link_args.append('/LIBPATH:%s' % (os.path.join(mcpp_root, 'lib'),))
tess2_root = os.environ.get('TESS2_ROOT')
if tess2_root is not None:
extra_compile_args.append('-I%s' % (os.path.join(tess2_root, 'include'),))
extra_link_args.append('/LIBPATH:%s' % (os.path.join(tess2_root, 'lib-vc2012'),))
_all_ = {
'clibs.glfw3': Extension('clibs.glfw3.glfw3', ['clibs/glfw3/glfw3.pyx'], libraries=[glfw3_lib],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
),
'clibs.mcpp': Extension('clibs.mcpp.mcpp_lib', ['clibs/mcpp/mcpp_lib.pyx'], libraries=[mcpp_lib],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
),
'clibs.openal': Extension('clibs.openal.al', ['clibs/openal/al.pyx'], libraries=[],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args + ['-framework', 'OpenAL']
),
'clibs.opengl': Extension('clibs.opengl.gl3', ['clibs/opengl/gl3.pyx'], libraries=[],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args + ['-framework', 'OpenGL']
),
'clibs.tess2': Extension('clibs.tess2.tesselator', ['clibs/tess2/tesselator.pyx'], libraries=[tess2_lib],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
),
}
selection = [arg for arg in sys.argv if arg.startswith('clibs.')]
for arg in selection: sys.argv.remove(arg)
ext_modules = cythonize(list(filter(None, [_all_.get(arg) for arg in selection])))
setup(
name="clibs",
version = "0.0.1",
license = 'MIT',
author="Daniel Filonik",
author_email="d.filonik@hdr.qut.edu.au",
cmdclass={'build_ext': build_ext},
packages=['clibs'] + selection,
package_data={package: ['*.pyx', '*.pxd'] for package in selection},
requires=['cython'],
ext_modules=ext_modules,
zip_safe=True
) | {
"repo_name": "filonik/clibs",
"path": "setup.py",
"copies": "1",
"size": "3025",
"license": "mit",
"hash": 4576642634832667600,
"line_mean": 35.4578313253,
"line_max": 109,
"alpha_frac": 0.6476033058,
"autogenerated": false,
"ratio": 2.91988416988417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.406748747568417,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from virtualenv.builders.legacy import LegacyBuilder
from virtualenv.builders.venv import VenvBuilder
from virtualenv.flavors.posix import PosixFlavor
from virtualenv.flavors.windows import WindowsFlavor
def select_flavor():
# Determine if we're running under Windows or not.
if (sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")):
return WindowsFlavor
return PosixFlavor
def select_builder(python, builders=None):
# Determine what Python we're going to be using. If this is None we'll use
# the Python which we're currently running under.
if python is None:
python = sys.executable
# If we were not given a list of builders we'll default to one that
# contains both of our builders
if builders is None:
builders = [VenvBuilder, LegacyBuilder]
# Loop over our builders and return the first one that is acceptable for
# the target Python.
for builder in builders:
if builder.check_available(python):
return builder
# If we got to this point then we haven't selected a builder then we need
# to raise an error.
raise RuntimeError("No available builders for the target Python.")
def create(destination, python=None, **kwargs):
# Determine which builder to use based on the capabiltiies of the target
# python.
builder_type = select_builder(python)
# Determine which flavor to use, based on the platform we're running on.
flavor_type = select_flavor()
# Instantiate our selected builder with the values given to us, and then
# create our virtual environment using the given builder.
builder = builder_type(python=python, flavor=flavor_type(), **kwargs)
builder.create(destination)
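# Illustrative usage only (not part of the original module); the paths and
# interpreter names below are hypothetical:
#
#     from virtualenv.core import create
#     create("/tmp/example-env")                         # current interpreter
#     create("/tmp/example-env-py2", python="python2.7")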
| {
"repo_name": "ionelmc/virtualenv",
"path": "virtualenv/core.py",
"copies": "1",
"size": "1845",
"license": "mit",
"hash": 3673681554003491000,
"line_mean": 33.8113207547,
"line_max": 87,
"alpha_frac": 0.718699187,
"autogenerated": false,
"ratio": 4.392857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5611556329857144,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
import sys
import netCDF4
import ruamel.yaml as yaml
from six import raise_from
from docopt import docopt
__all__ = [
'main',
'build'
]
__doc__ = """
Generate ncml based on a yaml file.
Usage:
yaml2ncml INFILE [--output=OUTFILE]
yaml2ncml (-h | --help | --version)
Examples:
yaml2ncml roms.yaml
yaml2ncml roms.yaml --output=roms.ncml
Arguments:
file yaml file.
Options:
-h --help Show this screen.
-v --version Show version.
"""
def str_att(name, value):
if isinstance(value, list):
try:
value = ','.join(value)
except TypeError as e:
raise_from(ValueError('Expected `str` got {!r}'.format(value)), e)
msg = ' <attribute name="{:s}" type="String" value="{:s}"/>\n'
return msg.format(name, value)
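# For illustration: str_att('cdm_data_type', 'Grid') renders as
#   <attribute name="cdm_data_type" type="String" value="Grid"/>
# and list values are comma-joined before formatting.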
def header():
text = '<?xml version="1.0" encoding="UTF-8"?>\n<netcdf xmlns='
text += '"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2">\n'
text += str_att('Conventions', 'CF-1.6, SGRID-0.1, ACDD-1.3')
text += str_att('cdm_data_type', 'Grid')
return text
def footer(text):
text += '</netcdf>\n'
return text
def add_global_atts(text, a):
d = a['dataset']
for key, value in d.items():
# Handle simple attribute pairs first.
if key in ['id', 'license', 'summary', 'title', 'project',
'naming_authority', 'references', 'acknowledgments']:
text += str_att(key, value)
elif key in ['creator', 'publisher']:
email = value.get("email", None)
if email:
text += str_att('_'.join([key, 'email']), email)
url = value.get("url", None)
if url:
text += str_att('_'.join([key, 'url']), url)
name = value.get("name", None)
if name:
text += str_att('_'.join([key, 'name']), name)
elif key in ['contributor']:
role = value.get("role", None)
text += str_att('_'.join([key, 'role']), role)
email = value.get("email", None)
if email:
text += str_att('_'.join([key, 'email']), email)
url = value.get("url", None)
if url:
text += str_att('_'.join([key, 'url']), url)
name = value.get("name", None)
if name:
text += str_att('_'.join([key, 'name']), name)
return text
def add_bed_coord(text, a):
ncfile = os.path.join(a['aggregation']['dir'],
a['aggregation']['sample_file'])
nc = netCDF4.Dataset(ncfile)
bed_coord_var = """<variable name="Nbed" shape="Nbed" type="double">
<attribute name="long_name" value="pseudo coordinate at seabed points"/>
<attribute name="standard_name" value="ocean_sigma_coordinate"/>
<attribute name="positive" value="up"/>
<attribute name="formula_terms" value="sigma: Nbed eta: zeta depth: h"/>
<values start="-1.0" increment="-0.01"/>
</variable>\n """
if 'Nbed' in nc.dimensions.keys():
text += bed_coord_var
return text
def add_var_atts(text, a):
ncfile = os.path.join(a['aggregation']['dir'],
a['aggregation']['sample_file'])
nc = netCDF4.Dataset(ncfile)
ncv = nc.variables
# Get a list of all variables more than 1D.
vars = [var for var, vart in ncv.items() if vart.ndim > 1]
# identify all the rho, u and v vars
rho_vars = [var for var in vars if 'eta_rho' in
ncv[var].dimensions and 'xi_rho' in ncv[var].dimensions]
u_vars = [var for var in vars if 'eta_u' in
ncv[var].dimensions and 'xi_u' in ncv[var].dimensions]
v_vars = [var for var in vars if 'eta_v' in
ncv[var].dimensions and 'xi_v' in ncv[var].dimensions]
vars_all = set(vars)
vars_include = set(a['variables']['include'])
vars_exclude = set(a['variables']['exclude'])
# include/exclude only variables that actually occur in variable list
vars_include = vars_all.intersection(vars_include)
vars_exclude = vars_all.intersection(vars_exclude)
# If there are variables excluded, exclude them and keep all rest.
# If no variables are excluded, take just the included variables
# If no variables are included or excluded, take all variables (leave
# list of variables unchanged)
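    # For example, with vars_all={'temp', 'salt', 'u'} and vars_exclude={'u'},
    # vars_display becomes {'temp', 'salt'}; with no exclusions and
    # vars_include={'temp'}, only 'temp' is displayed.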
if vars_exclude:
vars_display = vars_all - vars_all.intersection(vars_exclude)
else:
if vars_include:
vars_display = vars_all.intersection(vars_include)
else:
vars_display = vars_all
# remove some variables we never want (if they exist)
Tobc = set(['Tobc_in', 'Tobc_out'])
vars_display = vars_display - vars_display.intersection(Tobc)
vars_display = list(vars_display)
# add the variable attributes: S-grid stuff, display=T|F, ...
for var in vars:
text += '<variable name="{:s}">\n'.format(var)
try:
text += str_att('standard_name', cf[var])
except:
pass
text += str_att('grid', 'grid')
if 'Nbed' in ncv[var].dimensions:
text += str_att('coordinates', ncv[var].coordinates+' Nbed')
if var in vars_display:
text += str_att('display', 'True')
else:
text += str_att('display', 'False')
text += str_att('coverage_content_type', 'modelResult')
if var in rho_vars:
text += str_att('location', 'face')
elif var in u_vars:
text += str_att('location', 'edge1')
elif var in v_vars:
text += str_att('location', 'edge2')
text += '</variable>\n\n'
# write standard_name for time coordinate variable
var = 'ocean_time'
if var in ncv.keys():
try:
text += '\n<variable name="{:s}">\n'.format(var)
text += str_att('standard_name', cf[var])
text += '</variable>\n\n'
except:
pass
nc.close()
return text
def write_grid_var(text):
grid_var = """<variable name="grid" type="int">
<attribute name="cf_role" value="grid_topology"/>
<attribute name="topology_dimension" type="int" value="2"/>
<attribute name="node_dimensions" value="xi_psi eta_psi"/>
<attribute name="face_dimensions"
value="xi_rho: xi_psi (padding: both) eta_rho: eta_psi (padding: both)"/>
<attribute name="edge1_dimensions" value="xi_u: xi_psi eta_u: eta_psi (padding: both)"/>
<attribute name="edge2_dimensions" value="xi_v: xi_psi (padding: both) eta_v: eta_psi"/>
<attribute name="node_coordinates" value="lon_psi lat_psi"/>
<attribute name="face_coordinates" value="lon_rho lat_rho"/>
<attribute name="edge1_coordinates" value="lon_u lat_u"/>
<attribute name="edge2_coordinates" value="lon_v lat_v"/>
<attribute name="vertical_dimensions" value="s_rho: s_w (padding: none)"/>
</variable>\n""" # noqa
text += grid_var
return text
def add_aggregation_scan(text, a):
agg = a['aggregation']
text += '<aggregation dimName="{:s}" type="joinExisting">\n'.format(
agg['time_var'])
text += '<scan location="{:s}" regExp="{:s}" subdirs="false"/>\n</aggregation>\n'\
.format(agg['dir'], agg['pattern']) # noqa
return text
# Map ROMS variables to CF standard_names.
cf = dict(ocean_time='time',
zeta='sea_surface_height_above_geopotential_datum',
temp='sea_water_potential_temperature',
salt='sea_water_salinity',
u='sea_water_x_velocity',
v='sea_water_y_velocity',
ubar='barotropic_sea_water_x_velocity',
vbar='barotropic_sea_water_y_velocity',
Hwave='sea_surface_wave_significant_height',
          bed_thickness='sediment_bed_thickness')  # sediment_bed_thickness is not a CF standard name
def build(yml):
text = header()
text = add_global_atts(text, yml)
text = add_var_atts(text, yml)
text = write_grid_var(text)
text = add_bed_coord(text, yml)
text = add_aggregation_scan(text, yml)
text = footer(text)
return text
def main():
args = docopt(__doc__, version='0.6.0')
fname = args.get('INFILE')
fout = args.get('--output', None)
with open(fname, 'r') as stream:
yml = yaml.load(stream, Loader=yaml.RoundTripLoader)
text = build(yml)
if fout:
with open(fout, 'w') as f:
f.write("{:s}".format(text))
else:
sys.stdout.write(text)
| {
"repo_name": "ocefpaf/yaml2ncml",
"path": "yaml2ncml/yaml2ncml.py",
"copies": "1",
"size": "8605",
"license": "mit",
"hash": -3814965847861699600,
"line_mean": 31.8435114504,
"line_max": 99,
"alpha_frac": 0.5764090645,
"autogenerated": false,
"ratio": 3.469758064516129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9542746870893976,
"avg_score": 0.0006840516244306716,
"num_lines": 262
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import decatur
from decatur import config, utils
###############################################################################
# EXCLUDE MULTIPLE EPHEMERIDES, UNCERTAIN, AND HEARTBEAT SYSTEMS
###############################################################################
run = False
if run:
kebc = utils.load_catalog()
# Exclude EBs with multiple ephemerides
kebc_keep = kebc[kebc['Mult'] == 1]
# Exclude EBs flagged as uncertain in KEBC
df_unc = pd.read_csv('{}/unc_list.csv'.format(config.repo_data_dir),
comment='#')
mask_unc = ~np.in1d(kebc_keep['KIC'], df_unc['KIC'])
kebc_keep = kebc_keep[mask_unc]
# Exclude EBs flagged as heartbeats in KEBC
df_hb = pd.read_csv('{}/hb_list.csv'.format(config.repo_data_dir),
comment='#')
mask_hb = ~np.in1d(kebc_keep['KIC'], df_hb['KIC'])
kebc_keep = kebc_keep[mask_hb]
kebc_keep.to_csv('{}/kebc_keep.csv'.format(config.repo_data_dir), index=False)
###############################################################################
# DOWNLOAD LIGHT CURVES
###############################################################################
run = False
if run:
kebc = decatur.utils.load_catalog(catalog_file='kebc_keep.csv')
for kic in kebc['KIC']:
eb = decatur.eclipsing_binary.EclipsingBinary.from_kic(kic)
###############################################################################
# GENERATE AUTOCORRELATION FUNCTIONS
###############################################################################
detrend_list = [4815612, 4839180, 4931073, 10960995]
sap_list = [4815612, 4931073, 7199774, 7691527, 10031409]
run = False
if run:
decatur.analyze_sample.compute_periodicity('acf', period_max=200.,
window=1.5,
catalog_file='kebc_keep.csv',
detrend_list=detrend_list,
sap_list=sap_list)
###############################################################################
# GENERATE REFERENCE PERIODOGRAMS
# These are for reference in the visual inspection program.
# higher oversampling periodograms are generated when looking for
# multiple rotation periods
###############################################################################
run = False
if run:
decatur.analyze_sample.compute_periodicity('periodogram', period_max=100.,
window=1.5,
catalog_file='kebc_keep.csv')
###############################################################################
# INSPECT AND CLASSIFY LIGHT CURVES, CHECK ROTATION PERIODS
###############################################################################
run = False
if run:
decatur.inspector.create_results_file(catalog_file='kebc_keep.csv')
run = False
if run:
gadget = decatur.inspector.InspectorGadget('grid_pgrams.20170609.h5',
'acfs.20170510.h5',
zoom_pan=0.15)
gadget.gogo_gadget()
###############################################################################
# MEASURE ROTATION PERIODS FROM THE ACF
###############################################################################
run = False
if run:
decatur.analyze_sample.find_acf_peaks('acfs.20170510.h5')
###############################################################################
# GENERATE OVERSAMPLED PERIODOGRAMS ON A UNIFORM FREQUENCY GRID
###############################################################################
run = False
if run:
decatur.mulitperiodic.generate_frequency_periodograms(detrend_list=detrend_list,
sap_list=sap_list)
###############################################################################
# SEARCH FOR MULTIPLE ROTATION PERIODS
###############################################################################
run = False
if run:
decatur.mulitperiodic.detect_multiple_periods('all_results.csv',
'grid_pgrams.20170609.h5')
###############################################################################
# CREATE LATEX TABLE
###############################################################################
run = False
if run:
decatur.tables.latex_rotation_table(start=80, stop=100)
plt.style.use('publish')
###############################################################################
# TIDAL EQUILIBRIUM TIMESCALES
###############################################################################
run = False
if run:
    print('Alpha is the ratio of orbital to rotational angular momentum at '
          'equilibrium.\nSee Hut (1981).')
print('\n1 M_sun + 0.1 M_sun in 0.5 day orbit')
alpha_1 = utils.equilibrium_stability(0.5, 1, 1, 0.1)
print('alpha = {:.2f}'.format(alpha_1))
print('\n1 M_sun + 1 M_sun in 1 day orbit')
alpha_1 = utils.equilibrium_stability(1, 1, 1, 1)
print('alpha = {:.2f}'.format(alpha_1))
###############################################################################
# FRACTION OF SP EBs WITH ECLIPSE WIDTHS LESS THAN 0.1
###############################################################################
run = False
if run:
df = utils.collect_results()
df = df[df['class'] == 'sp']
    df.loc[df['swidth'] < 0, 'swidth'] = 0
width_total = df['pwidth'] + df['swidth']
narrow = width_total < 0.1
print('Percentage of EBs with pwidth + swidth < 0.1:')
print('{:.1f}'.format(np.sum(narrow) / len(df) * 100))
###############################################################################
# FIG 1. EXAMPLE CLASSIFICATION LIGHT CURVES
###############################################################################
run = False
if run:
decatur.results_plots.class_examples()
###############################################################################
# FIG 2. ROTATION PERIOD MEASUREMENT EXAMPLE
###############################################################################
run = False
if run:
decatur.results_plots.prot_measurement_example()
###############################################################################
# FIG 3. ACF PEAK HEIGHT DISTRIBUTIONS BY CLASS
###############################################################################
run = False
if run:
decatur.results_plots.peak_heights_vs_class()
###############################################################################
# FIG 4. MULTIPLE PERIODOGRAM PEAK FINDING EXAMPLE
###############################################################################
run = False
if run:
decatur.results_plots.multiperiod_example('grid_pgrams.20170609.h5')
###############################################################################
# FIG 5. ORBITAL PERIOD HISTOGRAMS FOR CLASSES
###############################################################################
run = False
if run:
decatur.results_plots.classification_p_orb_hist()
###############################################################################
# FIG 6. PERIOD RATIO VS. ORBITAL PERIOD
# FIG 7. PERIOD RATIO VS. ORBITAL PERIOD WITH ECCENTRICITY
###############################################################################
run = False
if run:
decatur.results_plots.plot_prot_porb()
###############################################################################
# FIG 8. SYNCHRONIZATION VS. ECCENTRICITY
###############################################################################
run = False
if run:
decatur.results_plots.sync_vs_ecc()
###############################################################################
# FIG 9. ORBITAL PERIOD VS. G-K COLOR
# FIG 15. P_ORB VS. G-K FOR CONTACT BINARIES
###############################################################################
run = False
if run:
decatur.results_plots.sync_vs_gk()
###############################################################################
# SPECTRAL TYPE BREAKDOWN
###############################################################################
run = False
if run:
decatur.analyze_sample.spectral_type_breakdown()
###############################################################################
# FIG 10. PERIOD RATIO HISTOGRAMS VS. G-K COLOR
###############################################################################
run = False
if run:
decatur.results_plots.sync_vs_gk_hist()
###############################################################################
# FIG 11. PERIOD RATIO HISTOGRAMS VS. MASS RATIO (ECLIPSE DEPTH)
###############################################################################
run = False
if run:
decatur.results_plots.sync_vs_depth()
###############################################################################
# FIG 12. RELATIVE SHEAR VS ROTATION PERIOD
###############################################################################
run = False
if run:
decatur.results_plots.multiperiodic()
###############################################################################
# FIG. 13. PERIOD RATIO RANGES VS. ORBITAL PERIOD
###############################################################################
run = False
if run:
decatur.results_plots.multi_p_orb_p_rot()
###############################################################################
# FIG 14. EVOLVED STAR/RS CVN CANDIDATE
###############################################################################
run = False
if run:
decatur.results_plots.rs_cvn_candidate()
| {
"repo_name": "jlurie/decatur",
"path": "tests/paper.py",
"copies": "1",
"size": "9876",
"license": "mit",
"hash": 3658238776590735400,
"line_mean": 39.1463414634,
"line_max": 84,
"alpha_frac": 0.3828473066,
"autogenerated": false,
"ratio": 4.660689004247287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005363395235653277,
"num_lines": 246
} |
from __future__ import absolute_import, division, print_function
import os
import sys
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("."))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "symantecssl"
copyright = "2014 Rackspace Barbican Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
base_dir = os.path.join(os.path.dirname(__file__), os.pardir)
about = {}
with open(os.path.join(base_dir, "symantecssl", "__about__.py")) as f:
exec(f.read(), about)
version = release = about["__version__"]
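# For context, symantecssl/__about__.py is expected to define the version
# metadata read above; a hypothetical minimal example of such a file
# (the value below is illustrative only, not the real version):
#     __version__ = "1.0.0"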
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_rtd_theme:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "symantecssldoc"
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
(
"index",
"symantecssl.tex",
"symantecssl Documentation",
"Rackspace Barbican Team",
"manual",
),
]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"symantecssl",
"symantecssl Documentation",
["Rackspace Barbican Team"],
1,
)
]
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"symantecssl",
"symantecssl Documentation",
"Rackspace Barbican Team",
"symantecssl",
"Supports working with the Symantec SSL service",
"Miscellaneous",
),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"http://docs.python.org/": None,
}
epub_theme = "epub"
| {
"repo_name": "grigouze/symantecssl",
"path": "docs/conf.py",
"copies": "5",
"size": "3873",
"license": "apache-2.0",
"hash": -2321553010709284400,
"line_mean": 28.3409090909,
"line_max": 79,
"alpha_frac": 0.6493674154,
"autogenerated": false,
"ratio": 3.8614157527417747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7010783168141774,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import tempfile
import unittest
import warnings
import blaze
from blaze.datadescriptor import dd_as_py
# A CSV toy example
csv_buf = u"""k1,v1,1,False
k2,v2,2,True
k3,v3,3,False
"""
csv_schema = "{ f0: string; f1: string; f2: int16; f3: bool }"
csv_ldict = [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}
]
class TestOpenCSV(unittest.TestCase):
def setUp(self):
handle, self.fname = tempfile.mkstemp(suffix='.csv')
self.url = self.fname
with os.fdopen(handle, "w") as f:
f.write(csv_buf)
def tearDown(self):
os.unlink(self.fname)
def test_open(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.open(store, schema=csv_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
def test_open_dialect(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.open(store, schema=csv_schema, dialect='excel')
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
def test_open_has_header(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.open(store, schema=csv_schema, has_header=False)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
def test_append(self):
store = blaze.Storage(self.url, mode='r+')
a = blaze.open(store, schema=csv_schema)
blaze.append(a, ["k4", "v4", 4, True])
self.assertEqual(dd_as_py(a._data), csv_ldict + \
[{u'f0': u'k4', u'f1': u'v4', u'f2': 4, u'f3': True}])
def test_deprecated_open(self):
url = "csv://" + self.fname
store = blaze.Storage(url, mode='r')
a = blaze.open(store, schema=csv_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
json_buf = u"[1, 2, 3, 4, 5]"
json_schema = "var, int8"
class TestOpenJSON(unittest.TestCase):
def setUp(self):
handle, self.fname = tempfile.mkstemp(suffix='.json')
self.url = self.fname
with os.fdopen(handle, "w") as f:
f.write(json_buf)
def tearDown(self):
os.unlink(self.fname)
def test_open(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.open(store, schema=json_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1, 2, 3, 4, 5])
def test_deprecated_open(self):
url = "json://" + self.fname
store = blaze.Storage(url, mode='r')
a = blaze.open(store, schema=json_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1, 2, 3, 4, 5])
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"repo_name": "aburan28/blaze",
"path": "blaze/tests/test_array_opening.py",
"copies": "7",
"size": "3020",
"license": "bsd-3-clause",
"hash": -7763266689859111000,
"line_mean": 30.1340206186,
"line_max": 66,
"alpha_frac": 0.5887417219,
"autogenerated": false,
"ratio": 2.814538676607642,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018981811088596189,
"num_lines": 97
} |
from __future__ import absolute_import, division, print_function
import os
import tempfile
import unittest
import blaze
from blaze.datadescriptor import dd_as_py
# A CSV toy example
csv_buf = u"""k1,v1,1,False
k2,v2,2,True
k3,v3,3,False
"""
csv_schema = "{ f0: string, f1: string, f2: int16, f3: bool }"
csv_ldict = [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}
]
class TestOpenCSV(unittest.TestCase):
def setUp(self):
handle, self.fname = tempfile.mkstemp(suffix='.csv')
self.url = self.fname
with os.fdopen(handle, "w") as f:
f.write(csv_buf)
def tearDown(self):
os.unlink(self.fname)
def test_open(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.from_csv(store, schema=csv_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
def test_from_dialect(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.from_csv(store, schema=csv_schema, dialect='excel')
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
def test_from_has_header(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.from_csv(store, schema=csv_schema, has_header=False)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
def test_append(self):
store = blaze.Storage(self.url, mode='r+')
a = blaze.from_csv(store, schema=csv_schema)
blaze.append(a, ["k4", "v4", 4, True])
self.assertEqual(dd_as_py(a._data), csv_ldict + \
[{u'f0': u'k4', u'f1': u'v4', u'f2': 4, u'f3': True}])
def test_deprecated_open(self):
url = "csv://" + self.fname
store = blaze.Storage(url, mode='r')
a = blaze.from_csv(store, schema=csv_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), csv_ldict)
json_buf = u"[1, 2, 3, 4, 5]"
json_schema = "var * int8"
class TestOpenJSON(unittest.TestCase):
def setUp(self):
handle, self.fname = tempfile.mkstemp(suffix='.json')
self.url = self.fname
with os.fdopen(handle, "w") as f:
f.write(json_buf)
def tearDown(self):
os.unlink(self.fname)
def test_open(self):
store = blaze.Storage(self.url, mode='r')
a = blaze.from_json(store, schema=json_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1, 2, 3, 4, 5])
def test_deprecated_open(self):
url = "json://" + self.fname
store = blaze.Storage(url, mode='r')
a = blaze.from_json(store, schema=json_schema)
self.assert_(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1, 2, 3, 4, 5])
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/tests/test_array_opening.py",
"copies": "1",
"size": "3037",
"license": "bsd-3-clause",
"hash": -9196627735526026000,
"line_mean": 29.9897959184,
"line_max": 70,
"alpha_frac": 0.5884096148,
"autogenerated": false,
"ratio": 2.8016605166051662,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3890070131405166,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import tempfile
from functools import partial
import pytest
from bag8.project import Project
from bag8.utils import check_call as base_check_call
from bag8.utils import inspect
check_call = partial(base_check_call, exit=False)
@pytest.mark.exclusive
@pytest.mark.needdocker()
def test_build(client):
# rmi first
Project('busybox').rmi()
out, err, code = check_call(['bag8', 'build', 'busybox'])
assert code == 0, err
assert len(client.images('bag8/busybox')) == 1
@pytest.mark.needdocker()
def test_develop(slave_id):
# not exist -> create
out, err, code = check_call(['bag8', 'develop', 'busybox',
'-c', 'echo "hi"', '-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip().split('\n')[-1] == 'hi', err + '\n' + out
# check container exist
p = Project('busybox', develop=True, prefix=slave_id)
assert len(p.containers(['busybox'])) == 1
# started -> re-use
out, err, code = check_call(['bag8', 'develop', 'busybox',
'-c', 'echo "yo"', '-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip() == 'yo', err + '\n' + out
# stop
p.stop(timeout=0)
assert len(p.containers(['busybox'])) == 0
# before test
tmp_file = tempfile.NamedTemporaryFile().name
if os.path.exists(tmp_file):
os.remove(tmp_file)
# not started -> start -> reuse and touch file in shared volume
out, err, code = check_call(['bag8', 'develop', 'busybox',
'-c', 'touch "{0}"'.format(tmp_file),
'-p', slave_id])
assert code == 0, err + '\n' + out
local_tmp_file = tmp_file.replace('/tmp/', '')
assert os.path.exists(local_tmp_file), \
'not found: {}'.format(local_tmp_file)
os.remove(local_tmp_file)
out, err, code = check_call(['bag8', 'develop', 'busybox', '-c', 'env',
'-p', slave_id])
assert code == 0, err + '\n' + out
assert 'DUMMY=yo' in out.split(), err + '\n' + out
@pytest.mark.needdocker()
def test_develop_no_recreate(slave_id):
# develop
check_call(['bag8', 'develop', 'busybox', '-c', 'echo "hi"',
'-p', slave_id])
container = '{0}_busybox_1'.format(slave_id)
# rm
out, err, code = check_call(['docker', 'rm', '-f', container])
assert code == 0, err + '\n' + out
assert out.strip() == container
# should not recreate container link
out, err, code = check_call(['bag8', 'develop', 'busybox',
'-c', 'echo "hi"', '-p', slave_id])
assert code == 0, err + '\n' + out
assert err.strip() == 'Creating {0}_busybox_1...'.format(slave_id), err + '\n' + out # noqa
assert out.strip() == 'hi', err + '\n' + out
@pytest.mark.needdocker()
def test_dns():
# not exist -> create
out, err, code = check_call(['bag8', 'dns'])
assert code == 0, err + '\n' + out
assert inspect('dnsdock')['State']['Running']
@pytest.mark.needdocker()
def test_execute(slave_id):
# up a container to execute command in
check_call(['bag8', 'up', 'busybox', '-p', slave_id])
# execute something
out, err, code = check_call(['bag8', 'exec', 'busybox',
'-c', 'echo "hi"', '-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip() == 'hi'
# execute something in link container
out, err, code = check_call(['bag8', 'exec', 'busybox', '-s', 'link',
'-c', 'echo "hi link"', '-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip() == 'hi link'
@pytest.mark.needdocker()
def test_logs(slave_id):
# run some messages
check_call(['bag8', 'run', 'busybox', '--keep', '-c', 'echo "busybox"',
'-p', slave_id])
# logs main
out, err, code = check_call(['bag8', 'logs', 'busybox', '-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip() == 'busybox'
# logs service
out, err, code = check_call(['bag8', 'logs', 'busybox', '-s', 'link',
'--no-follow', '-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip() == ''
# logs ?
out, err, code = check_call(['bag8', 'logs', 'busybox', '-s', 'what',
'-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip() == 'no container for {0}_what_x'.format(slave_id)
@pytest.mark.exclusive
@pytest.mark.needdocker()
def test_nginx(config, slave_id):
conf_path = os.path.join(config.tmpfolder, 'nginx', 'conf.d')
# up a container to proxify
check_call(['bag8', 'up', 'busybox', '-p', slave_id])
out, err, code = check_call(['bag8', 'nginx', '--no-ports'])
assert code == 0, err + '\n' + out
assert inspect('nginx')['State']['Running']
site_conf_path = os.path.join(conf_path, 'busybox.conf')
assert os.path.exists(site_conf_path)
with open(site_conf_path) as site_conf:
assert site_conf.readlines()[1].strip() == 'server link.docker:1234;'
out, err, code = check_call(['bag8', 'nginx', '--no-ports',
'--upstream-server-domain', '192.168.0.1'])
assert code == 0, err + '\n' + out
assert inspect('nginx')['State']['Running']
site_conf_path = os.path.join(conf_path, 'busybox.conf')
assert os.path.exists(site_conf_path)
with open(site_conf_path) as site_conf:
assert site_conf.readlines()[1].strip() == 'server 192.168.0.1:1234;'
    # in filtered projects to override upstream domain
out, err, code = check_call(['bag8', 'nginx', '--no-ports',
'-p', 'busybox', '-p', 'link',
'--upstream-server-domain', '192.168.0.1'])
assert code == 0, err + '\n' + out
assert inspect('nginx')['State']['Running']
site_conf_path = os.path.join(conf_path, 'busybox.conf')
assert os.path.exists(site_conf_path)
with open(site_conf_path) as site_conf:
assert site_conf.readlines()[1].strip() == 'server 192.168.0.1:1234;'
    # not in filtered projects to override upstream domain
out, err, code = check_call(['bag8', 'nginx', '--no-ports',
'-p', 'link',
'--upstream-server-domain', '192.168.0.1'])
assert code == 0, err + '\n' + out
assert inspect('nginx')['State']['Running']
site_conf_path = os.path.join(conf_path, 'busybox.conf')
assert os.path.exists(site_conf_path)
with open(site_conf_path) as site_conf:
assert site_conf.readlines()[1].strip() == 'server link.docker:1234;'
@pytest.mark.exclusive
@pytest.mark.needdocker()
def test_pull(client):
# rmi first
Project('busybox').rmi()
assert len(client.images('bag8/busybox')) == 0
out, err, code = check_call(['bag8', 'pull', 'busybox'])
assert code == 0, err
assert len(client.images('bag8/busybox')) == 1
@pytest.mark.needdocker()
def test_push():
pass # lets say it works
@pytest.mark.needdocker()
def test_rm(slave_id):
# up a container to proxify
check_call(['bag8', 'up', 'busybox', '-p', slave_id])
# check container exist
p = Project('busybox', prefix=slave_id)
assert [c.name for c in p.containers()] == [
'{0}_busybox_1'.format(p.name),
'{0}_link_1'.format(p.name),
]
# recursive rm
check_call(['bag8', 'rm', 'busybox', '-p', slave_id])
assert len(p.containers(stopped=True)) == 0
# new up
check_call(['bag8', 'up', 'busybox', '-p', slave_id])
assert len(p.containers(stopped=True)) == 2
# single rm
check_call(['bag8', 'rm', 'busybox', '-p', slave_id,
'-s', 'busybox'])
assert len(p.containers()) == 1
assert [c.name for c in p.containers()][0] == '{0}_link_1'.format(slave_id)
@pytest.mark.exclusive
@pytest.mark.needdocker()
def test_rmi(client):
# initial test
assert len(client.images('bag8/busybox')) == 1
# rmi first
Project('busybox').rmi()
assert len(client.images('bag8/busybox')) == 0
@pytest.mark.needdocker()
def test_run(slave_id):
# not exist -> create
out, err, code = check_call(['bag8', 'run', 'busybox',
'-c', 'echo "hi"', '-p', slave_id])
assert code == 0, err + '\n' + out
assert out.strip().split('\n')[-1] == 'hi'
@pytest.mark.needdocker()
def test_start(slave_id):
# up a container to proxify
check_call(['bag8', 'up', 'busybox', '-p', slave_id])
# check container exist
p = Project('busybox', prefix=slave_id)
assert len(p.containers(['busybox'])) == 1
# stop all
check_call(['bag8', 'stop', 'busybox', '-p', slave_id])
assert [c.name for c in p.containers()] == []
# start all
check_call(['bag8', 'start', 'busybox', '-p', slave_id])
assert [c.name for c in p.containers()] == [
'{0}_busybox_1'.format(slave_id),
'{0}_link_1'.format(slave_id),
]
# stop all again
check_call(['bag8', 'stop', 'busybox', '-p', slave_id])
assert [c.name for c in p.containers()] == []
# start one
check_call(['bag8', 'start', 'busybox', '-p', slave_id,
'-s', 'link'])
assert [c.name for c in p.containers()] == [
'{0}_link_1'.format(slave_id),
]
@pytest.mark.needdocker()
def test_stop(slave_id):
# up a container to proxify
check_call(['bag8', 'up', 'busybox', '-p', slave_id])
# check container exist
p = Project('busybox', prefix=slave_id)
assert len(p.containers(['busybox'])) == 1
# stop
check_call(['bag8', 'stop', 'busybox', '-p', slave_id])
assert len(p.containers(['busybox'])) == 0
assert len(p.containers(['busybox'], stopped=True)) == 1
# start all again
check_call(['bag8', 'start', 'busybox', '-p', slave_id])
assert [c.name for c in p.containers()] == [
'{0}_busybox_1'.format(slave_id),
'{0}_link_1'.format(slave_id),
]
# stop one
check_call(['bag8', 'stop', 'busybox', '-p', slave_id,
'-s', 'busybox'])
assert [c.name for c in p.containers()] == [
'{0}_link_1'.format(slave_id),
]
@pytest.mark.needdocker()
def test_up(slave_id):
# up a container to proxify with prefix
check_call(['bag8', 'up', 'busybox', '-p', slave_id])
# check container exist
p = Project('busybox', prefix=slave_id)
assert len(p.containers(['busybox'])) == 1
| {
"repo_name": "novafloss/bag8",
"path": "bag8/tests/test_cli.py",
"copies": "1",
"size": "10623",
"license": "mit",
"hash": 6714268961105525000,
"line_mean": 30.0614035088,
"line_max": 96,
"alpha_frac": 0.5532335498,
"autogenerated": false,
"ratio": 3.2666051660516606,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.931983871585166,
"avg_score": 0,
"num_lines": 342
} |
from __future__ import absolute_import, division, print_function
import os
import types
import warnings
from itertools import count
import numpy as np
from sunpy.extern import six
from sunpy.extern.six.moves import map, zip
__all__ = ['to_signed', 'unique', 'print_table',
'replacement_filename', 'merge', 'common_base',
'minimal_pairs', 'polyfun_at',
'expand_list', 'expand_list_generator', 'Deprecated']
def to_signed(dtype):
""" Return dtype that can hold data of passed dtype but is signed.
Raise ValueError if no such dtype exists.
Parameters
----------
dtype : `numpy.dtype`
dtype whose values the new dtype needs to be able to represent.
Returns
-------
`numpy.dtype`
"""
if dtype.kind == "u":
if dtype.itemsize == 8:
raise ValueError("Cannot losslessly convert uint64 to int.")
dtype = "int{0:d}".format(min(dtype.itemsize * 2 * 8, 64))
return np.dtype(dtype)
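# Illustrative doctest-style examples for to_signed; the widths follow the
# min(itemsize * 2 * 8, 64) rule used above:
#     >>> to_signed(np.dtype('uint8'))
#     dtype('int16')
#     >>> to_signed(np.dtype('uint32'))
#     dtype('int64')
#     >>> to_signed(np.dtype('uint64'))
#     Traceback (most recent call last):
#         ...
#     ValueError: Cannot losslessly convert uint64 to int.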
def unique(itr, key=None):
"""
not documented yet
Parameters
----------
itr : iterable
Object to be iterated over
key : object
not documented yet
Returns
-------
not documented yet
.. todo::
improve documentation. what does this function do?
"""
items = set()
if key is None:
for elem in itr:
if elem not in items:
yield elem
items.add(elem)
else:
for elem in itr:
x = key(elem)
if x not in items:
yield elem
items.add(x)
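# Illustrative usage sketch for unique (order of first appearance is kept):
#     >>> list(unique([1, 2, 1, 3, 2]))
#     [1, 2, 3]
#     >>> list(unique(['a', 'A', 'b'], key=str.lower))
#     ['a', 'b']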
def print_table(lst, colsep=' ', linesep='\n'):
"""
?
Parameters
----------
lst : ?
?
colsep : ?
?
linesep : ?
?
Returns
-------
?
.. todo::
improve documentation.
"""
width = [max(map(len, col)) for col in zip(*lst)]
return linesep.join(
colsep.join(
col.ljust(n) for n, col in zip(width, row)
) for row in lst
)
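# Illustrative usage sketch for print_table; each column is left-justified
# to the width of its longest entry:
#     >>> print(print_table([['name', 'value'], ['alpha', 'gamma']]))
#     name  value
#     alpha gamma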
def polyfun_at(coeff, p):
""" Return value of polynomial with coefficients (highest first) at
point (can also be an np.ndarray for more than one point) p.
Parameters
----------
coeff : not documented yet
not documented yet
p : not documented yet
not documented yet
Returns
-------
not documented yet
.. todo::
improve documentation. what does this do? Does numpy have this functionality?
"""
return np.sum(k * p ** n for n, k in enumerate(reversed(coeff)))
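# Note for the todo above: numpy already provides this via np.polyval, which
# uses the same highest-power-first coefficient order, e.g.:
#     >>> np.polyval([2, 0, 1], 3)   # same as polyfun_at([2, 0, 1], 3)
#     19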
def minimal_pairs(one, other):
""" Find pairs of values in one and other with minimal distance.
Assumes one and other are sorted in the same sort sequence.
Parameters
----------
one, other : sequence
Sequence of scalars to find pairs from.
Returns
-------
`tuple`
Pairs of values in `one` and `other` with minimal distance
.. todo::
improve documentation. what does this do?
"""
lbestdiff = bestdiff = bestj = besti = None
for i, freq in enumerate(one):
lbestj = bestj
bestdiff, bestj = None, None
for j, o_freq in enumerate(other[lbestj:]):
j = lbestj + j if lbestj else j
diff = abs(freq - o_freq)
if bestj is not None and diff > bestdiff:
break
if bestj is None or bestdiff > diff:
bestj = j
bestdiff = diff
if lbestj is not None and lbestj != bestj:
yield (besti, lbestj, lbestdiff)
besti = i
lbestdiff = bestdiff
elif lbestdiff is None or bestdiff < lbestdiff:
besti = i
lbestdiff = bestdiff
yield (besti, bestj, lbestdiff)
DONT = object()
def find_next(one, other, pad=DONT):
""" Given two sorted sequences one and other, for every element
in one, return the one larger than it but nearest to it in other.
If no such exists and pad is not DONT, return value of pad as "partner".
.. todo::
improve documentation. what does this do?
"""
n = 0
for elem1 in one:
for elem2 in other[n:]:
n += 1
if elem2 > elem1:
yield elem1, elem2
break
else:
if pad is not DONT:
yield elem1, pad
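# Illustrative usage sketch for find_next:
#     >>> list(find_next([1, 5, 10], [2, 3, 6]))
#     [(1, 2), (5, 6)]
#     >>> list(find_next([1, 5, 10], [2, 3, 6], pad=None))
#     [(1, 2), (5, 6), (10, None)]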
def common_base(objs):
""" Find class that every item of objs is an instance of.
.. todo::
improve documentation. what does this do?
"""
for cls in objs[0].__class__.__mro__:
if all(isinstance(obj, cls) for obj in objs):
break
return cls
def merge(items, key=(lambda x: x)):
""" Given sorted lists of iterables, return new iterable that returns
elements of all iterables sorted with respect to key.
.. todo::
improve documentation. what does this do?
"""
state = {}
for item in map(iter, items):
try:
first = next(item)
except StopIteration:
continue
else:
state[item] = (first, key(first))
while state:
for item, (value, tk) in six.iteritems(state):
# Value is biggest.
if all(tk >= k for it, (v, k)
in six.iteritems(state) if it is not item):
yield value
break
try:
n = next(item)
state[item] = (n, key(n))
except StopIteration:
del state[item]
def replacement_filename(path):
""" Return replacement path for already used path. Enumerates
until an unused filename is found. E.g., "/home/florian/foo.fits"
becomes "/home/florian/foo.0.fits", if that is used
"/home/florian/foo.1.fits", etc.
.. todo::
improve documentation. what does this do?
"""
if not os.path.exists(path):
return path
else:
dir_, filename = os.path.split(path)
base, ext = os.path.splitext(filename)
for c in count():
name = base + '.' + str(c) + ext
newpath = os.path.join(dir_, name)
if not os.path.exists(newpath):
return newpath
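# Illustrative sketch for replacement_filename, assuming 'foo.fits' already
# exists on disk but 'foo.0.fits' does not:
#     >>> replacement_filename('foo.fits')
#     'foo.0.fits'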
def expand_list(input):
"""
Expand a list of lists.
Parameters
----------
input : `list`
Returns
-------
`list`
A flat list consisting of the entries of the input.
References
----------
    Taken from http://stackoverflow.com/a/2185971/2486799
.. todo::
improve documentation. Can this handle Arbitrarily nested lists?
"""
return [item for item in expand_list_generator(input)]
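# Illustrative example for expand_list; it does handle arbitrarily nested
# lists and tuples (see the todo above):
#     >>> expand_list([1, [2, [3, 4]], (5,)])
#     [1, 2, 3, 4, 5]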
def expand_list_generator(input):
"""
.. todo::
improve documentation. what does this function do?
"""
for item in input:
if type(item) in [list, tuple]:
for nested_item in expand_list_generator(item):
yield nested_item
else:
yield item
#==============================================================================
# Deprecation decorator: http://code.activestate.com/recipes/391367-deprecated/
# and http://www.artima.com/weblogs/viewpost.jsp?thread=240845
#==============================================================================
class Deprecated(object):
""" Use this decorator to deprecate a function or method, you can pass an
additional message to the decorator:
@Deprecated("no more")
"""
def __init__(self, message=""):
self.message = message
def __call__(self, func):
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function {0}. \n {1}".format(
func.__name__,
self.message),
category=Warning, stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
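# Illustrative usage sketch for the Deprecated decorator (old_name is a
# hypothetical function used only for the example):
#     >>> @Deprecated("use new_name instead")
#     ... def old_name():
#     ...     return 42
#     >>> old_name()   # emits a Warning pointing at the deprecated call
#     42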
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/util/util.py",
"copies": "1",
"size": "8079",
"license": "bsd-2-clause",
"hash": 2408865092647867400,
"line_mean": 25.145631068,
"line_max": 86,
"alpha_frac": 0.5391756405,
"autogenerated": false,
"ratio": 4.23205866946045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.527123430996045,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import unittest
import dask.array as da
import numpy as np
from netCDF4 import Dataset
from odvc import ocean_s_coordinate_g1
from odvc.utils import (
get_formula_terms,
get_formula_terms_dims,
get_formula_terms_variables,
prepare_arrays,
z_shape,
)
data_path = os.path.join(os.path.dirname(__file__), "data")
class OceanSCoordinateG1(unittest.TestCase):
def setUp(self):
roms = "ocean_s_coordinate_g1_roms.nc"
self.nc = Dataset(os.path.join(data_path, roms))
formula_terms_variable = get_formula_terms_variables(self.nc)[0]
formula_terms = get_formula_terms(formula_terms_variable)
dims = get_formula_terms_dims(self.nc, formula_terms)
new_shape = z_shape(self.nc, dims)
arrays = prepare_arrays(self.nc, formula_terms, new_shape)
s = arrays["s"]
c = arrays["C"]
eta = arrays["eta"]
depth = arrays["depth"]
depth_c = arrays["depth_c"]
self.z = ocean_s_coordinate_g1(s, c, eta, depth, depth_c)
self.sliced = self.z[0, :, 30, 80]
def tearDown(self):
self.nc.close()
unittest.TestCase.tearDown(self)
def test_shape(self):
assert self.z.shape == (1, 36, 82, 130)
def test_slice(self):
assert isinstance(self.sliced, da.Array)
def test_slice_ndarray(self):
assert isinstance(self.sliced.compute(), np.ndarray)
def test_z_values(self):
z_comp = np.array(
[
-2531.46656205,
-2337.58793694,
-2167.67949235,
-2018.28088531,
-1886.27977235,
-1768.84270488,
-1663.35273478,
-1567.35533466,
-1478.516278,
-1394.59779732,
-1313.46227081,
-1233.11474357,
-1151.79449631,
-1068.11831429,
-981.26114252,
-891.13435155,
-798.4970414,
-704.92970128,
-612.62956129,
-524.05182095,
-441.49011516,
-366.71960543,
-300.79609284,
-244.03541034,
-196.13254143,
-156.34993361,
-123.71062861,
-97.1566744,
-75.65879794,
-58.28034096,
-44.20608553,
-32.74776123,
-23.33605136,
-15.50607592,
-8.88076055,
-3.15458049,
]
)
np.testing.assert_allclose(self.sliced.compute(), z_comp)
| {
"repo_name": "pyoceans/odvc",
"path": "tests/test_ocean_s_coordinate_g1.py",
"copies": "3",
"size": "2760",
"license": "mit",
"hash": 4291468013574247000,
"line_mean": 28.0526315789,
"line_max": 72,
"alpha_frac": 0.5141304348,
"autogenerated": false,
"ratio": 3.4074074074074074,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 95
} |
from __future__ import absolute_import, division, print_function
import os
import warnings
from collections import OrderedDict
from glue.core.data import Component, Data
from glue.config import data_factory
__all__ = ['is_hdf5', 'hdf5_reader']
def extract_hdf5_datasets(handle):
'''
Recursive function that returns a dictionary with all the datasets
found in an HDF5 file or group. `handle` should be an instance of
h5py.highlevel.File or h5py.highlevel.Group.
'''
import h5py
datasets = {}
for group in handle:
if isinstance(handle[group], h5py.highlevel.Group):
sub_datasets = extract_hdf5_datasets(handle[group])
for key in sub_datasets:
datasets[key] = sub_datasets[key]
elif isinstance(handle[group], h5py.highlevel.Dataset):
if handle[group].dtype.kind in ('f', 'i', 'V'):
datasets[handle[group].name] = handle[group]
return datasets
def is_hdf5(filename):
# All hdf5 files begin with the same sequence
with open(filename, 'rb') as infile:
return infile.read(8) == b'\x89HDF\r\n\x1a\n'
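# The check above relies on the fixed 8-byte HDF5 signature b'\x89HDF\r\n\x1a\n';
# a minimal sketch, assuming 'example.h5' is an HDF5 file present on disk:
#     >>> is_hdf5('example.h5')
#     True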
@data_factory(label="HDF5 file", identifier=is_hdf5, priority=100)
def hdf5_reader(filename, format='auto', auto_merge=False, **kwargs):
"""
Read in all datasets from an HDF5 file
Parameters
----------
source: str or HDUList
The pathname to the FITS file.
If an HDUList is passed in, simply use that.
"""
import h5py
from astropy.table import Table
# Open file
file_handle = h5py.File(filename, 'r')
    # Read in all datasets
datasets = extract_hdf5_datasets(file_handle)
label_base = os.path.basename(filename).rpartition('.')[0]
if not label_base:
label_base = os.path.basename(filename)
data_by_shape = {}
groups = OrderedDict()
for key in datasets:
label = '{0}[{1}]'.format(
label_base,
key
)
if datasets[key].dtype.kind in ('f', 'i'):
if auto_merge and datasets[key].value.shape in data_by_shape:
data = data_by_shape[datasets[key].value.shape]
else:
data = Data(label=label)
data_by_shape[datasets[key].value.shape] = data
groups[label] = data
data.add_component(component=datasets[key].value, label=key)
else:
table = Table.read(datasets[key], format='hdf5')
data = Data(label=label)
groups[label] = data
for column_name in table.columns:
column = table[column_name]
if column.ndim == 1:
component = Component(column, units=column.unit)
data.add_component(component=component,
label=column_name)
else:
warnings.warn("HDF5: Ignoring vector column {0}".format(column_name))
# Close HDF5 file
file_handle.close()
return [groups[idx] for idx in groups]
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/data_factories/hdf5.py",
"copies": "4",
"size": "3091",
"license": "bsd-3-clause",
"hash": -452199982591098240,
"line_mean": 29.3039215686,
"line_max": 89,
"alpha_frac": 0.5949530896,
"autogenerated": false,
"ratio": 3.8831658291457285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010893246187363835,
"num_lines": 102
} |
from __future__ import absolute_import, division, print_function
import os
import warnings
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Qt
from glue.external import six
from glue.core.callback_property import add_callback
from glue.viewers.common.qt.tool import CheckableTool
from glue.icons.qt import get_icon
__all__ = ['BasicToolbar']
class BasicToolbar(QtWidgets.QToolBar):
tool_activated = QtCore.Signal()
tool_deactivated = QtCore.Signal()
def __init__(self, parent):
"""
Create a new toolbar object
"""
super(BasicToolbar, self).__init__(parent=parent)
self.actions = {}
self.tools = {}
self.setIconSize(QtCore.QSize(25, 25))
self.layout().setSpacing(1)
self.setFocusPolicy(Qt.StrongFocus)
self._active_tool = None
self.setup_default_modes()
def setup_default_modes(self):
pass
@property
def active_tool(self):
return self._active_tool
@active_tool.setter
def active_tool(self, new_tool):
old_tool = self._active_tool
        # If the tool is the same as before, we don't need to do anything
if old_tool is new_tool:
return
        # Otherwise, if the tool changes, then we need to disable the previous
# tool...
if old_tool is not None:
self.deactivate_tool(old_tool)
if isinstance(old_tool, CheckableTool):
button = self.actions[old_tool.tool_id]
if button.isChecked():
button.blockSignals(True)
button.setChecked(False)
button.blockSignals(False)
# ... and enable the new one
if new_tool is not None:
self.activate_tool(new_tool)
if isinstance(new_tool, CheckableTool):
button = self.actions[new_tool.tool_id]
if not button.isChecked():
button.blockSignals(True)
button.setChecked(True)
button.blockSignals(False)
if isinstance(new_tool, CheckableTool):
self._active_tool = new_tool
self.parent().set_status(new_tool.status_tip)
self.tool_activated.emit()
else:
self._active_tool = None
self.parent().set_status('')
self.tool_deactivated.emit()
def activate_tool(self, tool):
tool.activate()
def deactivate_tool(self, tool):
if isinstance(tool, CheckableTool):
tool.deactivate()
def add_tool(self, tool):
parent = QtWidgets.QToolBar.parent(self)
if isinstance(tool.icon, six.string_types):
if os.path.exists(tool.icon):
icon = QtGui.QIcon(tool.icon)
else:
icon = get_icon(tool.icon)
else:
icon = tool.icon
action = QtWidgets.QAction(icon, tool.action_text, parent)
def toggle(checked):
if checked:
self.active_tool = tool
else:
self.active_tool = None
def trigger(checked):
self.active_tool = tool
parent.addAction(action)
if isinstance(tool, CheckableTool):
action.toggled.connect(toggle)
else:
action.triggered.connect(trigger)
shortcut = None
if tool.shortcut is not None:
# Make sure that the keyboard shortcut is unique
for m in self.tools.values():
if tool.shortcut == m.shortcut:
warnings.warn("Tools '{0}' and '{1}' have the same shortcut "
"('{2}'). Ignoring shortcut for "
"'{1}'".format(m.tool_id, tool.tool_id, tool.shortcut))
break
else:
shortcut = tool.shortcut
action.setShortcut(tool.shortcut)
action.setShortcutContext(Qt.WidgetShortcut)
if shortcut is None:
action.setToolTip(tool.tool_tip)
else:
action.setToolTip(tool.tool_tip + " [shortcut: {0}]".format(shortcut))
action.setCheckable(isinstance(tool, CheckableTool))
self.actions[tool.tool_id] = action
menu_actions = tool.menu_actions()
if len(menu_actions) > 0:
menu = QtWidgets.QMenu(self)
for ma in tool.menu_actions():
ma.setParent(self)
menu.addAction(ma)
action.setMenu(menu)
menu.triggered.connect(trigger)
self.addAction(action)
# Bind tool visibility to tool.enabled
def toggle(state):
action.setVisible(state)
action.setEnabled(state)
add_callback(tool, 'enabled', toggle)
self.tools[tool.tool_id] = tool
return action
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/common/qt/toolbar.py",
"copies": "3",
"size": "4898",
"license": "bsd-3-clause",
"hash": 8000714395341098000,
"line_mean": 29.0490797546,
"line_max": 89,
"alpha_frac": 0.5624744794,
"autogenerated": false,
"ratio": 4.292725679228747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6355200158628748,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from appr.commands.command_base import CommandBase
from appr.pack import ApprPackage
class PullCmd(CommandBase):
name = 'pull'
help_message = "download a package"
def __init__(self, options):
super(PullCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.version = options.version
self.version_parts = options.version_parts
self.dest = options.dest
self.media_type = options.media_type
if options.media_type is self.default_media_type:
self.media_type = os.getenv("APPR_DEFAULT_MEDIA_TYPE", self.default_media_type)
self.tarball = options.tarball
self.path = None
self.ssl_verify = options.cacert or not options.insecure
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_mediatype_option(parser)
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
parser.add_argument("--dest", default="./", help="directory used to extract resources")
parser.add_argument("--tarball", action="store_true", default=False,
help="download the tar.gz")
def _call(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
pullpack = client.pull_json(self.package, version_parts=self.version_parts,
media_type=self.media_type)
self.media_type = pullpack.get('media_type', '-')
package = ApprPackage(pullpack['blob'], b64_encoded=True)
filename = pullpack['filename']
self.path = os.path.join(self.dest, filename)
if self.tarball:
with open(self.path, 'wb') as tarfile:
tarfile.write(package.blob)
else:
self.path = self.path.split(".tar.gz")[0]
package.extract(self.path)
def _render_dict(self):
return {
"pull": self.package,
"media_type": self.media_type,
"version": self.version,
"path": self.path}
def _render_console(self):
return "Pull package: %s... \n%s" % (self.package, self.path)
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/pull.py",
"copies": "2",
"size": "2329",
"license": "apache-2.0",
"hash": -6064321086907732000,
"line_mean": 37.1803278689,
"line_max": 95,
"alpha_frac": 0.6161442679,
"autogenerated": false,
"ratio": 3.881666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000726264014752967,
"num_lines": 61
} |
from __future__ import absolute_import, division, print_function
import os
from appr.formats.utils import kub_factory
from appr.commands.command_base import CommandBase, LoadVariables
class DeployCmd(CommandBase):
name = 'deploy'
help_message = "deploy a package on kubernetes"
default_media_type = "kpm"
def __init__(self, options):
super(DeployCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.force = options.force
self.dry_run = options.dry_run
self.namespace = options.namespace
self.api_proxy = options.api_proxy
self.version = options.version
self.version_parts = options.version_parts
self.tmpdir = options.tmpdir
self.variables = options.variables
self.format = options.media_type
if options.media_type is self.default_media_type:
self.format = os.getenv("APPR_DEFAULT_MEDIA_TYPE", self.default_media_type)
self.status = None
self._kub = None
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_mediatype_option(parser, default=cls.default_media_type)
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
parser.add_argument("--tmpdir", default="/tmp/",
help="directory used to extract resources")
parser.add_argument("--dry-run", action='store_true', default=False,
help="do not create the resources on kubernetes")
parser.add_argument("-n", "--namespace", help="kubernetes namespace", default=None)
parser.add_argument("--api-proxy", help="kubectl proxy url", nargs="?",
const="http://localhost:8001")
parser.add_argument("-x", "--variables", help="variables", default={},
action=LoadVariables)
parser.add_argument("--force", action='store_true', default=False,
help="force upgrade, delete and recreate resources")
def kub(self):
if self._kub is None:
self._kub = kub_factory(self.format, self.package, endpoint=self.registry_host,
variables=self.variables, namespace=self.namespace,
version=self.version_parts)
return self._kub
def _call(self):
self.status = self.kub().deploy(dest=self.tmpdir, force=self.force, dry=self.dry_run,
proxy=self.api_proxy, fmt=self.output)
def _render_dict(self):
return self.status
def _render_console(self):
""" Handled by deploy """
return ''
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/deploy.py",
"copies": "2",
"size": "2784",
"license": "apache-2.0",
"hash": -9176044098071153000,
"line_mean": 39.9411764706,
"line_max": 93,
"alpha_frac": 0.6041666667,
"autogenerated": false,
"ratio": 4.211800302571861,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5815966969271861,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from bag8.project import Project
from bag8.yaml import Yaml
CURR_DIR = os.path.realpath('.')
def test_data():
# normal
project = Project('busybox')
assert Yaml(project).data == {
'busybox': {
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
]
},
'link': {
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
}
# develop
project = Project('busybox', develop=True)
assert Yaml(project).data == {
'busybox': {
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'yo',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
],
'volumes': [
'{}:/tmp'.format(CURR_DIR)
]
},
'link': {
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
}
def test_service_dicts():
# normal
project = Project('busybox')
assert sorted(Yaml(project).service_dicts) == sorted([
{
'name': 'busybox',
'bag8_name': 'busybox',
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
]
},
{
'name': 'link',
'bag8_name': 'link',
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
])
# develop
project = Project('busybox', develop=True)
assert sorted(Yaml(project).service_dicts) == sorted([
{
'name': 'busybox',
'bag8_name': 'busybox',
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'yo',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
],
'volumes': [
'{}:/tmp'.format(CURR_DIR)
]
},
{
'name': 'link',
'bag8_name': 'link',
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
])
# complex name
project = Project('link.2')
assert sorted(Yaml(project).service_dicts) == sorted([
{
'name': 'link2',
'bag8_name': 'link.2',
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link2.docker',
'DNSDOCK_IMAGE': '',
},
'image': 'bag8/busybox',
'links': []
}
])
| {
"repo_name": "novafloss/bag8",
"path": "bag8/tests/test_yaml.py",
"copies": "1",
"size": "4636",
"license": "mit",
"hash": 437238647660886600,
"line_mean": 27.6172839506,
"line_max": 73,
"alpha_frac": 0.4109145815,
"autogenerated": false,
"ratio": 3.9828178694158076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48937324509158076,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from ...external.qt.QtGui import (
QMainWindow, QMessageBox, QWidget)
from ...external.qt.QtCore import Qt
from ...core.application_base import ViewerBase
from ..decorators import set_cursor
from ..layer_artist_model import QtLayerArtistContainer, LayerArtistView
from .. import get_qapp
from ..mime import LAYERS_MIME_TYPE, LAYER_MIME_TYPE
from .glue_mdi_area import GlueMdiSubWindow
__all__ = ['DataViewer']
class DataViewer(QMainWindow, ViewerBase):
"""Base class for all Qt DataViewer widgets.
This defines a minimal interface, and implemlements the following::
* An automatic call to unregister on window close
* Drag and drop support for adding data
"""
_container_cls = QtLayerArtistContainer
LABEL = 'Override this'
def __init__(self, session, parent=None):
"""
:type session: :class:`~glue.core.Session`
"""
QMainWindow.__init__(self, parent)
ViewerBase.__init__(self, session)
self.setWindowIcon(get_qapp().windowIcon())
self._view = LayerArtistView()
self._view.setModel(self._container.model)
self._tb_vis = {} # store whether toolbars are enabled
self.setAttribute(Qt.WA_DeleteOnClose)
self.setAcceptDrops(True)
self.setAnimated(False)
self._toolbars = []
self._warn_close = True
self.setContentsMargins(2, 2, 2, 2)
self._mdi_wrapper = None # GlueMdiSubWindow that self is embedded in
self.statusBar().setStyleSheet("QStatusBar{font-size:10px}")
# close window when last plot layer deleted
self._container.on_empty(lambda: self.close(warn=False))
self._container.on_changed(self.update_window_title)
def remove_layer(self, layer):
self._container.pop(layer)
def dragEnterEvent(self, event):
""" Accept the event if it has data layers"""
if event.mimeData().hasFormat(LAYER_MIME_TYPE):
event.accept()
elif event.mimeData().hasFormat(LAYERS_MIME_TYPE):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
""" Add layers to the viewer if contained in mime data """
if event.mimeData().hasFormat(LAYER_MIME_TYPE):
self.request_add_layer(event.mimeData().data(LAYER_MIME_TYPE))
assert event.mimeData().hasFormat(LAYERS_MIME_TYPE)
for layer in event.mimeData().data(LAYERS_MIME_TYPE):
self.request_add_layer(layer)
event.accept()
def mousePressEvent(self, event):
""" Consume mouse press events, and prevent them from propagating
down to the MDI area """
event.accept()
apply_roi = set_cursor(Qt.WaitCursor)(ViewerBase.apply_roi)
def close(self, warn=True):
self._warn_close = warn
super(DataViewer, self).close()
self._warn_close = True
def mdi_wrap(self):
"""Wrap this object in a GlueMdiSubWindow"""
sub = GlueMdiSubWindow()
sub.setWidget(self)
self.destroyed.connect(sub.close)
sub.resize(self.size())
self._mdi_wrapper = sub
return sub
@property
def position(self):
target = self._mdi_wrapper or self
pos = target.pos()
return pos.x(), pos.y()
@position.setter
def position(self, xy):
x, y = xy
self.move(x, y)
def move(self, x=None, y=None):
"""
Move the viewer to a new XY pixel location
You can also set the position attribute to a new tuple directly.
Parameters
----------
x : int (optional)
New x position
y : int (optional)
New y position
"""
x0, y0 = self.position
if x is None:
x = x0
if y is None:
y = y0
if self._mdi_wrapper is not None:
self._mdi_wrapper.move(x, y)
else:
QMainWindow.move(self, x, y)
@property
def viewer_size(self):
sz = QMainWindow.size(self)
return sz.width(), sz.height()
@viewer_size.setter
def viewer_size(self, value):
width, height = value
self.resize(width, height)
if self._mdi_wrapper is not None:
self._mdi_wrapper.resize(width, height)
def closeEvent(self, event):
""" Call unregister on window close """
if not self._confirm_close():
event.ignore()
return
if self._hub is not None:
self.unregister(self._hub)
super(DataViewer, self).closeEvent(event)
event.accept()
def _confirm_close(self):
"""Ask for close confirmation
:rtype: bool. True if user wishes to close. False otherwise
"""
if self._warn_close and (not os.environ.get('GLUE_TESTING')) and self.isVisible():
buttons = QMessageBox.Ok | QMessageBox.Cancel
dialog = QMessageBox.warning(self, "Confirm Close",
"Do you want to close this window?",
buttons=buttons,
defaultButton=QMessageBox.Cancel)
return dialog == QMessageBox.Ok
return True
def _confirm_large_data(self, data):
warn_msg = ("WARNING: Data set has %i points, and may render slowly."
" Continue?" % data.size)
title = "Add large data set?"
ok = QMessageBox.Ok
cancel = QMessageBox.Cancel
buttons = ok | cancel
result = QMessageBox.question(self, title, warn_msg,
buttons=buttons,
defaultButton=cancel)
return result == ok
def layer_view(self):
return self._view
def options_widget(self):
return QWidget()
def addToolBar(self, tb):
super(DataViewer, self).addToolBar(tb)
self._toolbars.append(tb)
self._tb_vis[tb] = True
def show_toolbars(self):
"""Re-enable any toolbars that were hidden with `hide_toolbars()`
Does not re-enable toolbars that were hidden by other means
"""
for tb in self._toolbars:
if self._tb_vis.get(tb, False):
tb.setEnabled(True)
def hide_toolbars(self):
""" Disable all the toolbars in the viewer.
This action can be reversed by calling `show_toolbars()`
"""
for tb in self._toolbars:
self._tb_vis[tb] = self._tb_vis.get(tb, False) or tb.isVisible()
tb.setEnabled(False)
def set_focus(self, state):
if state:
css = """
DataViewer
{
border: 2px solid;
border-color: rgb(56, 117, 215);
}
"""
self.setStyleSheet(css)
self.show_toolbars()
else:
css = """
DataViewer
{
border: none;
}
"""
self.setStyleSheet(css)
self.hide_toolbars()
def __str__(self):
return self.LABEL
def unregister(self, hub):
"""
Override to perform cleanup operations when disconnecting from hub
"""
pass
@property
def window_title(self):
return str(self)
def update_window_title(self):
self.setWindowTitle(self.window_title)
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/data_viewer.py",
"copies": "1",
"size": "7526",
"license": "bsd-3-clause",
"hash": 6262726998563272000,
"line_mean": 29.2248995984,
"line_max": 90,
"alpha_frac": 0.5721498804,
"autogenerated": false,
"ratio": 4.12609649122807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.519824637162807,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from flask import Flask, request
from flask_cors import CORS
from appr.api.config import DevelopmentConfig, ProductionConfig
from appr.exception import InvalidUsage
def getvalues():
jsonbody = request.get_json(force=True, silent=True)
values = request.values.to_dict()
if jsonbody:
values.update(jsonbody)
return values
def create_app():
app = Flask(__name__)
CORS(app)
setting = os.getenv('APP_ENV', "development")
if setting != 'production':
app.config.from_object(DevelopmentConfig)
else:
app.config.from_object(ProductionConfig)
from appr.api.info import info_app
from appr.api.registry import registry_app
app.register_blueprint(info_app, url_prefix='/cnr')
app.register_blueprint(registry_app, url_prefix='/cnr')
app.logger.info("Start service")
return app
def repo_name(namespace, name):
def _check(name, scope):
if name is None:
raise InvalidUsage("%s: %s is malformed" % (scope, name), {'name': name})
_check(namespace, 'namespace')
_check(name, 'package-name')
return "%s/%s" % (namespace, name)
if __name__ == "__main__":
application = create_app()
application.run(host='0.0.0.0')
| {
"repo_name": "app-registry/appr",
"path": "appr/api/app.py",
"copies": "2",
"size": "1307",
"license": "apache-2.0",
"hash": -2522829662725542400,
"line_mean": 26.2291666667,
"line_max": 85,
"alpha_frac": 0.6679418516,
"autogenerated": false,
"ratio": 3.580821917808219,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5248763769408219,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from functools import partial
from collections import Counter
import numpy as np
from glue.core import Coordinates
from qtpy import QtCore, QtWidgets
from glue.utils.qt import load_ui
from glue.utils.qt.widget_properties import (TextProperty,
ButtonProperty,
ValueProperty,
CurrentComboProperty)
from glue.utils import nonpartial
from glue.icons.qt import get_icon
class SliceWidget(QtWidgets.QWidget):
label = TextProperty('_ui_label')
slider_label = TextProperty('_ui_slider.label')
slider_unit = TextProperty('_ui_slider.text_unit')
slice_center = ValueProperty('_ui_slider.slider')
mode = CurrentComboProperty('_ui_mode')
use_world = ButtonProperty('_ui_slider.checkbox_world')
slice_changed = QtCore.Signal(int)
mode_changed = QtCore.Signal(str)
def __init__(self, label='', world=None, lo=0, hi=10,
parent=None, aggregation=None, world_unit=None,
world_warning=False):
super(SliceWidget, self).__init__(parent)
if aggregation is not None:
            raise NotImplementedError("Aggregation option not implemented")
self._world = np.asarray(world)
self._world_warning = world_warning
self._world_unit = world_unit
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(3, 1, 3, 1)
layout.setSpacing(0)
top = QtWidgets.QHBoxLayout()
top.setContentsMargins(3, 3, 3, 3)
label = QtWidgets.QLabel(label)
top.addWidget(label)
mode = QtWidgets.QComboBox()
mode.addItem('x', 'x')
mode.addItem('y', 'y')
mode.addItem('slice', 'slice')
mode.currentIndexChanged.connect(lambda x:
self.mode_changed.emit(self.mode))
mode.currentIndexChanged.connect(self._update_mode)
top.addWidget(mode)
layout.addLayout(top)
slider = load_ui('data_slice_widget.ui', None,
directory=os.path.dirname(__file__))
self._ui_slider = slider
slider.button_first.setStyleSheet('border: 0px')
slider.button_first.setIcon(get_icon('playback_first'))
slider.button_prev.setStyleSheet('border: 0px')
slider.button_prev.setIcon(get_icon('playback_prev'))
slider.button_back.setStyleSheet('border: 0px')
slider.button_back.setIcon(get_icon('playback_back'))
slider.button_stop.setStyleSheet('border: 0px')
slider.button_stop.setIcon(get_icon('playback_stop'))
slider.button_forw.setStyleSheet('border: 0px')
slider.button_forw.setIcon(get_icon('playback_forw'))
slider.button_next.setStyleSheet('border: 0px')
slider.button_next.setIcon(get_icon('playback_next'))
slider.button_last.setStyleSheet('border: 0px')
slider.button_last.setIcon(get_icon('playback_last'))
slider.slider.setMinimum(lo)
slider.slider.setMaximum(hi)
slider.slider.setValue((lo + hi) / 2)
slider.slider.valueChanged.connect(lambda x:
self.slice_changed.emit(self.mode))
slider.slider.valueChanged.connect(nonpartial(self.set_label_from_slider))
slider.label.setMinimumWidth(80)
slider.label.setText(str(slider.slider.value()))
slider.label.editingFinished.connect(nonpartial(self.set_slider_from_label))
self._play_timer = QtCore.QTimer()
self._play_timer.setInterval(500)
self._play_timer.timeout.connect(nonpartial(self._play_slice))
slider.button_first.clicked.connect(nonpartial(self._browse_slice, 'first'))
slider.button_prev.clicked.connect(nonpartial(self._browse_slice, 'prev'))
slider.button_back.clicked.connect(nonpartial(self._adjust_play, 'back'))
slider.button_stop.clicked.connect(nonpartial(self._adjust_play, 'stop'))
slider.button_forw.clicked.connect(nonpartial(self._adjust_play, 'forw'))
slider.button_next.clicked.connect(nonpartial(self._browse_slice, 'next'))
slider.button_last.clicked.connect(nonpartial(self._browse_slice, 'last'))
slider.checkbox_world.toggled.connect(nonpartial(self.set_label_from_slider))
if world is None:
self.use_world = False
slider.checkbox_world.hide()
else:
self.use_world = not world_warning
if world_unit:
self.slider_unit = world_unit
else:
self.slider_unit = ''
layout.addWidget(slider)
self.setLayout(layout)
self._ui_label = label
self._ui_mode = mode
self._update_mode()
self._frozen = False
self._play_speed = 0
self.set_label_from_slider()
def set_label_from_slider(self):
value = self._ui_slider.slider.value()
if self.use_world:
text = str(self._world[value])
if self._world_warning:
self._ui_slider.label_warning.show()
else:
self._ui_slider.label_warning.hide()
self.slider_unit = self._world_unit
else:
text = str(value)
self._ui_slider.label_warning.hide()
self.slider_unit = ''
self._ui_slider.label.setText(text)
def set_slider_from_label(self):
text = self._ui_slider.label.text()
if self.use_world:
# Don't want to assume world is sorted, pick closest value
value = np.argmin(np.abs(self._world - float(text)))
self._ui_slider.label.setText(str(self._world[value]))
else:
value = int(text)
self._ui_slider.slider.setValue(value)
def _adjust_play(self, action):
if action == 'stop':
self._play_speed = 0
elif action == 'back':
if self._play_speed > 0:
self._play_speed = -1
else:
self._play_speed -= 1
elif action == 'forw':
if self._play_speed < 0:
self._play_speed = +1
else:
self._play_speed += 1
if self._play_speed == 0:
self._play_timer.stop()
else:
self._play_timer.start()
            self._play_timer.setInterval(int(500 / abs(self._play_speed)))
def _play_slice(self):
if self._play_speed > 0:
self._browse_slice('next', play=True)
elif self._play_speed < 0:
self._browse_slice('prev', play=True)
def _browse_slice(self, action, play=False):
imin = self._ui_slider.slider.minimum()
imax = self._ui_slider.slider.maximum()
value = self._ui_slider.slider.value()
# If this was not called from _play_slice, we should stop the
# animation.
if not play:
self._adjust_play('stop')
if action == 'first':
value = imin
elif action == 'last':
value = imax
elif action == 'prev':
value = value - 1
if value < imin:
value = imax
elif action == 'next':
value = value + 1
if value > imax:
value = imin
else:
raise ValueError("Action should be one of first/prev/next/last")
self._ui_slider.slider.setValue(value)
def _update_mode(self, *args):
if self.mode != 'slice':
self._ui_slider.hide()
self._adjust_play('stop')
else:
self._ui_slider.show()
def freeze(self):
self.mode = 'slice'
self._ui_mode.setEnabled(False)
self._ui_slider.hide()
self._frozen = True
@property
def frozen(self):
return self._frozen
class DataSlice(QtWidgets.QWidget):
"""
    A DataSlice widget provides an interface for selecting
    slices through an N-dimensional dataset
    QtCore.Signals
    --------------
slice_changed : triggered when the slice through the data changes
"""
slice_changed = QtCore.Signal()
def __init__(self, data=None, parent=None):
"""
:param data: :class:`~glue.core.data.Data` instance, or None
"""
super(DataSlice, self).__init__(parent)
self._slices = []
self._data = None
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(4)
layout.setContentsMargins(0, 3, 0, 3)
self.layout = layout
self.setLayout(layout)
self.set_data(data)
@property
def ndim(self):
return len(self.shape)
@property
def shape(self):
return tuple() if self._data is None else self._data.shape
def _clear(self):
for _ in range(self.layout.count()):
self.layout.takeAt(0)
for s in self._slices:
s.close()
self._slices = []
def set_data(self, data):
"""
Change datasets
        :param data: :class:`~glue.core.data.Data` instance
"""
# remove old widgets
self._clear()
self._data = data
if data is None or data.ndim < 3:
return
# create slider widget for each dimension...
for i, s in enumerate(data.shape):
# TODO: For now we simply pass a single set of world coordinates,
# but we will need to generalize this in future. We deliberately
# check the type of data.coords here since we want to treat
# subclasses differently.
if type(data.coords) != Coordinates:
world = data.coords.world_axis(data, i)
world_unit = data.coords.world_axis_unit(i)
world_warning = len(data.coords.dependent_axes(i)) > 1
else:
world = None
world_unit = None
world_warning = False
slider = SliceWidget(data.get_world_component_id(i).label,
hi=s - 1, world=world, world_unit=world_unit,
world_warning=world_warning)
if i == self.ndim - 1:
slider.mode = 'x'
elif i == self.ndim - 2:
slider.mode = 'y'
else:
slider.mode = 'slice'
self._slices.append(slider)
# save ref to prevent PySide segfault
self.__on_slice = partial(self._on_slice, i)
self.__on_mode = partial(self._on_mode, i)
slider.slice_changed.connect(self.__on_slice)
slider.mode_changed.connect(self.__on_mode)
if s == 1:
slider.freeze()
# ... and add to the layout
for s in self._slices[::-1]:
self.layout.addWidget(s)
if s is not self._slices[0]:
line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.layout.addWidget(line)
s.show() # this somehow fixes #342
self.layout.addStretch(5)
def _on_slice(self, index, slice_val):
self.slice_changed.emit()
def _on_mode(self, index, mode_index):
s = self.slice
def isok(ss):
# valid slice description: 'x' and 'y' both appear
c = Counter(ss)
return c['x'] == 1 and c['y'] == 1
if isok(s):
self.slice_changed.emit()
return
for i in range(len(s)):
if i == index:
continue
if self._slices[i].frozen:
continue
for mode in 'x', 'y', 'slice':
if self._slices[i].mode == mode:
continue
ss = list(s)
ss[i] = mode
if isok(ss):
self._slices[i].mode = mode
return
else:
raise RuntimeError("Corrupted Data Slice")
@property
def slice(self):
"""
A description of the slice through the dataset
        A tuple of length equal to the dimensionality of the data
Each element is an integer, 'x', or 'y'
'x' and 'y' indicate the horizontal and vertical orientation
of the slice
"""
if self.ndim < 3:
return {0: tuple(), 1: ('x',), 2: ('y', 'x')}[self.ndim]
return tuple(s.mode if s.mode != 'slice' else s.slice_center
for s in self._slices)
@slice.setter
def slice(self, value):
for v, s in zip(value, self._slices):
if v in ['x', 'y']:
s.mode = v
else:
s.mode = 'slice'
s.slice_center = v
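# A minimal usage sketch of DataSlice (illustrative, not called anywhere in
# this module): it assumes a glue Data object built from a 3-d numpy array.
# The component name 'cube' is a placeholder.
def _data_slice_sketch():  # pragma: no cover
    import numpy as np
    from glue.core import Data
    from glue.utils.qt import get_qapp
    app = get_qapp()
    data = Data(cube=np.zeros((3, 4, 5)), label='cube')
    widget = DataSlice(data)
    widget.show()
    # ``slice`` has one entry per dimension: an integer for a sliced axis,
    # or 'x'/'y' for the axes displayed on screen, e.g. (1, 'y', 'x')
    print(widget.slice)
    app.exec_()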
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
widget = SliceWidget()
widget.show()
app.exec_()
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/common/qt/data_slice_widget.py",
"copies": "1",
"size": "13066",
"license": "bsd-3-clause",
"hash": -6819085015225970000,
"line_mean": 31.1031941032,
"line_max": 85,
"alpha_frac": 0.5538803,
"autogenerated": false,
"ratio": 4.028985507246377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082865807246377,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from glue.core.data_factories.helpers import has_extension
from glue.core.data_factories.pandas import panda_process
from glue.config import data_factory
__all__ = []
@data_factory(label="Excel", identifier=has_extension('xls xlsx'))
def panda_read_excel(path, sheet=None, **kwargs):
""" A factory for reading excel data using pandas.
:param path: path/to/file
:param sheet: The sheet to read. If `None`, all sheets are read.
:param kwargs: All other kwargs are passed to pandas.read_excel
    :return: A list of :class:`~glue.core.data.Data` objects, one per sheet read.
"""
try:
import pandas as pd
except ImportError:
raise ImportError('Pandas is required for Excel input.')
try:
import xlrd
except ImportError:
raise ImportError('xlrd is required for Excel input.')
name = os.path.basename(path)
if '.xls' in name:
name = name.rsplit('.xls', 1)[0]
xl_workbook = xlrd.open_workbook(path)
if sheet is None:
sheet_names = xl_workbook.sheet_names()
else:
sheet_names = [sheet]
all_data = []
for sheet in sheet_names:
indf = pd.read_excel(path, sheet, **kwargs)
data = panda_process(indf)
data.label = "{0}:{1}".format(name, sheet)
all_data.append(data)
return all_data
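# A minimal usage sketch (illustrative, not executed on import): the path
# 'observations.xlsx' is a placeholder. One Data object is returned per sheet,
# labelled "<file>:<sheet>".
def _excel_factory_sketch():  # pragma: no cover
    datasets = panda_read_excel('observations.xlsx')
    for data in datasets:
        print(data.label, data.shape)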
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_factories/excel.py",
"copies": "5",
"size": "1367",
"license": "bsd-3-clause",
"hash": 3723305937751760400,
"line_mean": 26.34,
"line_max": 68,
"alpha_frac": 0.6488661302,
"autogenerated": false,
"ratio": 3.6356382978723403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 50
} |
from __future__ import absolute_import, division, print_function
import os
from glue.core import Subset
from glue.config import data_exporter
__all__ = []
def data_to_astropy_table(data):
if isinstance(data, Subset):
mask = data.to_mask()
data = data.data
else:
mask = None
from astropy.table import Table
table = Table()
for cid in data.visible_components:
comp = data.get_component(cid)
if comp.categorical:
values = comp.labels
else:
values = comp.data
if mask is not None:
values = values[mask]
table[cid.label] = values
return table
def table_exporter(fmt, label, extension):
@data_exporter(label=label, extension=extension)
def factory(data, filename):
if os.path.exists(filename):
os.remove(filename)
return data_to_astropy_table(data).write(filename, format=fmt)
# rename function to its variable reference below
# allows pickling to work
factory.__name__ = '%s_factory' % fmt.replace('.', '_')
return factory
ipac_exporter = table_exporter('ascii.ipac', 'IPAC Catalog', ['tbl'])
latex_exporter = table_exporter('ascii.latex', 'LaTeX Table', ['tex'])
votable_exporter = table_exporter('votable', 'VO Table', ['xml', 'vot'])
fits_exporter = table_exporter('fits', 'FITS Table', ['fits', 'fit'])
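# A minimal usage sketch (illustrative, not executed on import): builds a small
# glue Data object and writes it with one of the exporters defined above. The
# output file name 'catalog.fits' is a placeholder; an existing file with that
# name would be overwritten by the exporter.
def _export_sketch():  # pragma: no cover
    import numpy as np
    from glue.core import Data
    data = Data(ra=np.array([10., 20.]), dec=np.array([-5., 5.]), label='catalog')
    fits_exporter(data, 'catalog.fits')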
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_exporters/astropy_table.py",
"copies": "2",
"size": "1396",
"license": "bsd-3-clause",
"hash": -912792003874775000,
"line_mean": 24.3818181818,
"line_max": 72,
"alpha_frac": 0.6303724928,
"autogenerated": false,
"ratio": 3.7628032345013476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5393175727301347,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from glue.core.subset import Subset
from qtpy import QtWidgets
from glue.utils.qt import load_ui
from glue.external.echo.qt import autoconnect_callbacks_to_qt
class VolumeLayerStyleWidget(QtWidgets.QWidget):
def __init__(self, layer_artist):
super(VolumeLayerStyleWidget, self).__init__()
self.ui = load_ui('layer_style_widget.ui', self,
directory=os.path.dirname(__file__))
self.state = layer_artist.state
if self.state.subset_mode == 'outline':
self.ui.radio_subset_outline.setChecked(True)
else:
self.ui.radio_subset_data.setChecked(True)
self.layer_artist = layer_artist
self.layer = layer_artist.layer
# autoconnect needs to come after setting up the component IDs
connect_kwargs = {'value_alpha': dict(value_range=(0., 1.))}
autoconnect_callbacks_to_qt(self.state, self.ui, connect_kwargs)
# Set up radio buttons for subset mode selection if this is a subset
if isinstance(self.layer, Subset):
self._radio_size = QtWidgets.QButtonGroup()
self._radio_size.addButton(self.ui.radio_subset_outline)
self._radio_size.addButton(self.ui.radio_subset_data)
self.ui.radio_subset_outline.toggled.connect(self._update_subset_mode)
self.ui.radio_subset_data.toggled.connect(self._update_subset_mode)
self.ui.valuetext_vmin.setEnabled(False)
else:
self.ui.radio_subset_outline.hide()
self.ui.radio_subset_data.hide()
self.ui.label_subset_mode.hide()
def _update_subset_mode(self):
if self.ui.radio_subset_outline.isChecked():
self.state.subset_mode = 'outline'
self.ui.valuetext_vmin.hide()
self.ui.valuetext_vmax.hide()
else:
self.state.subset_mode = 'data'
self.ui.valuetext_vmin.show()
self.ui.valuetext_vmax.show()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/volume/layer_style_widget.py",
"copies": "1",
"size": "2060",
"license": "bsd-2-clause",
"hash": 8303252644797740000,
"line_mean": 35.1403508772,
"line_max": 82,
"alpha_frac": 0.6359223301,
"autogenerated": false,
"ratio": 3.759124087591241,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48950464176912406,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from glue.external.modest_image import imshow
from qtpy.QtCore import Qt
from qtpy import QtCore, QtWidgets, QtGui
from glue.core.callback_property import add_callback, delay_callback
from glue import core
from glue.config import viewer_tool
from glue.viewers.image.ds9norm import DS9Normalize
from glue.viewers.image.client import MplImageClient
from glue.viewers.common.qt.mpl_toolbar import MatplotlibViewerToolbar
from glue.viewers.common.qt.mouse_mode import (RectangleMode, CircleMode, PolyMode,
ContrastMode)
from glue.icons.qt import get_icon
from glue.utils.qt.widget_properties import CurrentComboProperty, ButtonProperty, connect_current_combo, _find_combo_data
from glue.viewers.common.qt.data_slice_widget import DataSlice
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.mpl_widget import MplWidget, defer_draw
from glue.utils import nonpartial, Pointer
from glue.utils.qt import cmap2pixmap, update_combobox, load_ui
from glue.viewers.common.qt.tool import Tool
from glue.viewers.scatter.layer_artist import ScatterLayerArtist
from glue.viewers.scatter.qt.layer_style_widget import ScatterLayerStyleWidget
# We do the following import to register the custom Qt Widget there
from glue.viewers.image.qt.rgb_edit import RGBEdit # pylint: disable=W0611
WARN_THRESH = 10000000 # warn when contouring large images
__all__ = ['ImageWidget', 'StandaloneImageWidget', 'ImageWidgetBase']
class ImageWidgetBase(DataViewer):
"""
Widget for ImageClient
This base class avoids any matplotlib-specific logic
"""
LABEL = "Image Viewer"
_property_set = DataViewer._property_set + \
'data attribute rgb_mode rgb_viz ratt gatt batt slice'.split()
attribute = CurrentComboProperty('ui.attributeComboBox',
'Current attribute')
data = CurrentComboProperty('ui.displayDataCombo',
'Current data')
aspect_ratio = CurrentComboProperty('ui.aspectCombo',
'Aspect ratio for image')
rgb_mode = ButtonProperty('ui.rgb',
'RGB Mode?')
rgb_viz = Pointer('ui.rgb_options.rgb_visible')
_layer_style_widget_cls = {ScatterLayerArtist: ScatterLayerStyleWidget}
def __init__(self, session, parent=None):
super(ImageWidgetBase, self).__init__(session, parent)
self._setup_widgets()
self.client = self.make_client()
self._connect()
def _setup_widgets(self):
self.central_widget = self.make_central_widget()
self.label_widget = QtWidgets.QLabel("", self.central_widget)
self.setCentralWidget(self.central_widget)
self.option_widget = QtWidgets.QWidget()
self.ui = load_ui('options_widget.ui', self.option_widget,
directory=os.path.dirname(__file__))
self.ui.slice = DataSlice()
self.ui.slice_layout.addWidget(self.ui.slice)
self._tweak_geometry()
self.ui.aspectCombo.addItem("Square Pixels", userData='equal')
self.ui.aspectCombo.addItem("Automatic", userData='auto')
def make_client(self):
""" Instantiate and return an ImageClient subclass """
raise NotImplementedError()
def make_central_widget(self):
""" Create and return the central widget to display the image """
raise NotImplementedError()
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
self.ui.rgb_options.hide()
self.statusBar().setSizeGripEnabled(False)
self.setFocusPolicy(Qt.StrongFocus)
@defer_draw
def add_data(self, data):
"""
Add a new dataset to the viewer
"""
# overloaded from DataViewer
# need to delay callbacks, otherwise might
# try to set combo boxes to nonexisting items
with delay_callback(self.client, 'display_data', 'display_attribute'):
# If there is not already any image data set, we can't add 1-D
# datasets (tables/catalogs) to the image widget yet.
if data.data.ndim == 1 and self.client.display_data is None:
QtWidgets.QMessageBox.information(self.window(), "Note",
"Cannot create image viewer from a 1-D "
"dataset. You will need to first "
"create an image viewer using data "
"with 2 or more dimensions, after "
"which you will be able to overlay 1-D "
"data as a scatter plot.",
buttons=QtWidgets.QMessageBox.Ok)
return
r = self.client.add_layer(data)
if r is not None and self.client.display_data is not None:
self.add_data_to_combo(data)
if self.client.can_image_data(data):
self.client.display_data = data
self.set_attribute_combo(self.client.display_data)
return r is not None
@defer_draw
def add_subset(self, subset):
self.client.add_scatter_layer(subset)
assert subset in self.client.artists
def add_data_to_combo(self, data):
""" Add a data object to the combo box, if not already present
"""
if not self.client.can_image_data(data):
return
combo = self.ui.displayDataCombo
try:
pos = _find_combo_data(combo, data)
except ValueError:
combo.addItem(data.label, userData=data)
@property
def ratt(self):
"""ComponentID assigned to R channel in RGB Mode"""
return self.ui.rgb_options.attributes[0]
@ratt.setter
def ratt(self, value):
att = list(self.ui.rgb_options.attributes)
att[0] = value
self.ui.rgb_options.attributes = att
@property
def gatt(self):
"""ComponentID assigned to G channel in RGB Mode"""
return self.ui.rgb_options.attributes[1]
@gatt.setter
def gatt(self, value):
att = list(self.ui.rgb_options.attributes)
att[1] = value
self.ui.rgb_options.attributes = att
@property
def batt(self):
"""ComponentID assigned to B channel in RGB Mode"""
return self.ui.rgb_options.attributes[2]
@batt.setter
def batt(self, value):
att = list(self.ui.rgb_options.attributes)
att[2] = value
self.ui.rgb_options.attributes = att
@property
def slice(self):
return self.client.slice
@slice.setter
def slice(self, value):
self.client.slice = value
def set_attribute_combo(self, data):
""" Update attribute combo box to reflect components in data"""
labeldata = ((f.label, f) for f in data.visible_components)
update_combobox(self.ui.attributeComboBox, labeldata)
def _connect(self):
ui = self.ui
ui.monochrome.toggled.connect(self._update_rgb_console)
ui.rgb_options.colors_changed.connect(self.update_window_title)
# sync client and widget slices
ui.slice.slice_changed.connect(lambda: setattr(self, 'slice', self.ui.slice.slice))
update_ui_slice = lambda val: setattr(ui.slice, 'slice', val)
add_callback(self.client, 'slice', update_ui_slice)
add_callback(self.client, 'display_data', self.ui.slice.set_data)
# sync window title to data/attribute
add_callback(self.client, 'display_data', nonpartial(self._display_data_changed))
add_callback(self.client, 'display_attribute', nonpartial(self._display_attribute_changed))
add_callback(self.client, 'display_aspect', nonpartial(self.client._update_aspect))
# sync data/attribute combos with client properties
connect_current_combo(self.client, 'display_data', self.ui.displayDataCombo)
connect_current_combo(self.client, 'display_attribute', self.ui.attributeComboBox)
connect_current_combo(self.client, 'display_aspect', self.ui.aspectCombo)
def _display_data_changed(self):
if self.client.display_data is None:
self.ui.attributeComboBox.clear()
return
with self.client.artists.ignore_empty():
self.set_attribute_combo(self.client.display_data)
self.client.add_layer(self.client.display_data)
self.client._update_and_redraw()
self.update_window_title()
def _display_attribute_changed(self):
if self.client.display_attribute is None:
return
self.client._update_and_redraw()
self.update_window_title()
@defer_draw
def _update_rgb_console(self, is_monochrome):
if is_monochrome:
self.ui.rgb_options.hide()
self.ui.mono_att_label.show()
self.ui.attributeComboBox.show()
self.client.rgb_mode(False)
else:
self.ui.mono_att_label.hide()
self.ui.attributeComboBox.hide()
self.ui.rgb_options.show()
rgb = self.client.rgb_mode(True)
if rgb is not None:
self.ui.rgb_options.artist = rgb
def register_to_hub(self, hub):
super(ImageWidgetBase, self).register_to_hub(hub)
self.client.register_to_hub(hub)
dc_filt = lambda x: x.sender is self.client._data
display_data_filter = lambda x: x.data is self.client.display_data
hub.subscribe(self,
core.message.DataCollectionAddMessage,
handler=lambda x: self.add_data_to_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataCollectionDeleteMessage,
handler=lambda x: self.remove_data_from_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataUpdateMessage,
handler=lambda x: self._sync_data_labels()
)
hub.subscribe(self,
core.message.ComponentsChangedMessage,
handler=lambda x: self.set_attribute_combo(x.data),
filter=display_data_filter)
def unregister(self, hub):
super(ImageWidgetBase, self).unregister(hub)
for obj in [self, self.client]:
hub.unsubscribe_all(obj)
def remove_data_from_combo(self, data):
""" Remove a data object from the combo box, if present """
combo = self.ui.displayDataCombo
pos = combo.findText(data.label)
if pos >= 0:
combo.removeItem(pos)
def _set_norm(self, mode):
""" Use the `ContrastMouseMode` to adjust the transfer function """
# at least one of the clip/vmin pairs will be None
clip_lo, clip_hi = mode.get_clip_percentile()
vmin, vmax = mode.get_vmin_vmax()
stretch = mode.stretch
return self.client.set_norm(clip_lo=clip_lo, clip_hi=clip_hi,
stretch=stretch,
vmin=vmin, vmax=vmax,
bias=mode.bias, contrast=mode.contrast)
@property
def window_title(self):
if self.client.display_data is None or self.client.display_attribute is None:
title = ''
else:
data = self.client.display_data.label
a = self.client.rgb_mode()
if a is None: # monochrome mode
title = "%s - %s" % (self.client.display_data.label,
self.client.display_attribute.label)
else:
r = a.r.label if a.r is not None else ''
g = a.g.label if a.g is not None else ''
b = a.b.label if a.b is not None else ''
title = "%s Red = %s Green = %s Blue = %s" % (data, r, g, b)
return title
def _sync_data_combo_labels(self):
combo = self.ui.displayDataCombo
for i in range(combo.count()):
combo.setItemText(i, combo.itemData(i).label)
def _sync_data_labels(self):
self.update_window_title()
self._sync_data_combo_labels()
def __str__(self):
return "Image Widget"
def _confirm_large_image(self, data):
"""Ask user to confirm expensive operations
:rtype: bool. Whether the user wishes to continue
"""
warn_msg = ("WARNING: Image has %i pixels, and may render slowly."
" Continue?" % data.size)
title = "Contour large image?"
ok = QtWidgets.QMessageBox.Ok
cancel = QtWidgets.QMessageBox.Cancel
buttons = ok | cancel
result = QtWidgets.QMessageBox.question(self, title, warn_msg,
buttons=buttons,
defaultButton=cancel)
return result == ok
def options_widget(self):
return self.option_widget
@defer_draw
def restore_layers(self, rec, context):
with delay_callback(self.client, 'display_data', 'display_attribute'):
self.client.restore_layers(rec, context)
for artist in self.layers:
self.add_data_to_combo(artist.layer.data)
self.set_attribute_combo(self.client.display_data)
self._sync_data_combo_labels()
def closeEvent(self, event):
# close window and all plugins
super(ImageWidgetBase, self).closeEvent(event)
class ImageWidget(ImageWidgetBase):
"""
A matplotlib-based image widget
"""
_toolbar_cls = MatplotlibViewerToolbar
tools = ['select:rectangle', 'select:circle', 'select:polygon',
'image:contrast', 'image:colormap']
def make_client(self):
return MplImageClient(self._data,
self.central_widget.canvas.fig,
layer_artist_container=self._layer_artist_container)
def make_central_widget(self):
return MplWidget()
def initialize_toolbar(self):
super(ImageWidget, self).initialize_toolbar()
# connect viewport update buttons to client commands to
# allow resampling
cl = self.client
self.toolbar.actions['mpl:home'].triggered.connect(nonpartial(cl.check_update))
self.toolbar.actions['mpl:forward'].triggered.connect(nonpartial(cl.check_update))
self.toolbar.actions['mpl:back'].triggered.connect(nonpartial(cl.check_update))
def paintEvent(self, event):
super(ImageWidget, self).paintEvent(event)
pos = self.central_widget.canvas.mapFromGlobal(QtGui.QCursor.pos())
x, y = pos.x(), self.central_widget.canvas.height() - pos.y()
self._update_intensity_label(x, y)
def _intensity_label(self, x, y):
x, y = self.client.axes.transData.inverted().transform([(x, y)])[0]
value = self.client.point_details(x, y)['value']
lbl = '' if value is None else "data: %s" % value
return lbl
def _update_intensity_label(self, x, y):
lbl = self._intensity_label(x, y)
self.label_widget.setText(lbl)
fm = self.label_widget.fontMetrics()
w, h = fm.width(lbl), fm.height()
g = QtCore.QRect(20, self.central_widget.geometry().height() - h, w, h)
self.label_widget.setGeometry(g)
def _connect(self):
super(ImageWidget, self)._connect()
self.ui.rgb_options.current_changed.connect(lambda: self._toolbars[0].set_mode(self._contrast))
self.central_widget.canvas.resize_end.connect(self.client.check_update)
def set_cmap(self, cmap):
self.client.set_cmap(cmap)
class StandaloneImageWidget(QtWidgets.QMainWindow):
"""
A simplified image viewer, without any brushing or linking,
but with the ability to adjust contrast and resample.
"""
window_closed = QtCore.Signal()
_toolbar_cls = MatplotlibViewerToolbar
tools = ['image:contrast', 'image:colormap']
def __init__(self, image=None, wcs=None, parent=None, **kwargs):
"""
:param image: Image to display (2D numpy array)
:param parent: Parent widget (optional)
:param kwargs: Extra keywords to pass to imshow
"""
super(StandaloneImageWidget, self).__init__(parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self._setup_axes()
self._im = None
self._norm = DS9Normalize()
self.initialize_toolbar()
if image is not None:
self.set_image(image=image, wcs=wcs, **kwargs)
def _setup_axes(self):
from glue.viewers.common.viz_client import init_mpl
_, self._axes = init_mpl(self.central_widget.canvas.fig, axes=None, wcs=True)
self._axes.set_aspect('equal', adjustable='datalim')
def set_image(self, image=None, wcs=None, **kwargs):
"""
Update the image shown in the widget
"""
if self._im is not None:
self._im.remove()
self._im = None
kwargs.setdefault('origin', 'upper')
if wcs is not None:
self._axes.reset_wcs(wcs)
self._im = imshow(self._axes, image, norm=self._norm, cmap='gray', **kwargs)
self._im_array = image
self._wcs = wcs
self._redraw()
@property
def axes(self):
"""
        The Matplotlib axes object for this figure
"""
return self._axes
def show(self):
super(StandaloneImageWidget, self).show()
self._redraw()
def _redraw(self):
self.central_widget.canvas.draw()
def set_cmap(self, cmap):
self._im.set_cmap(cmap)
self._redraw()
def mdi_wrap(self):
"""
Embed this widget in a GlueMdiSubWindow
"""
from glue.app.qt.mdi_area import GlueMdiSubWindow
sub = GlueMdiSubWindow()
sub.setWidget(self)
self.destroyed.connect(sub.close)
self.window_closed.connect(sub.close)
sub.resize(self.size())
self._mdi_wrapper = sub
return sub
def closeEvent(self, event):
self.window_closed.emit()
return super(StandaloneImageWidget, self).closeEvent(event)
def _set_norm(self, mode):
""" Use the `ContrastMouseMode` to adjust the transfer function """
clip_lo, clip_hi = mode.get_clip_percentile()
vmin, vmax = mode.get_vmin_vmax()
stretch = mode.stretch
self._norm.clip_lo = clip_lo
self._norm.clip_hi = clip_hi
self._norm.stretch = stretch
self._norm.bias = mode.bias
self._norm.contrast = mode.contrast
self._norm.vmin = vmin
self._norm.vmax = vmax
self._im.set_norm(self._norm)
self._redraw()
def initialize_toolbar(self):
# TODO: remove once Python 2 is no longer supported - see below for
# simpler code.
from glue.config import viewer_tool
self.toolbar = self._toolbar_cls(self)
for tool_id in self.tools:
mode_cls = viewer_tool.members[tool_id]
mode = mode_cls(self)
self.toolbar.add_tool(mode)
self.addToolBar(self.toolbar)
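# A minimal usage sketch of StandaloneImageWidget (illustrative, not executed
# on import): it displays a random 2-D array and assumes no WCS is needed.
def _standalone_image_sketch():  # pragma: no cover
    import numpy as np
    from glue.utils.qt import get_qapp
    app = get_qapp()
    viewer = StandaloneImageWidget(image=np.random.random((128, 128)))
    viewer.show()
    app.exec_()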
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/image/qt/viewer_widget.py",
"copies": "1",
"size": "19555",
"license": "bsd-3-clause",
"hash": -8239916408426576000,
"line_mean": 35.4832089552,
"line_max": 121,
"alpha_frac": 0.6049603682,
"autogenerated": false,
"ratio": 3.9489095315024234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5053869899702423,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from idaskins import UI_DIR
from PyQt5 import uic
from PyQt5.Qt import qApp
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor, QFont, QKeySequence
from PyQt5.QtWidgets import QShortcut, QWidget
Ui_ObjectInspector, ObjectInspectorBase = uic.loadUiType(
os.path.join(UI_DIR, 'ObjectInspector.ui')
)
class ObjectInspector(ObjectInspectorBase):
"""
Rudimentary Qt object inspector.
Allows for easier finding of object names and classes
for usage in QSS stylesheets.
"""
def __init__(self, *args, **kwargs):
super(ObjectInspector, self).__init__(*args, **kwargs)
self._selected_widget = None
self._ui = Ui_ObjectInspector()
self._ui.setupUi(self)
# Make everything monospace.
font = QFont('Monospace')
font.setStyleHint(QFont.TypeWriter)
self._ui.teInspectionResults.setFont(font)
# Register signals.
self._update_key = QShortcut(QKeySequence(Qt.Key_F7), self)
self._ui.btnSelectParent.released.connect(self.select_parent)
self._update_key.activated.connect(self.update_inspection)
def update_inspection(self):
widget = qApp.widgetAt(QCursor.pos())
self.update_selected_widget(widget)
def select_parent(self):
if self._selected_widget:
parent = self._selected_widget.parent()
if parent and parent.inherits('QWidget'):
self.update_selected_widget(parent)
def update_selected_widget(self, widget):
if self._selected_widget:
self._selected_widget.destroyed.disconnect(
self.on_selected_widget_destroyed
)
self._selected_widget = widget
if widget:
self._ui.btnSelectParent.setEnabled(widget.parent() is not None)
self._ui.teInspectionResults.setText((
"Type: {}\n"
"Name: {}\n"
"Number of children: {}\n"
"QSS: {}"
).format(
widget.metaObject().className(),
widget.objectName() or '<none>',
len(widget.children()),
widget.styleSheet() or '<none>',
))
self._selected_widget.destroyed.connect(
self.on_selected_widget_destroyed
)
else:
self._ui.teInspectionResults.setText('<no object under cursor>')
def on_selected_widget_destroyed(self, obj):
self._selected_widget = None
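# A minimal usage sketch (illustrative): inside IDA, with the idaskins plugin
# loaded, the inspector can simply be instantiated and shown; pressing F7 then
# reports the Qt widget currently under the mouse cursor.
def _object_inspector_sketch():  # pragma: no cover
    inspector = ObjectInspector()
    inspector.show()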
| {
"repo_name": "zyantific/IDASkins",
"path": "plugins/idaskins/objectinspector.py",
"copies": "1",
"size": "2576",
"license": "mit",
"hash": 2552390391094649300,
"line_mean": 31.6075949367,
"line_max": 76,
"alpha_frac": 0.6129658385,
"autogenerated": false,
"ratio": 4.031298904538341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.514426474303834,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from odo.backends.hdfstore import discover
from contextlib import contextmanager
from odo.utils import tmpfile
from odo.chunks import chunks
from odo import into, append, convert, resource, discover, odo
import datashape
import pandas as pd
from datetime import datetime
import numpy as np
try:
f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
import pytest
pytest.skip('skipping test_hdfstore.py %s' % e)
else:
f.close()
os.remove('foo')
df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
['ab', 2, 20., datetime(2000, 2, 2)],
['abc', 3, 30., datetime(2000, 3, 3)],
['abcd', 4, 40., datetime(2000, 4, 4)]],
columns=['name', 'a', 'b', 'time'])
@contextmanager
def file(df):
with tmpfile('.hdf5') as fn:
f = pd.HDFStore(fn)
f.put('/data', df, format='table', append=True)
try:
yield fn, f, f.get_storer('/data')
finally:
f.close()
def test_discover():
with file(df) as (fn, f, dset):
assert str(discover(dset)) == str(discover(df))
assert str(discover(f)) == str(discover({'data': df}))
def test_discover_nested():
with tmpfile('hdf5') as fn:
df.to_hdf(fn, '/a/b/data')
df.to_hdf(fn, '/a/b/data2')
df.to_hdf(fn, '/a/data')
hdf = pd.HDFStore(fn)
try:
assert discover(hdf) == discover({'a': {'b': {'data': df, 'data2': df},
'data': df}})
finally:
hdf.close()
def eq(a, b):
if isinstance(a, pd.DataFrame):
a = into(np.ndarray, a)
if isinstance(b, pd.DataFrame):
b = into(np.ndarray, b)
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset)
assert eq(convert(np.ndarray, c), df)
def test_resource_no_info():
with tmpfile('.hdf5') as fn:
r = resource('hdfstore://' + fn)
assert isinstance(r, pd.HDFStore)
r.close()
def test_resource_of_dataset():
with tmpfile('.hdf5') as fn:
ds = datashape.dshape('{x: int32, y: 3 * int32}')
r = resource('hdfstore://'+fn+'::/x', dshape=ds)
assert r
r.parent.close()
def test_append():
with file(df) as (fn, f, dset):
append(dset, df)
append(dset, df)
assert discover(dset).shape == (len(df) * 3,)
def test_into_resource():
with tmpfile('.hdf5') as fn:
d = into('hdfstore://' + fn + '::/x', df)
assert discover(d) == discover(df)
assert eq(into(pd.DataFrame, d), df)
d.parent.close()
def test_convert_pandas():
with file(df) as (fn, f, dset):
assert eq(convert(pd.DataFrame, dset), df)
def test_convert_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset, chunksize=len(df) / 2)
assert len(list(c)) == 2
assert eq(convert(pd.DataFrame, c), df)
def test_append_chunks():
with file(df) as (fn, f, dset):
append(dset, chunks(pd.DataFrame)([df, df]))
assert discover(dset).shape[0] == len(df) * 3
def test_append_other():
with tmpfile('.hdf5') as fn:
x = into(np.ndarray, df)
dset = into('hdfstore://'+fn+'::/data', x)
assert discover(dset) == discover(df)
dset.parent.close()
def test_fixed_shape():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
assert isinstance(r.shape, list)
assert discover(r).shape == (len(df),)
r.parent.close()
def test_fixed_convert():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
assert eq(convert(pd.DataFrame, r), df)
r.parent.close()
def test_append_vs_write():
import pandas.util.testing as tm
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo', append=True)
store = odo(df, 'hdfstore://%s::foo' % fn)
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, pd.concat([df, df]))
with tmpfile('.hdf5') as fn:
store = odo(df, 'hdfstore://%s::foo' % fn, mode='w')
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, df)
| {
"repo_name": "cowlicks/odo",
"path": "odo/backends/tests/test_hdfstore.py",
"copies": "4",
"size": "4599",
"license": "bsd-3-clause",
"hash": 7903338533433737000,
"line_mean": 25.4310344828,
"line_max": 83,
"alpha_frac": 0.5475103283,
"autogenerated": false,
"ratio": 3.2524752475247523,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5799985575824753,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtCore, QtWidgets
from glue.utils.qt import get_qapp
from glue.utils.qt import load_ui, connect_color
from glue.utils.qt.widget_properties import CurrentComboProperty, ValueProperty, connect_value, connect_current_combo
from glue.icons.qt import POINT_ICONS, symbol_icon
class ScatterLayerStyleWidget(QtWidgets.QWidget):
size = ValueProperty('ui.value_size')
symbol = CurrentComboProperty('ui.combo_symbol')
alpha = ValueProperty('ui.slider_alpha', value_range=(0, 1))
def __init__(self, layer_artist):
super(ScatterLayerStyleWidget, self).__init__()
self.ui = load_ui('layer_style_widget.ui', self,
directory=os.path.dirname(__file__))
self._setup_symbol_combo()
self.layer = layer_artist.layer
# Set up connections
self._connect_global()
# Set initial values
self.symbol = self.layer.style.marker
self.size = self.layer.style.markersize
self.ui.label_color.setColor(self.layer.style.color)
self.alpha = self.layer.style.alpha
def _connect_global(self):
connect_current_combo(self.layer.style, 'marker', self.ui.combo_symbol)
connect_value(self.layer.style, 'markersize', self.ui.value_size)
connect_color(self.layer.style, 'color', self.ui.label_color)
connect_value(self.layer.style, 'alpha', self.ui.slider_alpha, value_range=(0, 1))
def _setup_symbol_combo(self):
self._symbols = list(POINT_ICONS.keys())
for idx, symbol in enumerate(self._symbols):
icon = symbol_icon(symbol)
self.ui.combo_symbol.addItem(icon, '', userData=symbol)
self.ui.combo_symbol.setIconSize(QtCore.QSize(16, 16))
if __name__ == "__main__":
app = get_qapp()
options = ScatterLayerStyleWidget()
options.show()
app.exec_()
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/scatter/qt/layer_style_widget.py",
"copies": "2",
"size": "1941",
"license": "bsd-3-clause",
"hash": 6604689418227823000,
"line_mean": 34.2909090909,
"line_max": 117,
"alpha_frac": 0.6640906749,
"autogenerated": false,
"ratio": 3.607806691449814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010272840781315358,
"num_lines": 55
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtCore, QtWidgets
from glue.utils.qt import load_ui
from glue.utils import nonpartial
class ComponentSelector(QtWidgets.QWidget):
""" An interface to view the components and data of a DataCollection
    Components can be dragged and dropped.
The currently-selected componentID is stored in the
Component property. The currently-selected Data is stored in the
Data property.
Usage:
>>> widget = ComponentSelector()
>>> widget.setup(data_collection)
"""
component_changed = QtCore.Signal()
def __init__(self, parent=None):
super(ComponentSelector, self).__init__(parent)
self._data = None
self._ui = load_ui('component_selector.ui', self,
directory=os.path.dirname(__file__))
self._init_widgets()
self._connect()
def _init_widgets(self):
self._ui.component_selector.setDragEnabled(True)
self._ui.setMinimumWidth(300)
def _connect(self):
# attach Qt signals
ds = self._ui.data_selector
ds.currentIndexChanged.connect(nonpartial(self._set_components))
self._ui.component_selector.currentItemChanged.connect(
lambda *args: self.component_changed.emit())
def set_current_row(self, row):
"""Select which component is selected
:param row: Row number
"""
self._ui.component_selector.setCurrentRow(row)
def set_data_row(self, row):
"""Select which data object is selected
:param row: Row number
"""
self._ui.data_selector.setCurrentIndex(row)
def setup(self, data_collection):
""" Set up the widgets.
:param data_collection: Object to browse
        :type data_collection:
:class:`~glue.core.data_collection.DataCollection`
"""
self._data = data_collection
self._set_data()
self._set_components()
def _set_components(self):
""" Set list of component widgets to match current data set """
index = self._ui.data_selector.currentIndex()
if index < 0:
return
data = self._data[index]
cids = data.components
c_list = self._ui.component_selector
c_list.clear()
for c in cids:
item = QtWidgets.QListWidgetItem(c.label)
c_list.addItem(item)
c_list.set_data(item, c)
def _set_data(self):
""" Populate the data list with data sets in the collection """
d_list = self._ui.data_selector
for d in self._data:
d_list.addItem(d.label)
@property
def component(self):
"""Returns the currently-selected ComponentID
:rtype: :class:`~glue.core.data.ComponentID`
"""
item = self._ui.component_selector.currentItem()
return self._ui.component_selector.get_data(item)
@component.setter
def component(self, component):
w = self._ui.component_selector
for i in range(w.count()):
item = w.item(i)
if w.get_data(item) is component:
w.setCurrentRow(i)
return
else:
raise ValueError("Component not found: %s" % component)
@property
def data(self):
index = self._ui.data_selector.currentIndex()
if index < 0:
return
return self._data[index]
@data.setter
def data(self, value):
for i, d in enumerate(self._data):
if d is value:
self._ui.data_selector.setCurrentIndex(i)
return
else:
raise ValueError("Data is not part of the DataCollection")
def main(): # pragma: no cover
import glue
import numpy as np
from glue.utils.qt import get_qapp
d = glue.core.Data(label="hi")
d2 = glue.core.Data(label="there")
c1 = glue.core.Component(np.array([1, 2, 3]))
c2 = glue.core.Component(np.array([1, 2, 3]))
c3 = glue.core.Component(np.array([1, 2, 3]))
dc = glue.core.DataCollection()
dc.append(d)
dc.append(d2)
d.add_component(c1, "a")
d.add_component(c2, "b")
d2.add_component(c3, "c")
app = get_qapp()
w = ComponentSelector()
w.setup(dc)
w.show()
app.exec_()
if __name__ == "__main__": # pragma: no cover
main()
| {
"repo_name": "saimn/glue",
"path": "glue/dialogs/common/qt/component_selector.py",
"copies": "1",
"size": "4412",
"license": "bsd-3-clause",
"hash": -1965189886523367700,
"line_mean": 27.8366013072,
"line_max": 72,
"alpha_frac": 0.5947416138,
"autogenerated": false,
"ratio": 3.8838028169014085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9977849117114981,
"avg_score": 0.00013906271728549575,
"num_lines": 153
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtCore, QtWidgets
from qtpy.QtCore import Qt
from glue.utils.qt import load_ui
from glue.utils import nonpartial
class ComponentSelector(QtWidgets.QWidget):
""" An interface to view the components and data of a DataCollection
    Components can be dragged and dropped.
The currently-selected componentID is stored in the
Component property. The currently-selected Data is stored in the
Data property.
Usage:
>>> widget = ComponentSelector()
>>> widget.setup(data_collection)
"""
component_changed = QtCore.Signal()
def __init__(self, parent=None):
super(ComponentSelector, self).__init__(parent)
self._data = None
self._ui = load_ui('component_selector.ui', self,
directory=os.path.dirname(__file__))
self._init_widgets()
self._connect()
def _init_widgets(self):
self._ui.component_selector.setDragEnabled(True)
self._ui.setMinimumWidth(300)
def _connect(self):
# attach Qt signals
ds = self._ui.data_selector
ds.currentIndexChanged.connect(nonpartial(self._set_components))
self._ui.component_selector.currentItemChanged.connect(
lambda *args: self.component_changed.emit())
def set_current_row(self, row):
"""Select which component is selected
:param row: Row number
"""
self._ui.component_selector.setCurrentRow(row)
def set_data_row(self, row):
"""Select which data object is selected
:param row: Row number
"""
self._ui.data_selector.setCurrentIndex(row)
def setup(self, data_collection):
""" Set up the widgets.
:param data_collection: Object to browse
        :type data_collection:
:class:`~glue.core.data_collection.DataCollection`
"""
self._data = data_collection
self._set_data()
self._set_components()
def _set_components(self):
""" Set list of component widgets to match current data set """
index = self._ui.data_selector.currentIndex()
if index < 0:
return
data = self._data[index]
c_list = self._ui.component_selector
c_list.clear()
# Coordinate components
if len(data.coordinate_components) > 0:
item = QtWidgets.QListWidgetItem('Coordinate components')
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
c_list.addItem(item)
for c in data.coordinate_components:
item = QtWidgets.QListWidgetItem(c.label)
c_list.addItem(item)
c_list.set_data(item, c)
if len(set(data.primary_components) - set(data.coordinate_components)) > 0:
item = QtWidgets.QListWidgetItem('Main components')
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
c_list.addItem(item)
for c in data.primary_components:
if c not in data.coordinate_components:
item = QtWidgets.QListWidgetItem(c.label)
c_list.addItem(item)
c_list.set_data(item, c)
# We allow 'hidden' components because we want to show things like coordinates,
# but we don't want to include hidden AND derived components which are
# generated from links.
if len(set(data.derived_components) & set(data.visible_components)) > 0:
item = QtWidgets.QListWidgetItem('Derived components')
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
c_list.addItem(item)
for c in data.derived_components:
if not c.hidden:
item = QtWidgets.QListWidgetItem(c.label)
c_list.addItem(item)
c_list.set_data(item, c)
def _set_data(self):
""" Populate the data list with data sets in the collection """
d_list = self._ui.data_selector
for d in self._data:
d_list.addItem(d.label)
@property
def component(self):
"""Returns the currently-selected ComponentID
:rtype: :class:`~glue.core.data.ComponentID`
"""
item = self._ui.component_selector.currentItem()
return self._ui.component_selector.get_data(item)
@component.setter
def component(self, component):
w = self._ui.component_selector
for i in range(w.count()):
item = w.item(i)
if w.get_data(item) is component:
w.setCurrentRow(i)
return
else:
raise ValueError("Component not found: %s" % component)
@property
def data(self):
index = self._ui.data_selector.currentIndex()
if index < 0:
return
return self._data[index]
@data.setter
def data(self, value):
for i, d in enumerate(self._data):
if d is value:
self._ui.data_selector.setCurrentIndex(i)
return
else:
raise ValueError("Data is not part of the DataCollection")
def main(): # pragma: no cover
import glue
import numpy as np
from glue.utils.qt import get_qapp
d = glue.core.Data(label="hi")
d2 = glue.core.Data(label="there")
c1 = glue.core.Component(np.array([1, 2, 3]))
c2 = glue.core.Component(np.array([1, 2, 3]))
c3 = glue.core.Component(np.array([1, 2, 3]))
dc = glue.core.DataCollection()
dc.append(d)
dc.append(d2)
d.add_component(c1, "a")
d.add_component(c2, "b")
d2.add_component(c3, "c")
app = get_qapp()
w = ComponentSelector()
w.setup(dc)
w.show()
app.exec_()
if __name__ == "__main__": # pragma: no cover
main()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/common/qt/component_selector.py",
"copies": "3",
"size": "5850",
"license": "bsd-3-clause",
"hash": -4024027942271601000,
"line_mean": 31.1428571429,
"line_max": 87,
"alpha_frac": 0.592991453,
"autogenerated": false,
"ratio": 3.979591836734694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003125861155243121,
"num_lines": 182
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtGui, compat
from glue.viewers.common.qt.tool import Tool, CheckableTool
from glue.config import viewer_tool
from ..extern.vispy import app, io
RECORD_START_ICON = os.path.join(os.path.dirname(__file__), 'glue_record_start.png')
RECORD_STOP_ICON = os.path.join(os.path.dirname(__file__), 'glue_record_stop.png')
ROTATE_ICON = os.path.join(os.path.dirname(__file__), 'glue_rotate.png')
@viewer_tool
class ResetTool(Tool):
icon = 'glue_home'
tool_id = 'vispy:reset'
action_text = 'Reset the view'
tool_tip = 'Reset the view'
def activate(self):
self.viewer._vispy_widget.view.camera.reset()
self.viewer._vispy_widget._toggle_perspective()
@viewer_tool
class SaveTool(Tool):
icon = 'glue_filesave'
tool_id = 'vispy:save'
action_text = 'Save the figure'
tool_tip = 'Save the figure'
shortcut = 'Ctrl+Shift+S'
def activate(self):
outfile, file_filter = compat.getsavefilename(caption='Save File',
filters='PNG Files (*.png);;'
'JPEG Files (*.jpeg);;'
'TIFF Files (*.tiff);;')
# This indicates that the user cancelled
if not outfile:
return
img = self.viewer._vispy_widget.canvas.render()
try:
file_filter = str(file_filter).split()[0]
io.imsave(outfile, img, format=file_filter)
except ImportError:
            # TODO: show a dialog notifying the user that only the .png format is supported
if '.' not in outfile:
outfile += '.png'
io.write_png(outfile, img)
@viewer_tool
class RecordTool(Tool):
icon = RECORD_START_ICON
tool_id = 'vispy:record'
action_text = 'Record an animation'
tool_tip = 'Start/Stop the recording'
def __init__(self, viewer):
super(RecordTool, self).__init__(viewer=viewer)
self.record_timer = app.Timer(connect=self.record)
self.writer = None
self.next_action = 'start'
def activate(self):
if self.next_action == 'start':
# pop up a window for file saving
outfile, file_filter = compat.getsavefilename(caption='Save Animation',
filters='GIF Files (*.gif);;')
# if outfile is not set, the user cancelled
if outfile:
import imageio
self.set_icon(RECORD_STOP_ICON)
self.writer = imageio.get_writer(outfile)
self.record_timer.start(0.1)
self.next_action = 'stop'
else:
self.record_timer.stop()
if self.writer is not None:
self.writer.close()
self.set_icon(RECORD_START_ICON)
self.next_action = 'start'
def set_icon(self, icon):
self.viewer.toolbar.actions[self.tool_id].setIcon(QtGui.QIcon(icon))
def record(self, event):
im = self.viewer._vispy_widget.canvas.render()
self.writer.append_data(im)
@viewer_tool
class RotateTool(CheckableTool):
icon = ROTATE_ICON
tool_id = 'vispy:rotate'
action_text = 'Continuously rotate view'
tool_tip = 'Start/Stop rotation'
timer = None
def activate(self):
if self.timer is None:
self.timer = app.Timer(connect=self.rotate)
self.timer.start(0.1)
def deactivate(self):
self.timer.stop()
def rotate(self, event):
self.viewer._vispy_widget.view.camera.azimuth -= 1. # set speed as constant first
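# A minimal sketch of the recording pattern used by RecordTool above, outside
# of glue/vispy (illustrative): the frames here are random arrays rather than
# rendered canvases, and 'animation.gif' is a placeholder path.
def _record_pattern_sketch():  # pragma: no cover
    import imageio
    import numpy as np
    writer = imageio.get_writer('animation.gif')
    try:
        for _ in range(10):
            frame = (np.random.random((64, 64, 3)) * 255).astype('uint8')
            writer.append_data(frame)
    finally:
        writer.close()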
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/common/tools.py",
"copies": "1",
"size": "3759",
"license": "bsd-2-clause",
"hash": 7443118557678038000,
"line_mean": 29.314516129,
"line_max": 90,
"alpha_frac": 0.5746209098,
"autogenerated": false,
"ratio": 3.831804281345566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9902218207890643,
"avg_score": 0.0008413966509846421,
"num_lines": 124
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtGui, compat
try:
from glue.viewers.common.tool import Tool, CheckableTool
except ImportError: # glue-core <0.15
from glue.viewers.common.qt.tool import Tool, CheckableTool
from glue.config import viewer_tool
from ..extern.vispy import app, io
RECORD_START_ICON = os.path.join(os.path.dirname(__file__), 'glue_record_start.png')
RECORD_STOP_ICON = os.path.join(os.path.dirname(__file__), 'glue_record_stop.png')
ROTATE_ICON = os.path.join(os.path.dirname(__file__), 'glue_rotate.png')
@viewer_tool
class ResetTool(Tool):
icon = 'glue_home'
tool_id = 'vispy:reset'
action_text = 'Reset the view'
tool_tip = 'Reset the view'
def activate(self):
self.viewer._vispy_widget.view.camera.reset()
self.viewer._vispy_widget._toggle_perspective()
self.viewer.state.reset_limits()
@viewer_tool
class SaveTool(Tool):
icon = 'glue_filesave'
tool_id = 'vispy:save'
action_text = 'Save the figure to a file'
tool_tip = 'Save the figure to a file'
def activate(self):
outfile, file_filter = compat.getsavefilename(caption='Save File',
filters='PNG Files (*.png);;'
'JPEG Files (*.jpeg);;'
'TIFF Files (*.tiff);;',
selectedfilter='PNG Files (*.png);;')
# This indicates that the user cancelled
if not outfile:
return
img = self.viewer._vispy_widget.canvas.render()
try:
file_filter = str(file_filter).split()[0]
io.imsave(outfile, img, format=file_filter)
except ImportError:
            # TODO: show a dialog notifying the user that only the .png format is supported
if '.' not in outfile:
outfile += '.png'
io.write_png(outfile, img)
@viewer_tool
class RecordTool(CheckableTool):
icon = RECORD_START_ICON
tool_id = 'vispy:record'
action_text = 'Record an animation'
tool_tip = 'Start/Stop the recording'
def __init__(self, viewer):
super(RecordTool, self).__init__(viewer=viewer)
self.record_timer = app.Timer(connect=self.record)
self.writer = None
self.next_action = 'start'
def activate(self):
# pop up a window for file saving
outfile, file_filter = compat.getsavefilename(caption='Save Animation',
filters='GIF Files (*.gif);;')
# if outfile is not set, the user cancelled
if outfile:
import imageio
self.set_icon(RECORD_STOP_ICON)
self.writer = imageio.get_writer(outfile)
self.record_timer.start(0.1)
def deactivate(self):
self.record_timer.stop()
if self.writer is not None:
self.writer.close()
self.set_icon(RECORD_START_ICON)
def set_icon(self, icon):
self.viewer.toolbar.actions[self.tool_id].setIcon(QtGui.QIcon(icon))
def record(self, event):
im = self.viewer._vispy_widget.canvas.render()
self.writer.append_data(im)
@viewer_tool
class RotateTool(CheckableTool):
icon = ROTATE_ICON
tool_id = 'vispy:rotate'
action_text = 'Continuously rotate view'
tool_tip = 'Start/Stop rotation'
timer = None
def activate(self):
if self.timer is None:
self.timer = app.Timer(connect=self.rotate)
self.timer.start(0.1)
def deactivate(self):
self.timer.stop()
def rotate(self, event):
self.viewer._vispy_widget.view.camera.azimuth -= 1. # set speed as constant first
| {
"repo_name": "astrofrog/glue-3d-viewer",
"path": "glue_vispy_viewers/common/tools.py",
"copies": "2",
"size": "3840",
"license": "bsd-2-clause",
"hash": -2273169019433526800,
"line_mean": 29.4761904762,
"line_max": 91,
"alpha_frac": 0.5859375,
"autogenerated": false,
"ratio": 3.8057482656095143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5391685765609514,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtWidgets
from glue import core
from glue.plugins.dendro_viewer.client import DendroClient
from glue.viewers.common.qt.mpl_toolbar import MatplotlibViewerToolbar
from glue.viewers.common.qt.mouse_mode import PickMode
from glue.utils.qt import load_ui
from glue.utils.qt.widget_properties import (ButtonProperty, CurrentComboProperty,
connect_bool_button, connect_current_combo)
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.mpl_widget import MplWidget, defer_draw
from glue.utils import nonpartial
class DendroWidget(DataViewer):
"""
An interactive dendrogram display
"""
LABEL = 'Dendrogram'
_property_set = DataViewer._property_set + \
'ylog height parent order'.split()
ylog = ButtonProperty('ui.ylog', 'log scaling on y axis?')
height = CurrentComboProperty('ui.heightCombo', 'height attribute')
parent = CurrentComboProperty('ui.parentCombo', 'parent attribute')
order = CurrentComboProperty('ui.orderCombo', 'layout sorter attribute')
_toolbar_cls = MatplotlibViewerToolbar
tools = ['Pick']
def __init__(self, session, parent=None):
super(DendroWidget, self).__init__(session, parent)
self.central_widget = MplWidget()
self.option_widget = QtWidgets.QWidget()
self.setCentralWidget(self.central_widget)
self.ui = load_ui('options_widget.ui', self.option_widget,
directory=os.path.dirname(__file__))
self.client = DendroClient(self._data,
self.central_widget.canvas.fig,
layer_artist_container=self._layer_artist_container)
self._connect()
self.initialize_toolbar()
self.statusBar().setSizeGripEnabled(False)
def _connect(self):
ui = self.ui
cl = self.client
connect_bool_button(cl, 'ylog', ui.ylog)
connect_current_combo(cl, 'parent_attr', ui.parentCombo)
connect_current_combo(cl, 'height_attr', ui.heightCombo)
connect_current_combo(cl, 'order_attr', ui.orderCombo)
def initialize_toolbar(self):
super(DendroWidget, self).initialize_toolbar()
def on_move(mode):
if mode._drag:
self.client.apply_roi(mode.roi())
self.toolbar.tools['Pick']._move_callback = on_move
def apply_roi(self, roi):
self.client.apply_roi(roi)
def _update_combos(self, data=None):
data = data or self.client.display_data
if data is None:
return
for combo in [self.ui.heightCombo,
self.ui.parentCombo,
self.ui.orderCombo]:
combo.blockSignals(True)
ids = []
idx = combo.currentIndex()
old = combo.itemData(idx) if idx > 0 else None
combo.clear()
for cid in data.components:
if cid.hidden and cid is not data.pixel_component_ids[0]:
continue
combo.addItem(cid.label, userData=cid)
ids.append(cid)
try:
combo.setCurrentIndex(ids.index(old))
except ValueError:
combo.setCurrentIndex(0)
combo.blockSignals(False)
def add_data(self, data):
"""Add a new data set to the widget
        :returns: True if the addition was accepted, False otherwise
"""
if data in self.client:
return
self._update_combos(data)
self.client.add_layer(data)
return True
def add_subset(self, subset):
"""Add a subset to the widget
:returns: True if the addition was accepted, False otherwise
"""
self.add_data(subset.data)
if subset.data in self.client:
self.client.add_layer(subset)
return True
def register_to_hub(self, hub):
super(DendroWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self, core.message.ComponentsChangedMessage,
                      nonpartial(self._update_combos))
def unregister(self, hub):
super(DendroWidget, self).unregister(hub)
hub.unsubscribe_all(self.client)
hub.unsubscribe_all(self)
def options_widget(self):
return self.option_widget
@defer_draw
def restore_layers(self, rec, context):
from glue.core.callback_property import delay_callback
with delay_callback(self.client, 'height_attr',
'parent_attr',
'order_attr'):
self.client.restore_layers(rec, context)
self._update_combos()
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/dendro_viewer/qt/viewer_widget.py",
"copies": "1",
"size": "4872",
"license": "bsd-3-clause",
"hash": 1916025047225315800,
"line_mean": 32.1428571429,
"line_max": 87,
"alpha_frac": 0.6063218391,
"autogenerated": false,
"ratio": 4.076987447698745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5183309286798744,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtWidgets
from glue import core
from glue.utils import nonpartial
from glue.utils.qt import load_ui
__all__ = ['LinkEditor']
class LinkEditor(QtWidgets.QDialog):
def __init__(self, collection, functions=None, parent=None):
super(LinkEditor, self).__init__(parent=parent)
self._collection = collection
self._ui = load_ui('link_editor.ui', self,
directory=os.path.dirname(__file__))
self._init_widgets()
self._connect()
if len(collection) > 1:
self._ui.right_components.set_data_row(1)
self._size = None
def _init_widgets(self):
self._ui.left_components.setup(self._collection)
self._ui.right_components.setup(self._collection)
self._ui.signature_editor.hide()
for link in self._collection.links:
self._add_link(link)
def _connect(self):
self._ui.add_link.clicked.connect(nonpartial(self._add_new_link))
self._ui.remove_link.clicked.connect(nonpartial(self._remove_link))
self._ui.toggle_editor.clicked.connect(nonpartial(self._toggle_advanced))
self._ui.signature_editor._ui.addButton.clicked.connect(nonpartial(self._add_new_link))
@property
def advanced(self):
return self._ui.signature_editor.isVisible()
@advanced.setter
def advanced(self, state):
"""Set whether the widget is in advanced state"""
self._ui.signature_editor.setVisible(state)
self._ui.toggle_editor.setText("Basic" if state else "Advanced")
def _toggle_advanced(self):
"""Show or hide the signature editor widget"""
self.advanced = not self.advanced
def _selected_components(self):
result = []
id1 = self._ui.left_components.component
id2 = self._ui.right_components.component
if id1:
result.append(id1)
if id2:
result.append(id2)
return result
def _simple_links(self):
"""Return identity links which connect the highlighted items
in each component selector.
Returns:
A list of :class:`~glue.core.ComponentLink` objects
If items are not selected in the component selectors,
an empty list is returned
"""
comps = self._selected_components()
if len(comps) != 2:
return []
assert isinstance(comps[0], core.data.ComponentID), comps[0]
assert isinstance(comps[1], core.data.ComponentID), comps[1]
link1 = core.component_link.ComponentLink([comps[0]], comps[1])
return [link1]
def _add_link(self, link):
current = self._ui.current_links
item = QtWidgets.QListWidgetItem(str(link))
current.addItem(item)
item.setHidden(link.hidden)
current.set_data(item, link)
def _add_new_link(self):
if not self.advanced:
links = self._simple_links()
else:
links = self._ui.signature_editor.links()
self._ui.signature_editor.clear_inputs()
for link in links:
self._add_link(link)
def links(self):
current = self._ui.current_links
return current.data.values()
def _remove_link(self):
current = self._ui.current_links
item = current.currentItem()
row = current.currentRow()
if item is None:
return
current.drop_data(item)
deleted = current.takeItem(row)
assert deleted == item # sanity check
@classmethod
def update_links(cls, collection):
widget = cls(collection)
isok = widget._ui.exec_()
if isok:
links = widget.links()
collection.set_links(links)
def main():
import numpy as np
from glue.utils.qt import get_qapp
from glue.core import Data, DataCollection
app = get_qapp()
x = np.array([1, 2, 3])
d = Data(label='data', x=x, y=x * 2)
dc = DataCollection(d)
LinkEditor.update_links(dc)
if __name__ == "__main__":
main()
| {
"repo_name": "saimn/glue",
"path": "glue/dialogs/link_editor/qt/link_editor.py",
"copies": "1",
"size": "4149",
"license": "bsd-3-clause",
"hash": -2959285724988993500,
"line_mean": 29.0652173913,
"line_max": 95,
"alpha_frac": 0.6080983369,
"autogenerated": false,
"ratio": 3.899436090225564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000432237867064661,
"num_lines": 138
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from glue import core
from glue.utils import nonpartial
from glue.utils.qt import load_ui, HtmlItemDelegate
__all__ = ['LinkEditor']
class LinkEditor(QtWidgets.QDialog):
def __init__(self, collection, functions=None, parent=None):
super(LinkEditor, self).__init__(parent=parent)
self._collection = collection
self._ui = load_ui('link_editor.ui', self,
directory=os.path.dirname(__file__))
self._html_item_delegate = HtmlItemDelegate(self._ui.current_links)
self._ui.current_links.setItemDelegate(self._html_item_delegate)
self._ui.current_links.setWordWrap(False)
self._init_widgets()
self._connect()
if len(collection) > 1:
self._ui.right_components.set_data_row(1)
self._size = None
def _init_widgets(self):
self._ui.left_components.setup(self._collection)
self._ui.right_components.setup(self._collection)
self._ui.signature_editor.hide()
for link in self._collection.links:
self._add_link(link)
def _connect(self):
self._ui.add_link.clicked.connect(nonpartial(self._add_new_link))
self._ui.remove_link.clicked.connect(nonpartial(self._remove_link))
self._ui.toggle_editor.clicked.connect(nonpartial(self._toggle_advanced))
@property
def advanced(self):
return self._ui.signature_editor.isVisible()
@advanced.setter
def advanced(self, state):
"""Set whether the widget is in advanced state"""
self._ui.signature_editor.setVisible(state)
self._ui.toggle_editor.setText("Basic linking" if state else "Advanced linking")
def _toggle_advanced(self):
"""Show or hide the signature editor widget"""
self.advanced = not self.advanced
def _selected_components(self):
result = []
id1 = self._ui.left_components.component
id2 = self._ui.right_components.component
if id1:
result.append(id1)
if id2:
result.append(id2)
return result
def _simple_links(self):
"""Return identity links which connect the highlighted items
in each component selector.
Returns:
A list of :class:`~glue.core.ComponentLink` objects
If items are not selected in the component selectors,
an empty list is returned
"""
comps = self._selected_components()
if len(comps) != 2:
return []
assert isinstance(comps[0], core.data.ComponentID), comps[0]
assert isinstance(comps[1], core.data.ComponentID), comps[1]
link1 = core.component_link.ComponentLink([comps[0]], comps[1])
return [link1]
def _add_link(self, link):
current = self._ui.current_links
item = QtWidgets.QListWidgetItem(link.to_html())
item.setTextAlignment(Qt.AlignCenter)
current.addItem(item)
item.setHidden(link.hidden)
current.set_data(item, link)
def _add_new_link(self):
if not self.advanced:
links = self._simple_links()
else:
links = self._ui.signature_editor.links()
self._ui.signature_editor.clear_inputs()
for link in links:
self._add_link(link)
def links(self):
current = self._ui.current_links
return current.data.values()
def _remove_link(self):
current = self._ui.current_links
item = current.currentItem()
row = current.currentRow()
if item is None:
return
current.drop_data(item)
deleted = current.takeItem(row)
assert deleted == item # sanity check
@classmethod
def update_links(cls, collection):
widget = cls(collection)
isok = widget._ui.exec_()
if isok:
links = widget.links()
collection.set_links(links)
def main():
import numpy as np
from glue.utils.qt import get_qapp
from glue.core import Data, DataCollection
app = get_qapp()
x = np.array([1, 2, 3])
d = Data(label='data', x=x, y=x * 2)
dc = DataCollection(d)
LinkEditor.update_links(dc)
if __name__ == "__main__":
main()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/link_editor/qt/link_editor.py",
"copies": "3",
"size": "4366",
"license": "bsd-3-clause",
"hash": -8995892236241781000,
"line_mean": 29.3194444444,
"line_max": 88,
"alpha_frac": 0.6142922584,
"autogenerated": false,
"ratio": 3.901697944593387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004199174586419189,
"num_lines": 144
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtWidgets
from glue.external.echo.qt import autoconnect_callbacks_to_qt
from glue.utils.qt import load_ui, fix_tab_widget_fontsize
from glue.viewers.image.qt.slice_widget import MultiSliceWidgetHelper
__all__ = ['ImageOptionsWidget']
class ImageOptionsWidget(QtWidgets.QWidget):
def __init__(self, viewer_state, session, parent=None):
super(ImageOptionsWidget, self).__init__(parent=parent)
self.ui = load_ui('options_widget.ui', self,
directory=os.path.dirname(__file__))
fix_tab_widget_fontsize(self.ui.tab_widget)
self.ui.combodata_aspect.addItem("Square Pixels", userData='equal')
self.ui.combodata_aspect.addItem("Automatic", userData='auto')
self.ui.combodata_aspect.setCurrentIndex(0)
self.ui.combotext_color_mode.addItem("Colormaps")
self.ui.combotext_color_mode.addItem("One color per layer")
autoconnect_callbacks_to_qt(viewer_state, self.ui)
self.viewer_state = viewer_state
self.slice_helper = MultiSliceWidgetHelper(viewer_state=self.viewer_state,
layout=self.ui.layout_slices)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/image/qt/options_widget.py",
"copies": "1",
"size": "1267",
"license": "bsd-3-clause",
"hash": 1050973563412401700,
"line_mean": 33.2432432432,
"line_max": 82,
"alpha_frac": 0.6756116811,
"autogenerated": false,
"ratio": 3.7485207100591715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49241323911591717,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtWidgets
from glue.utils import nonpartial
from glue.utils.qt import load_ui
from glue.external.echo.qt import autoconnect_callbacks_to_qt
class ScatterLayerStyleWidget(QtWidgets.QWidget):
def __init__(self, layer_artist):
super(ScatterLayerStyleWidget, self).__init__()
self.ui = load_ui('layer_style_widget.ui', self,
directory=os.path.dirname(__file__))
self.state = layer_artist.state
self.layer_artist = layer_artist
self.layer = layer_artist.layer
connect_kwargs = {'value_alpha': dict(value_range=(0., 1.)),
'value_size_scaling': dict(value_range=(0.1, 10), log=True)}
autoconnect_callbacks_to_qt(self.state, self.ui, connect_kwargs)
# Set initial values
self._update_size_mode()
self._update_color_mode()
self.state.add_callback('color_mode', nonpartial(self._update_color_mode))
self.state.add_callback('size_mode', nonpartial(self._update_size_mode))
def _update_size_mode(self):
if self.state.size_mode == "Fixed":
self.ui.size_row_2.hide()
self.ui.combosel_size_attribute.hide()
self.ui.valuetext_size.show()
else:
self.ui.valuetext_size.hide()
self.ui.combosel_size_attribute.show()
self.ui.size_row_2.show()
def _update_color_mode(self):
if self.state.color_mode == "Fixed":
self.ui.color_row_2.hide()
self.ui.color_row_3.hide()
self.ui.combosel_cmap_attribute.hide()
self.ui.spacer_color_label.show()
self.ui.color_color.show()
else:
self.ui.color_color.hide()
self.ui.combosel_cmap_attribute.show()
self.ui.spacer_color_label.hide()
self.ui.color_row_2.show()
self.ui.color_row_3.show()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/scatter/layer_style_widget.py",
"copies": "1",
"size": "1997",
"license": "bsd-2-clause",
"hash": 7740161033935505000,
"line_mean": 31.737704918,
"line_max": 86,
"alpha_frac": 0.6014021032,
"autogenerated": false,
"ratio": 3.515845070422535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9614305524060487,
"avg_score": 0.0005883299124097251,
"num_lines": 61
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtWidgets
from glue.utils.qt import load_ui
from glue.external.echo.qt import autoconnect_callbacks_to_qt
from glue_vispy_viewers.utils import fix_tab_widget_fontsize
class ScatterLayerStyleWidget(QtWidgets.QWidget):
def __init__(self, layer_artist):
super(ScatterLayerStyleWidget, self).__init__()
self.ui = load_ui('layer_style_widget.ui', self,
directory=os.path.dirname(__file__))
fix_tab_widget_fontsize(self.ui.tab_widget)
self.state = layer_artist.state
self.layer_artist = layer_artist
self.layer = layer_artist.layer
connect_kwargs = {'value_alpha': dict(value_range=(0., 1.)),
'value_size_scaling': dict(value_range=(0.1, 10), log=True)}
self._connections = autoconnect_callbacks_to_qt(self.state, self.ui, connect_kwargs)
# Set initial values
self._update_size_mode()
self._update_color_mode()
self.state.add_callback('color_mode', self._update_color_mode)
self.state.add_callback('size_mode', self._update_size_mode)
def _update_size_mode(self, *args):
if self.state.size_mode == "Fixed":
self.ui.size_row_2.hide()
self.ui.combosel_size_attribute.hide()
self.ui.valuetext_size.show()
else:
self.ui.valuetext_size.hide()
self.ui.combosel_size_attribute.show()
self.ui.size_row_2.show()
def _update_color_mode(self, *args):
if self.state.color_mode == "Fixed":
self.ui.color_row_2.hide()
self.ui.color_row_3.hide()
self.ui.combosel_cmap_attribute.hide()
self.ui.spacer_color_label.show()
self.ui.color_color.show()
else:
self.ui.color_color.hide()
self.ui.combosel_cmap_attribute.show()
self.ui.spacer_color_label.hide()
self.ui.color_row_2.show()
self.ui.color_row_3.show()
| {
"repo_name": "astrofrog/glue-3d-viewer",
"path": "glue_vispy_viewers/scatter/layer_style_widget.py",
"copies": "2",
"size": "2088",
"license": "bsd-2-clause",
"hash": 2725176566554368000,
"line_mean": 31.625,
"line_max": 92,
"alpha_frac": 0.6039272031,
"autogenerated": false,
"ratio": 3.4742096505823628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00034760845383759734,
"num_lines": 64
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy.QtCore import Qt
from qtpy import QtCore, QtWidgets
from glue.core.application_base import ViewerBase
from glue.core.qt.layer_artist_model import QtLayerArtistContainer, LayerArtistWidget
from glue.utils.qt import get_qapp
from glue.core.qt.mime import LAYERS_MIME_TYPE, LAYER_MIME_TYPE
from glue.utils.qt import set_cursor
from glue.config import settings
from glue.external import six
from glue.utils.noconflict import classmaker
__all__ = ['DataViewer']
class ToolbarInitializer(object):
"""
This is a meta-class which ensures that initialize_toolbar is always called
on DataViewer instances and sub-class instances after all the __init__ code
has been executed. We need to do this, because often the toolbar can only
be initialized after everything else (e.g. canvas, etc.) has been set up,
so we can't do it in DataViewer.__init__.
"""
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.initialize_toolbar()
return obj
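# For illustration only: a stripped-down, standalone version of the pattern the
# ToolbarInitializer docstring describes, using hypothetical names (PostInitMeta,
# FakeViewer). Because a metaclass __call__ runs after the instance has been
# fully constructed, the hook always sees a completely initialized object:
#
#     class PostInitMeta(type):
#         def __call__(cls, *args, **kwargs):
#             obj = super(PostInitMeta, cls).__call__(*args, **kwargs)
#             obj.initialize_toolbar()
#             return obj
#
#     class FakeViewer(six.with_metaclass(PostInitMeta, object)):
#         def __init__(self):
#             self.canvas = object()          # state the toolbar depends on
#         def initialize_toolbar(self):
#             self.toolbar = 'built after __init__ has finished'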
# Note: we need to use classmaker here because otherwise we run into issues when
# trying to use the meta-class with the Qt class.
@six.add_metaclass(classmaker(left_metas=(ToolbarInitializer,)))
class DataViewer(ViewerBase, QtWidgets.QMainWindow):
"""
Base class for all Qt DataViewer widgets.
    This defines a minimal interface, and implements the following::
* An automatic call to unregister on window close
* Drag and drop support for adding data
"""
window_closed = QtCore.Signal()
_layer_artist_container_cls = QtLayerArtistContainer
_layer_style_widget_cls = None
LABEL = 'Override this'
_toolbar_cls = None
tools = []
def __init__(self, session, parent=None):
"""
:type session: :class:`~glue.core.Session`
"""
QtWidgets.QMainWindow.__init__(self, parent)
ViewerBase.__init__(self, session)
self.setWindowIcon(get_qapp().windowIcon())
self._view = LayerArtistWidget(layer_style_widget_cls=self._layer_style_widget_cls,
hub=session.hub)
self._view.layer_list.setModel(self._layer_artist_container.model)
self._tb_vis = {} # store whether toolbars are enabled
self.setAttribute(Qt.WA_DeleteOnClose)
self.setAcceptDrops(True)
self.setAnimated(False)
self._toolbars = []
self._warn_close = True
self.setContentsMargins(2, 2, 2, 2)
self._mdi_wrapper = None # GlueMdiSubWindow that self is embedded in
self.statusBar().setStyleSheet("QStatusBar{font-size:10px}")
# close window when last plot layer deleted
self._layer_artist_container.on_empty(lambda: self.close(warn=False))
self._layer_artist_container.on_changed(self.update_window_title)
@property
def selected_layer(self):
return self._view.layer_list.current_artist()
def remove_layer(self, layer):
self._layer_artist_container.pop(layer)
def dragEnterEvent(self, event):
""" Accept the event if it has data layers"""
if event.mimeData().hasFormat(LAYER_MIME_TYPE):
event.accept()
elif event.mimeData().hasFormat(LAYERS_MIME_TYPE):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
""" Add layers to the viewer if contained in mime data """
if event.mimeData().hasFormat(LAYER_MIME_TYPE):
self.request_add_layer(event.mimeData().data(LAYER_MIME_TYPE))
assert event.mimeData().hasFormat(LAYERS_MIME_TYPE)
for layer in event.mimeData().data(LAYERS_MIME_TYPE):
self.request_add_layer(layer)
event.accept()
def mousePressEvent(self, event):
""" Consume mouse press events, and prevent them from propagating
down to the MDI area """
event.accept()
apply_roi = set_cursor(Qt.WaitCursor)(ViewerBase.apply_roi)
def close(self, warn=True):
self._warn_close = warn
if getattr(self, '_mdi_wrapper', None) is not None:
self._mdi_wrapper.close()
self._mdi_wrapper = None
else:
QtWidgets.QMainWindow.close(self)
ViewerBase.close(self)
self._warn_close = True
def mdi_wrap(self):
"""Wrap this object in a GlueMdiSubWindow"""
from glue.app.qt.mdi_area import GlueMdiSubWindow
sub = GlueMdiSubWindow()
sub.setWidget(self)
self.destroyed.connect(sub.close)
sub.resize(self.size())
self._mdi_wrapper = sub
return sub
@property
def position(self):
target = self._mdi_wrapper or self
pos = target.pos()
return pos.x(), pos.y()
@position.setter
def position(self, xy):
x, y = xy
self.move(x, y)
def move(self, x=None, y=None):
"""
Move the viewer to a new XY pixel location
You can also set the position attribute to a new tuple directly.
Parameters
----------
x : int (optional)
New x position
y : int (optional)
New y position
"""
x0, y0 = self.position
if x is None:
x = x0
if y is None:
y = y0
if self._mdi_wrapper is not None:
self._mdi_wrapper.move(x, y)
else:
QtWidgets.QMainWindow.move(self, x, y)
@property
def viewer_size(self):
if self._mdi_wrapper is not None:
sz = self._mdi_wrapper.size()
else:
sz = self.size()
return sz.width(), sz.height()
@viewer_size.setter
def viewer_size(self, value):
width, height = value
self.resize(width, height)
if self._mdi_wrapper is not None:
self._mdi_wrapper.resize(width, height)
def closeEvent(self, event):
""" Call unregister on window close """
if not self._confirm_close():
event.ignore()
return
if self._hub is not None:
self.unregister(self._hub)
self._layer_artist_container.clear_callbacks()
self._layer_artist_container.clear()
super(DataViewer, self).closeEvent(event)
event.accept()
self.window_closed.emit()
def _confirm_close(self):
"""Ask for close confirmation
:rtype: bool. True if user wishes to close. False otherwise
"""
if self._warn_close and (not os.environ.get('GLUE_TESTING')) and self.isVisible():
buttons = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
dialog = QtWidgets.QMessageBox.warning(self, "Confirm Close",
"Do you want to close this window?",
buttons=buttons,
defaultButton=QtWidgets.QMessageBox.Cancel)
return dialog == QtWidgets.QMessageBox.Ok
return True
def _confirm_large_data(self, data):
if not settings.SHOW_LARGE_DATA_WARNING:
# Ignoring large data warning
return True
else:
warn_msg = ("WARNING: Data set has %i points, and may render slowly."
" Continue?" % data.size)
title = "Add large data set?"
ok = QtWidgets.QMessageBox.Ok
cancel = QtWidgets.QMessageBox.Cancel
buttons = ok | cancel
result = QtWidgets.QMessageBox.question(self, title, warn_msg,
buttons=buttons,
defaultButton=cancel)
return result == ok
def layer_view(self):
return self._view
def options_widget(self):
return QtWidgets.QWidget()
def addToolBar(self, tb):
super(DataViewer, self).addToolBar(tb)
self._toolbars.append(tb)
self._tb_vis[tb] = True
def initialize_toolbar(self):
from glue.config import viewer_tool
self.toolbar = self._toolbar_cls(self)
for tool_id in self.tools:
mode_cls = viewer_tool.members[tool_id]
mode = mode_cls(self)
self.toolbar.add_tool(mode)
self.addToolBar(self.toolbar)
def show_toolbars(self):
"""Re-enable any toolbars that were hidden with `hide_toolbars()`
Does not re-enable toolbars that were hidden by other means
"""
for tb in self._toolbars:
if self._tb_vis.get(tb, False):
tb.setEnabled(True)
def hide_toolbars(self):
""" Disable all the toolbars in the viewer.
This action can be reversed by calling `show_toolbars()`
"""
for tb in self._toolbars:
self._tb_vis[tb] = self._tb_vis.get(tb, False) or tb.isVisible()
tb.setEnabled(False)
def set_focus(self, state):
if state:
css = """
DataViewer
{
border: 2px solid;
border-color: rgb(56, 117, 215);
}
"""
self.setStyleSheet(css)
self.show_toolbars()
else:
css = """
DataViewer
{
border: none;
}
"""
self.setStyleSheet(css)
self.hide_toolbars()
def __str__(self):
return self.LABEL
def unregister(self, hub):
"""
Override to perform cleanup operations when disconnecting from hub
"""
pass
@property
def window_title(self):
return str(self)
def update_window_title(self):
self.setWindowTitle(self.window_title)
def set_status(self, message):
sb = self.statusBar()
sb.showMessage(message)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/common/qt/data_viewer.py",
"copies": "1",
"size": "9978",
"license": "bsd-3-clause",
"hash": -2858473583310382600,
"line_mean": 30.2789968652,
"line_max": 94,
"alpha_frac": 0.5850871918,
"autogenerated": false,
"ratio": 4.129966887417218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00025052425172357906,
"num_lines": 319
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy.QtCore import Qt
from qtpy import QtWidgets
from glue import core
from glue.viewers.scatter.client import ScatterClient
from glue.viewers.common.qt.mpl_toolbar import MatplotlibViewerToolbar
from glue.viewers.common.qt.mouse_mode import (RectangleMode, CircleMode,
PolyMode, HRangeMode, VRangeMode)
from glue.utils.qt import load_ui
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.mpl_widget import MplWidget, defer_draw
from glue.viewers.scatter.qt.layer_style_widget import ScatterLayerStyleWidget
from glue.viewers.scatter.layer_artist import ScatterLayerArtist
from glue.utils import nonpartial, cache_axes
from glue.utils.qt.widget_properties import (ButtonProperty, FloatLineProperty,
CurrentComboProperty,
connect_bool_button, connect_float_edit)
__all__ = ['ScatterWidget']
WARN_SLOW = 1000000 # max number of points which render quickly
class ScatterWidget(DataViewer):
"""
An interactive scatter plot.
"""
LABEL = "Scatter Plot"
_property_set = DataViewer._property_set + \
'xlog ylog xflip yflip hidden xatt yatt xmin xmax ymin ymax'.split()
xlog = ButtonProperty('ui.xLogCheckBox', 'log scaling on x axis?')
ylog = ButtonProperty('ui.yLogCheckBox', 'log scaling on y axis?')
xflip = ButtonProperty('ui.xFlipCheckBox', 'invert the x axis?')
yflip = ButtonProperty('ui.yFlipCheckBox', 'invert the y axis?')
xmin = FloatLineProperty('ui.xmin', 'Lower x limit of plot')
xmax = FloatLineProperty('ui.xmax', 'Upper x limit of plot')
ymin = FloatLineProperty('ui.ymin', 'Lower y limit of plot')
ymax = FloatLineProperty('ui.ymax', 'Upper y limit of plot')
hidden = ButtonProperty('ui.hidden_attributes', 'Show hidden attributes')
xatt = CurrentComboProperty('ui.xAxisComboBox',
'Attribute to plot on x axis')
yatt = CurrentComboProperty('ui.yAxisComboBox',
'Attribute to plot on y axis')
_layer_style_widget_cls = {ScatterLayerArtist: ScatterLayerStyleWidget}
_toolbar_cls = MatplotlibViewerToolbar
tools = ['select:rectangle', 'select:xrange', 'select:yrange', 'select:circle', 'select:polygon']
def __init__(self, session, parent=None):
super(ScatterWidget, self).__init__(session, parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self.option_widget = QtWidgets.QWidget()
self.ui = load_ui('options_widget.ui', self.option_widget,
directory=os.path.dirname(__file__))
self._tweak_geometry()
self.client = ScatterClient(self._data,
self.central_widget.canvas.fig,
layer_artist_container=self._layer_artist_container)
self._connect()
self.unique_fields = set()
self.statusBar().setSizeGripEnabled(False)
self.setFocusPolicy(Qt.StrongFocus)
def initialize_toolbar(self):
super(ScatterWidget, self).initialize_toolbar()
cache_axes(self.client.axes, self.toolbar)
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
def _connect(self):
ui = self.ui
cl = self.client
connect_bool_button(cl, 'xlog', ui.xLogCheckBox)
connect_bool_button(cl, 'ylog', ui.yLogCheckBox)
connect_bool_button(cl, 'xflip', ui.xFlipCheckBox)
connect_bool_button(cl, 'yflip', ui.yFlipCheckBox)
ui.xAxisComboBox.currentIndexChanged.connect(self.update_xatt)
ui.yAxisComboBox.currentIndexChanged.connect(self.update_yatt)
ui.hidden_attributes.toggled.connect(lambda x: self._update_combos())
ui.swapAxes.clicked.connect(nonpartial(self.swap_axes))
ui.snapLimits.clicked.connect(cl.snap)
connect_float_edit(cl, 'xmin', ui.xmin)
connect_float_edit(cl, 'xmax', ui.xmax)
connect_float_edit(cl, 'ymin', ui.ymin)
connect_float_edit(cl, 'ymax', ui.ymax)
@defer_draw
def _update_combos(self):
""" Update contents of combo boxes """
# have to be careful here, since client and/or widget
# are potentially out of sync
layer_ids = []
# show hidden attributes if needed
if ((self.client.xatt and self.client.xatt.hidden) or
(self.client.yatt and self.client.yatt.hidden)):
self.hidden = True
# determine which components to put in combos
for l in self.client.data:
if not self.client.is_layer_present(l):
continue
for lid in self.client.plottable_attributes(
l, show_hidden=self.hidden):
if lid not in layer_ids:
layer_ids.append(lid)
oldx = self.xatt
oldy = self.yatt
newx = self.client.xatt or oldx
newy = self.client.yatt or oldy
for combo, target in zip([self.ui.xAxisComboBox, self.ui.yAxisComboBox],
[newx, newy]):
combo.blockSignals(True)
combo.clear()
if not layer_ids: # empty component list
continue
# populate
for lid in layer_ids:
combo.addItem(lid.label, userData=lid)
idx = layer_ids.index(target) if target in layer_ids else 0
combo.setCurrentIndex(idx)
combo.blockSignals(False)
# ensure client and widget synced
self.client.xatt = self.xatt
        self.client.yatt = self.yatt
@defer_draw
def add_data(self, data):
"""Add a new data set to the widget
:returns: True if the addition was expected, False otherwise
"""
if self.client.is_layer_present(data):
return
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
first_layer = self.client.layer_count == 0
self.client.add_data(data)
self._update_combos()
if first_layer: # forces both x and y axes to be rescaled
self.update_xatt(None)
self.update_yatt(None)
self.ui.xAxisComboBox.setCurrentIndex(0)
if len(data.visible_components) > 1:
self.ui.yAxisComboBox.setCurrentIndex(1)
else:
self.ui.yAxisComboBox.setCurrentIndex(0)
self.update_window_title()
return True
@defer_draw
def add_subset(self, subset):
"""Add a subset to the widget
:returns: True if the addition was accepted, False otherwise
"""
if self.client.is_layer_present(subset):
return
data = subset.data
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
first_layer = self.client.layer_count == 0
self.client.add_layer(subset)
self._update_combos()
if first_layer: # forces both x and y axes to be rescaled
self.update_xatt(None)
self.update_yatt(None)
self.ui.xAxisComboBox.setCurrentIndex(0)
if len(data.visible_components) > 1:
self.ui.yAxisComboBox.setCurrentIndex(1)
else:
self.ui.yAxisComboBox.setCurrentIndex(0)
self.update_window_title()
return True
def register_to_hub(self, hub):
super(ScatterWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self, core.message.DataUpdateMessage,
nonpartial(self._sync_labels))
hub.subscribe(self, core.message.ComponentsChangedMessage,
nonpartial(self._update_combos))
hub.subscribe(self, core.message.ComponentReplacedMessage,
self._on_component_replace)
def _on_component_replace(self, msg):
# let client update its state first
self.client._on_component_replace(msg)
self._update_combos()
def unregister(self, hub):
super(ScatterWidget, self).unregister(hub)
hub.unsubscribe_all(self.client)
hub.unsubscribe_all(self)
@defer_draw
def swap_axes(self):
xid = self.ui.xAxisComboBox.currentIndex()
yid = self.ui.yAxisComboBox.currentIndex()
xlog = self.ui.xLogCheckBox.isChecked()
ylog = self.ui.yLogCheckBox.isChecked()
xflip = self.ui.xFlipCheckBox.isChecked()
yflip = self.ui.yFlipCheckBox.isChecked()
self.ui.xAxisComboBox.setCurrentIndex(yid)
self.ui.yAxisComboBox.setCurrentIndex(xid)
self.ui.xLogCheckBox.setChecked(ylog)
self.ui.yLogCheckBox.setChecked(xlog)
self.ui.xFlipCheckBox.setChecked(yflip)
self.ui.yFlipCheckBox.setChecked(xflip)
@defer_draw
def update_xatt(self, index):
component_id = self.xatt
self.client.xatt = component_id
@defer_draw
def update_yatt(self, index):
component_id = self.yatt
self.client.yatt = component_id
@property
def window_title(self):
data = self.client.data
label = ', '.join([d.label for d in data if
self.client.is_visible(d)])
return label
def _sync_labels(self):
self.update_window_title()
def options_widget(self):
return self.option_widget
@defer_draw
def restore_layers(self, rec, context):
self.client.restore_layers(rec, context)
self._update_combos()
# manually force client attributes to sync
self.update_xatt(None)
self.update_yatt(None)
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/scatter/qt/viewer_widget.py",
"copies": "1",
"size": "9914",
"license": "bsd-3-clause",
"hash": -574948424469269800,
"line_mean": 34.28113879,
"line_max": 101,
"alpha_frac": 0.6210409522,
"autogenerated": false,
"ratio": 3.8817541111981204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003157879229069068,
"num_lines": 281
} |
from __future__ import absolute_import, division, print_function
import os
from qtpy.QtCore import Qt
from qtpy import QtWidgets
from glue.utils.qt import set_cursor_cm
__all__ = ['data_wizard', 'GlueDataDialog']
def data_wizard():
""" QT Dialog to load a file into a new data object
Returns:
A list of new data objects. Returns an empty list if
selection is canceled.
"""
def report_error(error, factory, curfile):
import traceback
retry = QtWidgets.QMessageBox.Retry
cancel = QtWidgets.QMessageBox.Cancel
buttons = retry | cancel
detail = traceback.format_exc()
msg = "\n".join(["Could not load %s (wrong load method?)" % curfile,
"File load method: %s" % factory.label])
detail = "\n\n".join(["Error message: %s" % error, detail])
mb = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, "Data Load Error", msg)
mb.setDetailedText(detail)
mb.setDefaultButton(cancel)
mb.setStandardButtons(buttons)
ok = mb.exec_()
return ok == retry
while True:
gdd = GlueDataDialog()
try:
result = gdd.load_data()
break
except Exception as e:
decision = report_error(e, gdd.factory(), gdd._curfile)
if not decision:
return []
return result
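# Illustrative usage sketch (assumes a running Qt application, e.g. from
# glue.utils.qt.get_qapp, and that the user picks files in the dialog):
#
#     datasets = data_wizard()          # [] if the selection was cancelled
#     for d in datasets:
#         print(d.label)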
class GlueDataDialog(object):
def __init__(self, parent=None):
self._fd = QtWidgets.QFileDialog(parent, directory=os.curdir)
from glue.config import data_factory
self.filters = [(f, self._filter(f))
for f in data_factory.members if not f.deprecated]
self.setNameFilter()
self._fd.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
self._curfile = ''
try:
self._fd.setOption(
QtWidgets.QFileDialog.Option.HideNameFilterDetails, True)
except AttributeError: # HideNameFilterDetails not present
pass
def factory(self):
fltr = self._fd.selectedNameFilter()
for k, v in self.filters:
if v.startswith(fltr):
return k
def setNameFilter(self):
fltr = ";;".join([flt for fac, flt in self.filters])
self._fd.setNameFilter(fltr)
def _filter(self, factory):
return "%s (*)" % factory.label
def paths(self):
"""
Return all selected paths, as a list of unicode strings
"""
return self._fd.selectedFiles()
def _get_paths_and_factory(self):
"""Show dialog to get a file path and data factory
:rtype: tuple of (list-of-strings, func)
giving the path and data factory.
returns ([], None) if user cancels dialog
"""
result = self._fd.exec_()
if result == QtWidgets.QDialog.Rejected:
return [], None
# path = list(map(str, self.paths())) # cast out of unicode
path = list(self.paths())
factory = self.factory()
return path, factory
def load_data(self):
"""Highest level method to interactively load a data set.
:rtype: A list of constructed data objects
"""
from glue.core.data_factories import data_label, load_data
paths, fac = self._get_paths_and_factory()
result = []
# Check that the user didn't select a .glu file by mistake
for path in paths:
if path.endswith('.glu'):
mb = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical,
"Error loading data",
"It looks like you have selected "
"a .glu session file. You should open "
"this using 'Open Session' under the "
"'File' menu instead")
mb.exec_()
return []
with set_cursor_cm(Qt.WaitCursor):
for path in paths:
self._curfile = path
d = load_data(path, factory=fac.function)
if not isinstance(d, list):
if not d.label:
d.label = data_label(path)
d = [d]
result.extend(d)
return result
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/data_wizard/qt/data_wizard_dialog.py",
"copies": "2",
"size": "4394",
"license": "bsd-3-clause",
"hash": 1343342467754434800,
"line_mean": 33.328125,
"line_max": 90,
"alpha_frac": 0.5464269458,
"autogenerated": false,
"ratio": 4.407221664994985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5953648610794986,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from setuptools import setup
import versioneer
rootpath = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return open(os.path.join(rootpath, *parts), "r").read()
long_description = "{}\n{}".format(read("README.rst"), read("CHANGES.txt"))
LICENSE = read("LICENSE.txt")
with open("requirements.txt") as f:
require = f.readlines()
install_requires = [r.strip() for r in require]
setup(
name="odvc",
python_requires=">=3.6",
version=versioneer.get_version(),
license=LICENSE,
long_description=long_description,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: BSD",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering",
"Topic :: Education",
],
description="Ocean Dimensionless Vertical Coordinates",
author="Filipe Fernandes",
author_email="ocefpaf@gmail.com",
maintainer="Filipe Fernandes",
maintainer_email="ocefpaf@gmail.com",
url="https://github.com/pyoceans/odvc",
download_url="http://pypi.python.org/pypi/odvc",
platforms="any",
keywords=[
"CF-conventions",
"dimensionless coordinate",
"vertical coordinate",
],
install_requires=install_requires,
tests_require=["pytest"],
packages=["odvc"],
cmdclass=versioneer.get_cmdclass(),
)
| {
"repo_name": "ocefpaf/odvc",
"path": "setup.py",
"copies": "3",
"size": "1658",
"license": "bsd-3-clause",
"hash": 286826740314771330,
"line_mean": 27.1016949153,
"line_max": 75,
"alpha_frac": 0.646562123,
"autogenerated": false,
"ratio": 3.751131221719457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5897693344719457,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
# gpi, future
import gpi
from bart.gpi.borg import IFilePath, OFilePath, Command
# bart
import bart
base_path = bart.__path__[0] # library base for executables
import bart.python.cfl as cfl
class ExternalNode(gpi.NodeAPI):
"""Usage: cdf97 [-i] bitmask <input> <output>
Perform a wavelet (cdf97) transform.
-i inverse
-h help
"""
def initUI(self):
# Widgets
self.addWidget('SpinBox', 'bitmask', val=3)
self.addWidget('PushButton', 'compute', toggle=True)
self.addWidget(
'PushButton', 'direction', button_title='FORWARD', toggle=True)
# IO Ports
self.addInPort('input', 'NPYarray')
self.addOutPort('output', 'NPYarray')
return 0
def validate(self):
'''update the widget bounds based on the input data
'''
if 'direction' in self.widgetEvents():
direction = self.getVal('direction')
if direction:
self.setAttr('direction', button_title="INVERSE")
else:
self.setAttr('direction', button_title="FORWARD")
return 0
def compute(self):
if self.getVal('compute'):
direction = self.getVal('direction')
bm = self.getVal('bitmask')
kspace = self.getData('input')
# load up arguments list
args = [base_path+'/bart cdf97']
if direction != 0:
args += ['-i']
args += [str(bm)]
in1 = IFilePath(cfl.writecfl, kspace, asuffix=['.cfl', '.hdr'])
args += [in1]
# setup file for getting data from external command
out = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr'])
args += [out]
# run commandline
print(Command(*args))
self.setData('output', out.data())
out.close()
return 0
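# For illustration: with bitmask=3 and the direction button toggled to INVERSE,
# the args list assembled in compute() corresponds to a command line of roughly
#
#     <base_path>/bart cdf97 -i 3 <tmp_input> <tmp_output>
#
# where <tmp_input>/<tmp_output> stand for the temporary .cfl/.hdr file pairs
# managed by IFilePath and OFilePath (names here are placeholders).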
| {
"repo_name": "nckz/bart",
"path": "gpi/cdf97_GPI.py",
"copies": "1",
"size": "1988",
"license": "bsd-3-clause",
"hash": -7054664795357644000,
"line_mean": 24.164556962,
"line_max": 75,
"alpha_frac": 0.5533199195,
"autogenerated": false,
"ratio": 3.960159362549801,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5013479282049801,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import datashape
from datashape import DataShape, Record, to_numpy, to_numpy_dtype, discover
from datashape.predicates import isrecord
from datashape.dispatch import dispatch
import h5py
import numpy as np
from toolz import keyfilter
from ..append import append
from ..convert import convert, ooc_types
from ..create import create
from ..resource import resource
from ..chunks import chunks
h5py_attributes = ['chunks', 'compression', 'compression_opts', 'dtype',
'fillvalue', 'fletcher32', 'maxshape', 'shape']
try:
unicode_dtype = h5py.special_dtype(vlen=unicode)
except NameError:
unicode_dtype = h5py.special_dtype(vlen=str)
@discover.register((h5py.Group, h5py.File))
def discover_h5py_group_file(g):
return DataShape(Record([[k, discover(v)] for k, v in g.items()]))
def record_dshape_replace(dshape, old, new):
"""Recursively replace all instances of `old` with `new` in the record
dshape `dshape`.
Examples
--------
>>> from datashape import Record, string, object_, dshape
>>> ds = DataShape(Record([('a', 'int64'),
... ('b', 10 * Record([('c', 'object')])),
... ('d', 'int64')]))
...
>>> Record(list(record_dshape_replace(ds, object_, string)))
dshape("{a: int64, b: 10 * {c: object}, d: int64}")
"""
assert isrecord(dshape), 'input dshape must be a record'
for name, subshape in dshape.measure.fields:
if subshape == old:
yield name, new
else:
if isrecord(subshape):
yield record_dshape_replace(subshape, old, new)
else:
yield name, subshape
@discover.register(h5py.Dataset)
def discover_h5py_dataset(d):
dshape = datashape.from_numpy(d.shape, d.dtype)
shape, measure = dshape.shape, dshape.measure
if not isrecord(measure):
if dshape == datashape.object_:
args = shape + (datashape.string,)
return DataShape(*args)
return dshape
else:
records = list(record_dshape_replace(measure, datashape.object_,
datashape.string))
args = shape + (datashape.Record(records),)
return DataShape(*args)
def dtype_replace(dtype, old, new):
"""Replace the subdtype `old` in `subdtype` with `new`.
Parameters
----------
dtype, old, new : dtype
Examples
--------
>>> dt = np.dtype([('a', 'int64'), ('b', 'object'),
... ('c', [('d', 'object'), ('e', 'float64')])])
...
>>> r = np.dtype(list(dtype_replace(dt, 'int64', 'float64')))
>>> r
dtype([('a', '<f8'), ('b', 'O'), ('c', [('d', 'O'), ('e', '<f8')])])
"""
names = dtype.names
assert names is not None, 'dtype must be record-like'
for name, subdtype in zip(names, map(dtype.__getitem__, names)):
if subdtype == old:
yield name, new
else:
if subdtype.names is not None:
yield name, list(dtype_replace(subdtype, old, new))
else:
yield name, subdtype
def varlen_dtype(dt):
"""Inject variable length string element for object dtype
Examples
--------
>>> dt = np.dtype('object')
>>> dt
dtype('O')
>>> r = varlen_dtype(dt)
>>> r
dtype('O')
>>> r.metadata['vlen'] # doctest: +SKIP
<type 'unicode'>
>>> dt = np.dtype([('a', 'int64'), ('b', 'object'),
... ('c', [('d', 'object'), ('e', 'float64')])])
...
>>> dt['b'].metadata
>>> r = varlen_dtype(dt)
>>> r
dtype([('a', '<i8'), ('b', 'O'), ('c', [('d', 'O'), ('e', '<f8')])])
>>> r['b'].metadata['vlen'] # doctest: +SKIP
<type 'unicode'>
"""
if dt == np.object_:
return unicode_dtype
elif dt.names is None: # some kind of non record like dtype
return dt
else:
return np.dtype(list(dtype_replace(dt, np.dtype('object'),
unicode_dtype)))
def dataset_from_dshape(file, datapath, ds, **kwargs):
dtype = varlen_dtype(to_numpy_dtype(ds))
if datashape.var not in list(ds):
shape = to_numpy(ds)[0]
elif datashape.var not in list(ds)[1:]:
shape = (0,) + to_numpy(ds.subshape[0])[0]
else:
raise ValueError("Don't know how to handle varlen nd shapes")
if shape:
kwargs['chunks'] = kwargs.get('chunks', True)
kwargs['maxshape'] = kwargs.get('maxshape', (None,) + shape[1:])
kwargs2 = keyfilter(h5py_attributes.__contains__, kwargs)
return file.require_dataset(datapath, shape=shape, dtype=dtype, **kwargs2)
def create_from_datashape(group, ds, name=None, **kwargs):
if not isrecord(ds):
raise ValueError(
"Trying to create an HDF5 file with non-record datashape failed\n"
"Perhaps you forgot to specify a datapath?\n"
"\tdshape: %s\n"
"If you're using into consider the following change\n"
"\tBefore: into('myfile.hdf5', data)\n"
"\tAfter: into('myfile.hdf5::/datapath', data)" % ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
for name, sub_ds in ds.dict.items():
if isrecord(sub_ds):
g = group.require_group(name)
create_from_datashape(g, sub_ds, **kwargs)
else:
dataset_from_dshape(file=group.file,
datapath='/'.join([group.name, name]),
ds=sub_ds, **kwargs)
@create.register(h5py.File)
def create_h5py_file(cls, path=None, dshape=None, **kwargs):
f = h5py.File(path)
create_from_datashape(f, dshape, **kwargs)
return f
@append.register(h5py.Dataset, np.ndarray)
def append_h5py(dset, x, **kwargs):
if not sum(x.shape):
return dset
shape = list(dset.shape)
shape[0] += len(x)
dset.resize(shape)
dset[-len(x):] = x
return dset
@append.register(h5py.Dataset, chunks(np.ndarray))
def append_h5py(dset, c, **kwargs):
for chunk in c:
append(dset, chunk)
return dset
@append.register(h5py.Dataset, object)
def append_h5py(dset, x, **kwargs):
return append(dset, convert(chunks(np.ndarray), x, **kwargs), **kwargs)
@convert.register(np.ndarray, h5py.Dataset, cost=3.0)
def h5py_to_numpy(dset, force=False, **kwargs):
    if dset.size > 1e9 and not force:
        raise MemoryError("File size is large: %0.2f GB.\n"
                          "Convert with flag force=True to force loading"
                          % (dset.size / 1e9))
else:
return dset[:]
@convert.register(chunks(np.ndarray), h5py.Dataset, cost=3.0)
def h5py_to_numpy_chunks(dset, chunksize=2 ** 20, **kwargs):
def load():
for i in range(0, dset.shape[0], chunksize):
yield dset[i: i + chunksize]
return chunks(np.ndarray)(load)
@resource.register('h5py://.+', priority=11)
def resource_h5py(uri, datapath=None, dshape=None, **kwargs):
f = h5py.File(uri)
olddatapath = datapath
if dshape is not None:
ds = datashape.dshape(dshape)
if datapath:
while ds and datapath:
datapath, name = datapath.rsplit('/', 1)
ds = Record([[name, ds]])
ds = datashape.dshape(ds)
f = create(h5py.File, path=uri, dshape=ds, **kwargs)
if olddatapath:
return f[olddatapath]
else:
return f
@resource.register('.+\.(hdf5|h5)')
def resource_hdf5(*args, **kwargs):
return resource_h5py(*args, **kwargs)
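# Illustrative usage sketch (file name, datapath and datashape below are only
# examples; the '::/datapath' form follows the convention spelled out in the
# create_from_datashape error message above):
#
#     into('myfile.hdf5::/datapath', data)
#
# or, bypassing the URI handling and calling the resource function directly:
#
#     dset = resource_h5py('myfile.hdf5', datapath='/datapath',
#                          dshape='var * {a: int64, b: float64}')
#     append(dset, np.array([(1, 2.0)], dtype=[('a', '<i8'), ('b', '<f8')]))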
@dispatch((h5py.Group, h5py.Dataset))
def drop(h):
del h.file[h.name]
@dispatch(h5py.File)
def drop(h):
os.remove(h.filename)
ooc_types.add(h5py.Dataset)
| {
"repo_name": "mrocklin/into",
"path": "into/backends/h5py.py",
"copies": "1",
"size": "7768",
"license": "bsd-3-clause",
"hash": -331208715482822600,
"line_mean": 29.1085271318,
"line_max": 89,
"alpha_frac": 0.5742790937,
"autogenerated": false,
"ratio": 3.3642269380684278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437752371131046,
"avg_score": 0.00015073212747631351,
"num_lines": 258
} |
from __future__ import absolute_import, division, print_function
import os
import datashape
from .data_descriptor import DDesc
from .. import py2help
from dynd import nd
from .dynd_data_descriptor import DyND_DDesc, Capabilities
def json_descriptor_iter(array):
for row in array:
yield DyND_DDesc(row)
class JSON_DDesc(DDesc):
"""
A Blaze data descriptor which exposes a JSON file.
Parameters
----------
path : string
A path string for the JSON file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the JSON file.
"""
def __init__(self, path, mode='r', **kwargs):
if os.path.isfile(path) is not True:
raise ValueError('JSON file "%s" does not exist' % path)
self.path = path
self.mode = mode
schema = kwargs.get("schema", None)
if type(schema) in py2help._strtypes:
schema = datashape.dshape(schema)
self.schema = str(schema)
# Initially the array is not loaded (is this necessary?)
self._cache_arr = None
@property
def dshape(self):
return datashape.dshape(self.schema)
@property
def capabilities(self):
"""The capabilities for the json data descriptor."""
return Capabilities(
# json datadescriptor cannot be updated
immutable = False,
# json datadescriptors are concrete
deferred = False,
# json datadescriptor is persistent
persistent = True,
# json datadescriptor can be appended efficiently
appendable = True,
remote = False,
)
@property
def _arr_cache(self):
if self._cache_arr is not None:
return self._cache_arr
with open(self.path, mode=self.mode) as jsonfile:
# This will read everything in-memory (but a memmap approach
# is in the works)
self._cache_arr = nd.parse_json(
self.schema, jsonfile.read())
return self._cache_arr
def dynd_arr(self):
return self._arr_cache
def __array__(self):
return nd.as_numpy(self.dynd_arr())
def __len__(self):
# Not clear to me what the length of a json object should be
return None
def __getitem__(self, key):
return DyND_DDesc(self._arr_cache[key])
def __setitem__(self, key, value):
# JSON files cannot be updated (at least, not efficiently)
raise NotImplementedError
def __iter__(self):
return json_descriptor_iter(self._arr_cache)
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
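# Illustrative usage sketch (assumes an existing 'points.json' file whose
# contents match the schema below; path and schema are only examples):
#
#     dd = JSON_DDesc('points.json', mode='r',
#                     schema='2 * {x: int32, y: int32}')
#     dd.dshape             # -> dshape("2 * {x: int32, y: int32}")
#     arr = dd.dynd_arr()   # the file is parsed on first access and cached
#     for row in dd:        # iteration yields one DyND_DDesc per row
#         pass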
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/datadescriptor/json_data_descriptor.py",
"copies": "3",
"size": "2737",
"license": "bsd-3-clause",
"hash": -2306633751572151000,
"line_mean": 28.1170212766,
"line_max": 72,
"alpha_frac": 0.5999269273,
"autogenerated": false,
"ratio": 4.140695915279879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6240622842579878,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import datashape
from .data_descriptor import IDataDescriptor
from .. import py2help
from dynd import nd
from .dynd_data_descriptor import DyNDDataDescriptor, Capabilities
def json_descriptor_iter(array):
for row in array:
yield DyNDDataDescriptor(row)
class JSONDataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which exposes a JSON file.
Parameters
----------
filename : string
A path string for the JSON file.
schema : string or blaze.datashape
A blaze datashape (or its string representation) of the schema
in the JSON file.
"""
def __init__(self, filename, **kwargs):
if os.path.isfile(filename) is not True:
raise ValueError('JSON file "%s" does not exist' % filename)
self.filename = filename
schema = kwargs.get("schema", None)
if type(schema) in py2help._strtypes:
schema = datashape.dshape(schema)
self.schema = str(schema)
# Initially the array is not loaded (is this necessary?)
self._cache_arr = None
@property
def dshape(self):
return datashape.dshape(self.schema)
@property
def capabilities(self):
"""The capabilities for the json data descriptor."""
return Capabilities(
# json datadescriptor cannot be updated
immutable = False,
# json datadescriptors are concrete
deferred = False,
# json datadescriptor is persistent
persistent = True,
# json datadescriptor can be appended efficiently
appendable = True,
remote = False,
)
@property
def _arr_cache(self):
if self._cache_arr is not None:
return self._cache_arr
with open(self.filename) as jsonfile:
# This will read everything in-memory (but a memmap approach
# is in the works)
self._cache_arr = nd.parse_json(
self.schema, jsonfile.read())
return self._cache_arr
def dynd_arr(self):
return self._arr_cache
def __array__(self):
return nd.as_numpy(self.dynd_arr())
def __len__(self):
# Not clear to me what the length of a json object should be
return None
def __getitem__(self, key):
return DyNDDataDescriptor(self._arr_cache[key])
def __setitem__(self, key, value):
# JSON files cannot be updated (at least, not efficiently)
raise NotImplementedError
def __iter__(self):
return json_descriptor_iter(self._arr_cache)
| {
"repo_name": "XinSong/blaze",
"path": "blaze/datadescriptor/json_data_descriptor.py",
"copies": "7",
"size": "2681",
"license": "bsd-3-clause",
"hash": -5404987424908856000,
"line_mean": 29.1235955056,
"line_max": 72,
"alpha_frac": 0.6161879896,
"autogenerated": false,
"ratio": 4.269108280254777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0037263328673259807,
"num_lines": 89
} |
from __future__ import absolute_import, division, print_function
import os
import idaapi
from idaskins import IDA_DIR, THEMES_DIR, VERSION
from idaskins.idafontconfig import IdaFontConfig
from idaskins.objectinspector import ObjectInspector
from idaskins.settings import Settings
from idaskins.thememanifest import ThemeManifest, ManifestError
from idaskins.themeselector import ThemeSelector
from idaskins.clrapplier import load_clr_file
from PyQt5.Qt import qApp
from PyQt5.QtCore import QObject, QDir
from PyQt5.QtWidgets import QMessageBox
class UiHooks(idaapi.UI_Hooks):
"""
UI hooks. Currently only used to display a warning when
switching font settings in IDA.
"""
def __init__(self):
super(UiHooks, self).__init__()
self._last_event = None
def preprocess_action(self, name):
self._last_event = name
return super(UiHooks, self).preprocess_action(name)
def postprocess_action(self):
if self._last_event == 'SetFont':
QMessageBox.warning(
qApp.activeWindow(),
"IDASkins",
"Please note that altering the font settings when IDASkins "
"is loaded may cause strange effects on font rendering. It is "
"recommended to restart IDA after making font-related changes "
"in the settings to avoid instability."
)
return super(UiHooks, self).postprocess_action()
class IdaSkinsPlugin(QObject, idaapi.plugin_t):
"""Plugin entry point. Does most of the skinning magic."""
flags = idaapi.PLUGIN_FIX
comment = "Advanced IDA skinning"
help = "This is help"
wanted_name = "IDASkins: Settings"
wanted_hotkey = "Ctrl-Shift-S"
def __init__(self, *args, **kwargs):
print("[IDASkins] {} by athre0z (zyantific.com) loaded!".format(
VERSION
))
QObject.__init__(self, *args, **kwargs)
idaapi.plugin_t.__init__(self)
# First start dialog.
self._settings = Settings()
if self._settings.first_start:
selection = QMessageBox.information(
qApp.activeWindow(),
"IDASkins: First start",
"IDASkins has detected that this is the first time you've started IDA with "
"the plugin installed. Select a theme now?",
QMessageBox.Yes | QMessageBox.No,
)
if selection == QMessageBox.Yes:
self.open_theme_selector()
self._settings.first_start = False
else:
# v2.0.0 used absolute pathes due to a bug.
# Fix settings from this particular version here.
theme_dir = self._settings.selected_theme_dir
if theme_dir and os.path.isabs(theme_dir):
print('[IDASkins] Updating buggy v2.0.0 theme path')
self._settings.selected_theme_dir = os.path.split(theme_dir)[-1]
self._theme_selector = None
self.apply_stylesheet_from_settings()
# Subscribe UI notifications.
self._ui_hooks = UiHooks()
self._ui_hooks.hook()
def preprocess_stylesheet(self, qss, abs_theme_dir):
qss = qss.replace('<IDADIR>', QDir.fromNativeSeparators(IDA_DIR))
qss = qss.replace('<SKINDIR>', QDir.fromNativeSeparators(abs_theme_dir))
def replace_keyword(x, keyword, kind):
cfg = IdaFontConfig(kind)
prefix = '<{}_FONT_'.format(keyword)
x = x.replace(prefix + 'FAMILY>', cfg.family)
x = x.replace(prefix + 'STYLE>', ' italic' if cfg.italic else '')
x = x.replace(prefix + 'WEIGHT>', ' bold' if cfg.bold else '')
x = x.replace(prefix + 'SIZE>', '{} pt'.format(cfg.size))
return x
qss = replace_keyword(qss, 'DISASSEMBLY', 'disas')
qss = replace_keyword(qss, 'HEXVIEW', 'hexview')
qss = replace_keyword(qss, 'DEBUG_REGISTERS', 'debug_regs')
qss = replace_keyword(qss, 'TEXT_INPUT', 'text_input')
qss = replace_keyword(qss, 'OUTPUT_WINDOW', 'output_wnd')
return qss
def apply_stylesheet(self, abs_theme_dir, manifest):
try:
with open(os.path.join(abs_theme_dir, manifest.qss_file)) as f:
qss = f.read()
except IOError as exc:
print('[IDASkins] Unable to load stylesheet.')
return
qss = self.preprocess_stylesheet(qss, abs_theme_dir)
qApp.setStyleSheet(qss)
#idaapi.request_refresh(idaapi.IWID_ALL)
def apply_clr_file(self, abs_theme_dir, manifest):
try:
load_clr_file(os.path.join(abs_theme_dir, manifest.clr_file))
except IOError as exc:
print('[IDASkins] Unable to load clr file.')
return
def apply_stylesheet_from_settings(self):
theme_dir = self._settings.selected_theme_dir
if theme_dir:
abs_theme_dir = os.path.join(THEMES_DIR, theme_dir)
try:
manifest = ThemeManifest(open(os.path.join(
abs_theme_dir, 'manifest.json'
)))
except ManifestError as exc:
print('[IDASkins]', str(exc))
return
self.apply_stylesheet(abs_theme_dir, manifest)
self.apply_clr_file(abs_theme_dir, manifest)
print('[IDASkins] Skin file successfully applied!')
def open_theme_selector(self):
self._theme_selector = ThemeSelector(qApp.activeWindow())
self._theme_selector.accepted.connect(self.on_theme_selection_accepted)
self._theme_selector.show()
def on_theme_selection_accepted(self):
theme_dir = self.sender().selected_theme_dir
if theme_dir:
self._settings.selected_theme_dir = theme_dir
self.apply_stylesheet_from_settings()
def init(self):
return idaapi.PLUGIN_KEEP
def run(self, arg):
self.open_theme_selector()
def term(self):
print("term() called!")
| {
"repo_name": "zyantific/IDASkins",
"path": "plugins/idaskins/plugin.py",
"copies": "1",
"size": "6051",
"license": "mit",
"hash": -7239864608497623000,
"line_mean": 35.0178571429,
"line_max": 92,
"alpha_frac": 0.604858701,
"autogenerated": false,
"ratio": 3.8104534005037785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9913649833365027,
"avg_score": 0.00033245362775024103,
"num_lines": 168
} |
from __future__ import absolute_import, division, print_function
import os
import invoke
import fabric.api
import fabric.contrib.files
from .utils import cd, ssh_host
SALT_MASTER = "192.168.5.1"
@invoke.task(name="sync-changes")
def sync_changes():
# Push our changes to GitHub
# TODO: Determine what origin to use?
invoke.run("git push origin master", echo=True)
if os.path.isdir("pillar/prod/secrets"):
with cd("pillar/prod/secrets"):
# Push our changes into the secret repository
invoke.run("git push origin master", echo=True)
# SSH into the salt master and pull our changes
with ssh_host("salt.iad1.psf.io"):
with fabric.api.cd("/srv/salt"):
fabric.api.sudo("git pull --ff-only origin master")
with fabric.api.cd("/srv/pillar/prod/secrets"):
fabric.api.sudo("git pull --ff-only origin master")
@invoke.task
def bootstrap(host, codename="trusty", pre=[sync_changes]):
# If the host does not contain '.', we'll assume it's of the form
# [host].iad1.psf.io.
if "." not in host:
host += ".iad1.psf.io"
# SSH into the root user of this server and bootstrap the server.
with ssh_host("root@" + host):
# Make sure this host hasn't already been bootstrapped.
if fabric.contrib.files.exists("/etc/salt/minion.d/local.conf"):
raise RuntimeError("{} is already bootstrapped.".format(host))
fabric.api.run("wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/2018.3/SALTSTACK-GPG-KEY.pub | apt-key add -")
if codename == "trusty":
fabric.api.run("echo 'deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2018.3 trusty main' > /etc/apt/sources.list.d/saltstack.list")
elif codename == "xenial":
fabric.api.run("echo 'deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2018.3 xenial main' > /etc/apt/sources.list.d/saltstack.list")
else:
raise RuntimeError("{} is not supported!".format(codename))
# Then we need to update our local apt
fabric.api.run("apt-get update -qy")
# Then, upgrade all of the packages that are currently on this
# machine.
fabric.api.run("apt-get upgrade -qy")
fabric.api.run("apt-get dist-upgrade -qy")
# We don't want the nova-agent installed.
# This doesn't appear to be installed on Xenial anymore?
if codename != "xenial":
fabric.api.run("apt-get purge nova-agent -qy")
# Install salt-minion and python-apt so we can manage things with
# salt.
fabric.api.run("apt-get install -qy salt-minion")
# Drop the /etc/salt/minion.d/local.conf onto the server so that it
# can connect with our salt master.
fabric.contrib.files.upload_template(
"conf/minion.conf",
"/etc/salt/minion.d/local.conf",
context={
"master": SALT_MASTER,
},
use_jinja=True,
mode=0o0644,
)
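# Illustrative sketch only: the repository's actual conf/minion.conf template
# is not shown here, but given the context passed above it needs little more
# than pointing the minion at the master, e.g.
#
#   master: {{ master }}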
# Run salt-call state.highstate, this will fail the first time because
# the Master hasn't accepted our key yet.
fabric.api.run("salt-call state.highstate", warn_only=True)
# Get the minion ID of this server
minion_id = fabric.api.run("cat /etc/salt/minion_id")
# SSH into our salt master and accept the key for this server.
with ssh_host("salt.iad1.psf.io"):
fabric.api.sudo("salt-key -ya {}".format(minion_id))
# Finally SSH into our server one more time to run salt-call
# state.highstate for real this time.
with ssh_host("root@" + host):
fabric.api.run("salt-call state.highstate")
# Reboot the server to make sure any upgrades have been loaded.
fabric.api.reboot()
@invoke.task(default=True, pre=[sync_changes])
def highstate(hosts, dc="iad1"):
# Until invoke supports *args we need to hack around the lack of support
# for now.
hosts = [h.strip() for h in hosts.split(",") if h.strip()]
# Ensure we have some hosts
if not hosts:
raise ValueError("Must specify hosts for highstate")
# Loop over all the hosts and if they do not have a ., then we'll add
# .psf.io to them.
hosts = [h if "." in h else h + "." + dc + ".psf.io" for h in hosts]
# Loop over all the hosts and call salt-call state.highstate on them.
for host in hosts:
with ssh_host(host):
fabric.api.sudo("salt-call state.highstate")
| {
"repo_name": "python/psf-salt",
"path": "tasks/salt.py",
"copies": "1",
"size": "4528",
"license": "mit",
"hash": -6890022229323660000,
"line_mean": 36.1147540984,
"line_max": 149,
"alpha_frac": 0.6274293286,
"autogenerated": false,
"ratio": 3.559748427672956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9686318456227071,
"avg_score": 0.0001718600091769908,
"num_lines": 122
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from matplotlib.colors import ColorConverter
from qtpy import QtWidgets
from glue.core.message import SettingsChangeMessage
from glue.utils import nonpartial
from glue.utils.qt import load_ui, ColorProperty
from glue.utils.qt.widget_properties import (CurrentComboTextProperty,
ValueProperty, ButtonProperty)
from glue._settings_helpers import save_settings
__all__ = ["PreferencesDialog"]
rgb = ColorConverter().to_rgb
class PreferencesDialog(QtWidgets.QDialog):
theme = CurrentComboTextProperty('ui.combo_theme')
background = ColorProperty('ui.color_background')
foreground = ColorProperty('ui.color_foreground')
data_color = ColorProperty('ui.color_default_data')
data_alpha = ValueProperty('ui.slider_alpha', value_range=(0, 1))
data_apply = ButtonProperty('ui.checkbox_apply')
show_large_data_warning = ButtonProperty('ui.checkbox_show_large_data_warning')
save_to_disk = ButtonProperty('ui.checkbox_save')
def __init__(self, application, parent=None):
super(PreferencesDialog, self).__init__(parent=parent)
self.app = application
self.ui = load_ui('preferences.ui', self,
directory=os.path.dirname(__file__))
self.ui.cancel.clicked.connect(self.reject)
self.ui.ok.clicked.connect(self.accept)
self.ui.combo_theme.currentIndexChanged.connect(nonpartial(self._update_colors_from_theme))
from glue.config import settings
self.background = settings.BACKGROUND_COLOR
self.foreground = settings.FOREGROUND_COLOR
self.data_color = settings.DATA_COLOR
self.data_alpha = settings.DATA_ALPHA
self.show_large_data_warning = settings.SHOW_LARGE_DATA_WARNING
self._update_theme_from_colors()
self.panes = []
from glue.config import preference_panes
for label, widget_cls in sorted(preference_panes):
pane = widget_cls()
self.ui.tab_widget.addTab(pane, label)
self.panes.append(pane)
def _update_theme_from_colors(self):
if (rgb(self.background) == (1, 1, 1) and rgb(self.foreground) == (0, 0, 0)
and rgb(self.data_color) == (0.35, 0.35, 0.35) and np.allclose(self.data_alpha, 0.8)):
self.theme = 'Black on White'
elif (rgb(self.background) == (0, 0, 0) and rgb(self.foreground) == (1, 1, 1)
and rgb(self.data_color) == (0.75, 0.75, 0.75) and np.allclose(self.data_alpha, 0.8)):
self.theme = 'White on Black'
else:
self.theme = 'Custom'
def _update_colors_from_theme(self):
if self.theme == 'Black on White':
self.foreground = 'black'
self.background = 'white'
self.data_color = '0.35'
self.data_alpha = 0.8
elif self.theme == 'White on Black':
self.foreground = 'white'
self.background = 'black'
self.data_color = '0.75'
self.data_alpha = 0.8
elif self.theme != 'Custom':
raise ValueError("Unknown theme: {0}".format(self.theme))
def accept(self):
# Update default settings
from glue.config import settings
settings.FOREGROUND_COLOR = self.foreground
settings.BACKGROUND_COLOR = self.background
settings.DATA_COLOR = self.data_color
settings.DATA_ALPHA = self.data_alpha
settings.SHOW_LARGE_DATA_WARNING = self.show_large_data_warning
for pane in self.panes:
pane.finalize()
# Save to disk if requested
if self.save_to_disk:
save_settings()
# Trigger viewers to update defaults
self.app._hub.broadcast(SettingsChangeMessage(self, ('FOREGROUND_COLOR', 'BACKGROUND_COLOR')))
# If requested, trigger data to update color
if self.data_apply:
self.app.set_data_color(settings.DATA_COLOR, settings.DATA_ALPHA)
super(PreferencesDialog, self).accept()
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
widget = PreferencesDialog()
widget.show()
widget.raise_()
app.exec_()
| {
"repo_name": "saimn/glue",
"path": "glue/app/qt/preferences.py",
"copies": "2",
"size": "4299",
"license": "bsd-3-clause",
"hash": -3868479306875037000,
"line_mean": 33.6693548387,
"line_max": 102,
"alpha_frac": 0.6327052803,
"autogenerated": false,
"ratio": 3.797703180212014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006028701339793003,
"num_lines": 124
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import xarray as xr
from . import randint, randn, requires_dask
try:
import dask
import dask.multiprocessing
except ImportError:
pass
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
class IOSingleNetCDF:
"""
A few examples that benchmark reading/writing a single netCDF file with
xarray
"""
timeout = 300.
repeat = 1
number = 5
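# asv benchmark settings: per-benchmark timeout in seconds, a single repeat,
# and 5 invocations of the timed function per measurement.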
def make_ds(self):
# single Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.block_chunks = {'time': self.nt / 4,
'lon': self.nx / 3,
'lat': self.ny / 3}
self.time_chunks = {'time': int(self.nt / 36)}
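# block_chunks splits every dimension into blocks, whereas time_chunks only
# chunks along the time axis; both are passed as `chunks=` by the dask
# benchmark classes below.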
times = pd.date_range('1970-01-01', periods=self.nt, freq='D')
lons = xr.DataArray(np.linspace(0, 360, self.nx), dims=('lon', ),
attrs={'units': 'degrees east',
'long_name': 'longitude'})
lats = xr.DataArray(np.linspace(-90, 90, self.ny), dims=('lat', ),
attrs={'units': 'degrees north',
'long_name': 'latitude'})
self.ds['foo'] = xr.DataArray(randn((self.nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='foo', encoding=None,
attrs={'units': 'foo units',
'description': 'a description'})
self.ds['bar'] = xr.DataArray(randn((self.nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='bar', encoding=None,
attrs={'units': 'bar units',
'description': 'a description'})
self.ds['baz'] = xr.DataArray(randn((self.nx, self.ny),
frac_nan=0.2).astype(np.float32),
coords={'lon': lons, 'lat': lats},
dims=('lon', 'lat'),
name='baz', encoding=None,
attrs={'units': 'baz units',
'description': 'a description'})
self.ds.attrs = {'history': 'created for xarray benchmarking'}
self.oinds = {'time': randint(0, self.nt, 120),
'lon': randint(0, self.nx, 20),
'lat': randint(0, self.ny, 10)}
self.vinds = {'time': xr.DataArray(randint(0, self.nt, 120),
dims='x'),
'lon': xr.DataArray(randint(0, self.nx, 120),
dims='x'),
'lat': slice(3, 20)}
class IOWriteSingleNetCDF3(IOSingleNetCDF):
def setup(self):
self.format = 'NETCDF3_64BIT'
self.make_ds()
def time_write_dataset_netcdf4(self):
self.ds.to_netcdf('test_netcdf4_write.nc', engine='netcdf4',
format=self.format)
def time_write_dataset_scipy(self):
self.ds.to_netcdf('test_scipy_write.nc', engine='scipy',
format=self.format)
class IOReadSingleNetCDF4(IOSingleNetCDF):
def setup(self):
self.make_ds()
self.filepath = 'test_single_file.nc4.nc'
self.format = 'NETCDF4'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_dataset(self.filepath, engine='netcdf4').load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4')
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4')
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF3(IOReadSingleNetCDF4):
def setup(self):
self.make_ds()
self.filepath = 'test_single_file.nc3.nc'
self.format = 'NETCDF3_64BIT'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy(self):
xr.open_dataset(self.filepath, engine='scipy').load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy')
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy')
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF4Dask(IOSingleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = 'test_single_file.nc4.nc'
self.format = 'NETCDF4'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.time_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.time_chunks).load()
class IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = 'test_single_file.nc3.nc'
self.format = 'NETCDF3_64BIT'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(self.filepath, engine='scipy',
chunks=self.block_chunks).load()
def time_load_dataset_scipy_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy',
chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_scipy_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy',
chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(self.filepath, engine='scipy',
chunks=self.time_chunks).load()
class IOMultipleNetCDF:
"""
A few examples that benchmark reading/writing multiple netCDF files with
xarray
"""
timeout = 300.
repeat = 1
number = 5
def make_ds(self, nfiles=10):
# multiple Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.nfiles = nfiles
self.block_chunks = {'time': self.nt / 4,
'lon': self.nx / 3,
'lat': self.ny / 3}
self.time_chunks = {'time': int(self.nt / 36)}
self.time_vars = np.split(
pd.date_range('1970-01-01', periods=self.nt, freq='D'),
self.nfiles)
self.ds_list = []
self.filenames_list = []
for i, times in enumerate(self.time_vars):
ds = xr.Dataset()
nt = len(times)
lons = xr.DataArray(np.linspace(0, 360, self.nx), dims=('lon', ),
attrs={'units': 'degrees east',
'long_name': 'longitude'})
lats = xr.DataArray(np.linspace(-90, 90, self.ny), dims=('lat', ),
attrs={'units': 'degrees north',
'long_name': 'latitude'})
ds['foo'] = xr.DataArray(randn((nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='foo', encoding=None,
attrs={'units': 'foo units',
'description': 'a description'})
ds['bar'] = xr.DataArray(randn((nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='bar', encoding=None,
attrs={'units': 'bar units',
'description': 'a description'})
ds['baz'] = xr.DataArray(randn((self.nx, self.ny),
frac_nan=0.2).astype(np.float32),
coords={'lon': lons, 'lat': lats},
dims=('lon', 'lat'),
name='baz', encoding=None,
attrs={'units': 'baz units',
'description': 'a description'})
ds.attrs = {'history': 'created for xarray benchmarking'}
self.ds_list.append(ds)
self.filenames_list.append('test_netcdf_%i.nc' % i)
class IOWriteMultipleNetCDF3(IOMultipleNetCDF):
def setup(self):
self.make_ds()
self.format = 'NETCDF3_64BIT'
def time_write_dataset_netcdf4(self):
xr.save_mfdataset(self.ds_list, self.filenames_list,
engine='netcdf4',
format=self.format)
def time_write_dataset_scipy(self):
xr.save_mfdataset(self.ds_list, self.filenames_list,
engine='scipy',
format=self.format)
class IOReadMultipleNetCDF4(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF4'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4').load()
def time_open_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4')
class IOReadMultipleNetCDF3(IOReadMultipleNetCDF4):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF3_64BIT'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine='scipy').load()
def time_open_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine='scipy')
class IOReadMultipleNetCDF4Dask(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF4'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks).load()
def time_open_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks)
def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks)
def time_open_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks)
def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks)
class IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF3_64BIT'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.block_chunks).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.time_chunks).load()
def time_open_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.block_chunks)
def time_open_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.time_chunks)
def create_delayed_write():
import dask.array as da
vals = da.random.random(300, chunks=(1,))
ds = xr.Dataset({'vals': (['a'], vals)})
return ds.to_netcdf('file.nc', engine='netcdf4', compute=False)
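# With compute=False, to_netcdf returns a dask delayed object, so the
# benchmark classes below can time the actual write via .compute().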
class IOWriteNetCDFDask:
timeout = 60
repeat = 1
number = 5
def setup(self):
requires_dask()
self.write = create_delayed_write()
def time_write(self):
self.write.compute()
class IOWriteNetCDFDaskDistributed:
def setup(self):
try:
import distributed
except ImportError:
raise NotImplementedError
self.client = distributed.Client()
self.write = create_delayed_write()
def cleanup(self):
self.client.shutdown()
def time_write(self):
self.write.compute()
| {
"repo_name": "shoyer/xray",
"path": "asv_bench/benchmarks/dataset_io.py",
"copies": "1",
"size": "16435",
"license": "apache-2.0",
"hash": 7250270921864721000,
"line_mean": 35.6852678571,
"line_max": 78,
"alpha_frac": 0.526194098,
"autogenerated": false,
"ratio": 3.927120669056153,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953314767056154,
"avg_score": 0,
"num_lines": 448
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import seaborn as sns
import tensorflow as tf
from matplotlib import pyplot as plt
from scipy import stats
from sklearn.mixture import GaussianMixture
from tensorflow.python.keras import Sequential
from odin import visual as vis
from odin.networks import MixtureDensityNetwork
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
tf.random.set_seed(8)
np.random.seed(8)
n = 1200
n_components = 12
x = []
for i in range(n_components):
x.append(
stats.norm.rvs(size=(n, 1), loc=i * 12,
scale=np.random.randint(1, 6)).astype('float32'))
x = np.concatenate(x, axis=0)
# ====== gmm ====== #
gmm = GaussianMixture(n_components=n_components,
covariance_type='spherical',
random_state=8)
gmm.fit(x)
gmm_llk = gmm.score(x)
gmm_mean = gmm.means_.ravel().astype('float32')
# ====== mdn ====== #
def fn_loss(y_true, y_pred):
# negative log-likelihood
nllk = tf.reduce_mean(-y_pred.log_prob(y_true))
return nllk
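# The MixtureDensityNetwork layer below outputs a distribution object, so
# y_pred.log_prob(y_true) above is the per-sample mixture log-likelihood
# (the same API is used for the llk and mean computations on `y` further down).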
mdn = MixtureDensityNetwork(1,
n_components=n_components,
covariance_type='none')
model = Sequential([mdn])
model.compile(optimizer='adam', loss=fn_loss)
model.fit(x=x, y=x, epochs=48, batch_size=64, verbose=True)
y = model(x)
mdn_llk = tf.reduce_mean(y.log_prob(x)).numpy()
mdn_mean = tf.reduce_mean(y.components_distribution.mean(),
axis=(0, -1)).numpy()
# ====== visualizing ====== #
fig = plt.figure()
sns.distplot(x, bins=80)
plt.title('Data')
fig = plt.figure()
sns.distplot(gmm.sample(n * n_components)[0], bins=80)
plt.title('GMM - llk: %.2f' % gmm_llk)
fig = plt.figure()
sns.distplot(y.sample().numpy(), bins=80)
plt.title('MDN - llk: %.2f' % mdn_llk)
vis.plot_save()
| {
"repo_name": "imito/odin",
"path": "tests/test_mixture_density_network.py",
"copies": "1",
"size": "1885",
"license": "mit",
"hash": 4191435767940286500,
"line_mean": 25.1805555556,
"line_max": 70,
"alpha_frac": 0.6376657825,
"autogenerated": false,
"ratio": 2.8517397881996973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3989405570699697,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import (Bernoulli, Independent,
NegativeBinomial,
Normal,
VectorDeterministic)
from odin.bay.distributions import (NegativeBinomialDisp, ZeroInflated,
concat_distribution)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
tf.random.set_seed(8)
np.random.seed(8)
def assert_consistent_statistics(d1, d2):
d = concat_distribution((d1, d2))
m1 = d1.mean()
m2 = d2.mean()
m = d.mean()
assert np.all(np.isclose(m.numpy(), tf.concat((m1, m2), axis=0).numpy()))
v1 = d1.variance()
v2 = d2.variance()
v = d.variance()
assert np.all(np.isclose(v.numpy(), tf.concat((v1, v2), axis=0).numpy()))
# This is because
# tf.random.set_seed(8)
# print(tf.random.uniform((3,), seed=1))
# print(tf.random.uniform((2,), seed=1))
# # is different from
# tf.random.set_seed(8)
# print(tf.random.uniform((5,), seed=1))
tf.random.set_seed(8)
s1 = d1.sample()
s2 = d2.sample()
tf.random.set_seed(8)
s = d.sample()
assert s.shape == tf.concat((s1, s2), axis=0).shape
assert np.all(np.isclose(s[:s1.shape[0]].numpy(), s1.numpy()))
try:
for name in d1.__class__._params_event_ndims().keys():
p1 = getattr(d1, name)
p2 = getattr(d2, name)
p = getattr(d, name)
assert np.all(np.isclose(p.numpy(), tf.concat((p1, p2), axis=0).numpy()))
except NotImplementedError:
pass
shape = (8, 2)
count = np.random.randint(0, 20, size=shape).astype('float32')
probs = np.random.rand(*shape).astype('float32')
logits = np.random.rand(*shape).astype('float32')
assert_consistent_statistics(Bernoulli(probs=probs), Bernoulli(logits=logits))
assert_consistent_statistics(Bernoulli(logits=logits), Bernoulli(logits=logits))
assert_consistent_statistics(
Independent(Bernoulli(probs=probs), reinterpreted_batch_ndims=1),
Independent(Bernoulli(logits=logits), reinterpreted_batch_ndims=1))
assert_consistent_statistics(NegativeBinomial(total_count=count, logits=logits),
NegativeBinomial(total_count=count, probs=probs))
assert_consistent_statistics(
Independent(NegativeBinomial(total_count=count, logits=logits),
reinterpreted_batch_ndims=1),
Independent(NegativeBinomial(total_count=count, probs=probs),
reinterpreted_batch_ndims=1))
assert_consistent_statistics(
ZeroInflated(NegativeBinomial(total_count=count, logits=logits),
logits=logits),
ZeroInflated(NegativeBinomial(total_count=count, probs=probs), probs=probs))
assert_consistent_statistics(
Independent(ZeroInflated(NegativeBinomial(total_count=count, logits=logits),
logits=logits),
reinterpreted_batch_ndims=1),
Independent(ZeroInflated(NegativeBinomial(total_count=count, probs=probs),
probs=probs),
reinterpreted_batch_ndims=1))
assert_consistent_statistics(
ZeroInflated(Independent(NegativeBinomial(total_count=count, logits=logits),
reinterpreted_batch_ndims=1),
logits=logits),
ZeroInflated(Independent(NegativeBinomial(total_count=count, probs=probs),
reinterpreted_batch_ndims=1),
probs=probs))
assert_consistent_statistics(NegativeBinomialDisp(loc=count, disp=count),
NegativeBinomialDisp(loc=count, disp=count))
assert_consistent_statistics(
ZeroInflated(NegativeBinomialDisp(loc=count, disp=count), probs=probs),
ZeroInflated(NegativeBinomialDisp(loc=count, disp=count), probs=probs))
inflated_dist1 = Bernoulli(logits=logits)
inflated_dist2 = Bernoulli(probs=probs)
assert_consistent_statistics(
ZeroInflated(NegativeBinomialDisp(loc=count, disp=count),
inflated_distribution=inflated_dist1),
ZeroInflated(NegativeBinomialDisp(loc=count, disp=count),
inflated_distribution=inflated_dist2))
| {
"repo_name": "imito/odin",
"path": "tests/test_stack_distributions.py",
"copies": "1",
"size": "4308",
"license": "mit",
"hash": 7284793420709947000,
"line_mean": 38.5229357798,
"line_max": 80,
"alpha_frac": 0.6511142061,
"autogenerated": false,
"ratio": 3.545679012345679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9638319621589566,
"avg_score": 0.011694719371222623,
"num_lines": 109
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import tensorflow as tf
import torch
from tensorflow.python.keras.layers import Dense
from odin import networks_torch as nt
from odin.networks import (TimeDelay, TimeDelayConv, TimeDelayConvTied,
TimeDelayDense)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
tf.random.set_seed(8)
np.random.seed(8)
torch.manual_seed(8)
x = np.random.rand(12, 80, 23).astype('float32')
for _ in range(20):
ctx = sorted(set(int(i) for i in np.random.randint(-5, 5, size=4)))
print('\n', ctx)
# ====== tensorflow ====== #
tdd = TimeDelay(
fn_layer_creator=lambda: Dense(units=128),
delay_context=ctx, #
)
y = tdd(x)
print(y.shape)
tdd = TimeDelayDense(units=128)
y = tdd(x)
print(y.shape)
tdc = TimeDelayConv(units=128)
y = tdc(x)
print(y.shape)
tdct = TimeDelayConvTied(units=128)
y = tdct(x)
print(y.shape)
# ====== pytorch ====== #
# add `nt.` to everything and the same code will work for pytorch
tdd = nt.TimeDelay(
fn_layer_creator=lambda: nt.Dense(128),
delay_context=ctx, #
)
y = tdd(x)
print(y.shape)
tdd = nt.TimeDelayDense(units=128)
y = tdd(x)
print(y.shape)
tdc = nt.TimeDelayConv(units=128)
y = tdc(x)
print(y.shape)
tdct = nt.TimeDelayConvTied(units=128)
y = tdct(x)
print(y.shape)
| {
"repo_name": "imito/odin",
"path": "tests/test_time_delay_networks.py",
"copies": "1",
"size": "1445",
"license": "mit",
"hash": 6688619488873689000,
"line_mean": 20.8939393939,
"line_max": 71,
"alpha_frac": 0.644982699,
"autogenerated": false,
"ratio": 2.768199233716475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.880046290823936,
"avg_score": 0.02254380489542296,
"num_lines": 66
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import tensorflow as tf
import torch
from odin.bay.distributions import NegativeBinomialDisp, ZeroInflated
from odin.stats import describe
from scvi.models.log_likelihood import log_nb_positive, log_zinb_positive
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
tf.random.set_seed(8)
np.random.seed(8)
torch.manual_seed(8)
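# torch_nb below draws negative-binomial samples via the Gamma-Poisson
# mixture: with p = mu / (mu + theta), the Gamma rate (1 - p) / p simplifies
# to theta / mu, so the mixed Poisson has mean mu and dispersion parameter
# theta -- matching NegativeBinomialDisp(loc=mean, disp=disp) used later.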
def torch_nb(mean, disp):
px_rate = torch.Tensor(mean)
px_r = torch.Tensor(disp)
p = px_rate / (px_rate + px_r)
r = px_r
l_train = torch.distributions.Gamma(concentration=r,
rate=(1 - p) / p).sample()
l_train = torch.clamp(l_train, max=1e18)
X = torch.distributions.Poisson(l_train).sample()
return X
shape = (12000, 800)
x = np.random.randint(1, 20, size=shape).astype('float32')
mean = np.random.randint(1, 20, size=shape).astype('float32')
disp = np.random.randint(1, 20, size=shape).astype('float32')
disp_col = np.random.randint(1, 20, size=shape[1]).astype('float32')
disp_row = np.random.randint(1, 20, size=shape[0]).astype('float32')
pi = np.random.rand(*shape).astype('float32')
# constant dispersion (only for tensorflow)
nb = NegativeBinomialDisp(loc=mean, disp=2)
llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy()
print(llk1)
# broadcast disp in column
nb = NegativeBinomialDisp(loc=mean, disp=disp_col)
llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy()
llk2 = log_nb_positive(x=torch.Tensor(x),
mu=torch.Tensor(mean),
theta=torch.Tensor(disp_col)).numpy()
print(np.all(np.isclose(llk1, llk2)))
# broadcast disp in row
try:
nb = NegativeBinomialDisp(loc=mean, disp=disp_row)
llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy()
llk2 = log_nb_positive(x=torch.Tensor(x),
mu=torch.Tensor(mean),
theta=torch.Tensor(disp_row)).numpy()
print(np.all(np.isclose(llk1, llk2)))
except:
print("NOT POSSIBLE TO BROADCAST the first dimension")
# all disp available
nb = NegativeBinomialDisp(loc=mean, disp=disp)
llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy()
llk2 = log_nb_positive(x=torch.Tensor(x),
mu=torch.Tensor(mean),
theta=torch.Tensor(disp)).numpy()
print(np.all(np.isclose(llk1, llk2)))
s1 = nb.sample().numpy()
s2 = torch_nb(mean, disp).numpy()
print(describe(s1))
print(describe(s2))
zinb = ZeroInflated(nb, probs=pi)
llk1 = tf.reduce_sum(zinb.log_prob(x), axis=1).numpy()
llk2 = log_zinb_positive(x=torch.Tensor(x),
mu=torch.Tensor(mean),
theta=torch.Tensor(disp),
pi=torch.Tensor(pi)).numpy()
print(llk1)
print(llk2)
| {
"repo_name": "imito/odin",
"path": "tests/test_negative_binomial_disp.py",
"copies": "1",
"size": "2797",
"license": "mit",
"hash": -1093659561195446000,
"line_mean": 31.9058823529,
"line_max": 73,
"alpha_frac": 0.6503396496,
"autogenerated": false,
"ratio": 2.86577868852459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.401611833812459,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import tensorflow as tf
import torch
from odin import backend as K
from odin import networks as net # tensorflow networks
from odin import networks_torch as nt # pytorch networks
tf.random.set_seed(8)
torch.manual_seed(8)
np.random.seed(8)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
x = torch.Tensor(np.random.rand(12, 8))
x1 = torch.Tensor(np.random.rand(12, 25, 8))
# ===========================================================================
# RNN
# ===========================================================================
f = nt.LSTM(units=32,
go_backwards=True,
unit_forget_bias=True,
return_sequences=True,
return_state=True,
bidirectional=True)
y = f(x1)
print(x1.shape, [i.shape for i in y])
f = nt.SimpleRNN(units=32, go_backwards=True)
y = f(x1)
print(x1.shape, y.shape)
f = nt.GRU(units=32, go_backwards=False, return_state=True)
y = f(x1)
print(x1.shape, [i.shape for i in y])
# ====== tensorflow ====== #
print()
f = net.LSTM(units=32,
go_backwards=True,
unit_forget_bias=True,
return_sequences=True,
return_state=True,
bidirectional=True)
y = f(x1.numpy())
print(x1.shape, [i.shape for i in y])
f = net.SimpleRNN(units=32, go_backwards=True)
y = f(x1.numpy())
print(x1.shape, y.shape)
f = net.GRU(units=32, go_backwards=False, return_state=True)
y = f(x1.numpy())
print(x1.shape, [i.shape for i in y])
print()
# ===========================================================================
# Basics
# ===========================================================================
f = nt.Dense(units=512)
y = f(x)
print(x.shape, y.shape)
# ===========================================================================
# CNN
# ===========================================================================
x = torch.Tensor(np.random.rand(12, 25, 8))
f = nt.Conv1D(filters=128, kernel_size=3)
y = f(x)
print(x.shape, y.shape)
x = torch.Tensor(np.random.rand(12, 25, 8))
f = nt.ConvCausal(filters=128, kernel_size=3)
y = f(x)
print(x.shape, y.shape)
x = torch.Tensor(np.random.rand(12, 25, 8))
f = nt.Conv1D(filters=128, kernel_size=3, data_format='channels_first')
y = f(x)
print(x.shape, y.shape)
x = torch.Tensor(np.random.rand(12, 32, 32, 3))
f = nt.Conv2D(filters=128, kernel_size=3, padding='same')
y = f(x)
print(x.shape, y.shape)
x = torch.Tensor(np.random.rand(12, 32, 32, 32, 3))
f = nt.Conv3D(filters=128, kernel_size=3)
y = f(x)
print(x.shape, y.shape)
| {
"repo_name": "imito/odin",
"path": "tests/test_keras_torch.py",
"copies": "1",
"size": "2654",
"license": "mit",
"hash": -7446849769077663000,
"line_mean": 26.9368421053,
"line_max": 77,
"alpha_frac": 0.5346646571,
"autogenerated": false,
"ratio": 2.9820224719101125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40166871290101125,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import torch
from six import string_types
from torch import nn
from odin.backend import concatenate, expand_dims, parse_reduction, squeeze
from odin.networks_torch.keras_torch import Conv1D, Dense, Layer
from odin.utils import as_tuple
class TimeDelay(Layer):
""" A generalized implementation of time-delayed neural network by applying
Parameters
----------
fn_layer_creator : `callable`
a function that returns a `torch.nn.Module` instance (checked by an assertion in the constructor)
delay_context : list of `int`
list of time delay taken into account
pooling : {'none', 'sum', 'min', 'max', 'avg', 'stat'} (default='sum')
pooling over the time dimension applied after the per-delay layers;
for 'stat' pooling, the mean and standard deviation are computed along
the time dimension and their concatenation is returned.
if None, no pooling is performed, the output is returned in
shape `[n_samples, n_reduced_timestep, n_new_features]`
Input shape
-----------
3D tensor with shape: `(batch_size, timesteps, input_dim)`
Output shape
------------
3D tensor with shape: `(batch_size, new_timesteps, units)`
"""
def __init__(self,
fn_layer_creator,
delay_context=(-2, -1, 0, 1, 2),
pooling='sum',
**kwargs):
super(TimeDelay, self).__init__(**kwargs)
assert callable(fn_layer_creator), \
"fn_layer_creator must be callable and return a torch.nn.Module"
self.fn_layer_creator = fn_layer_creator
# no duplicated frame index
self.delay_context = np.array(sorted(set(int(i) for i in delay_context)))
self.context_length = self.delay_context[-1] - self.delay_context[0] + 1
self.delays = self.delay_context + max(0, -self.delay_context[0])
self.min_delay = max(0, min(self.delays))
# pooling function for aggregating the outputs over time
self.pooling = 'none' if pooling is None else pooling
self.fn_pooling = parse_reduction(pooling)
all_layers = nn.ModuleList()
for time_id in range(len(self.delay_context)):
layer = fn_layer_creator()
assert isinstance(layer, torch.nn.Module), \
"fn_layer_creator must return torch.nn.Module instance, " + \
"but return type is %s" % \
str(type(layer))
# register the layer in a ModuleList so the Module tracks its parameters
all_layers.append(layer)
self.all_layers = all_layers
def call(self, inputs, training=None):
# anyway, if the smallest value is negative,
# start from 0 (i.e. relative position)
shape = inputs.shape
timestep = shape[1]
y = []
for delay, layer in zip(self.delays, self.all_layers):
start = delay
end = timestep - self.context_length + delay + 1 - self.min_delay
y.append(expand_dims(layer(inputs[:, start:end]), axis=0))
y = concatenate(y, axis=0)
y = self.fn_pooling(y, axis=0)
if isinstance(self.pooling, string_types) and \
'none' in self.pooling.lower() and \
self.context_length == 1:
y = squeeze(y, axis=0)
return y
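# Illustrative usage sketch (mirrors tests/test_time_delay_networks.py):
#
#   x = torch.Tensor(np.random.rand(12, 80, 23))        # (batch, time, feat)
#   tdd = TimeDelay(fn_layer_creator=lambda: Dense(128),
#                   delay_context=(-2, -1, 0, 1, 2),
#                   pooling='sum')
#   y = tdd(x)   # one Dense per delay, outputs summed over the context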
class TimeDelayDense(TimeDelay):
""" The implementaiton of time delay neural network
Input shape
-----------
3D tensor with shape: `(batch_size, timesteps, input_dim)`
Output shape
------------
3D tensor with shape: `(batch_size, new_timesteps, units)`
"""
def __init__(self,
units,
delay_context=(-2, -1, 0, 1, 2),
pooling='sum',
activation='linear',
use_bias=False,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(TimeDelayDense, self).__init__(fn_layer_creator=lambda: Dense(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
),
delay_context=delay_context,
pooling=pooling,
**kwargs)
class TimeDelayConv(TimeDelay):
""" This implementaiton create multiple convolutional neural network for
each time delay.
Input shape
-----------
3D tensor with shape: `(batch_size, timesteps, input_dim)`
Output shape
------------
3D tensor with shape: `(batch_size, new_timesteps, units)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
units,
kernel_size=3,
delay_context=(-2, -1, 0, 1, 2),
pooling='sum',
activation='linear',
use_bias=False,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(TimeDelayConv, self).__init__(fn_layer_creator=lambda: Conv1D(
filters=units,
kernel_size=kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
),
delay_context=delay_context,
pooling=pooling,
**kwargs)
class TimeDelayConvTied(TimeDelay):
""" Time-delayed dense implementation but using a 1D-convolutional
neural network, only support consecutive delay context (given a number
of `delay_strides`).
From the paper, it is suggested to create multiple `TimeDelayedConv`
with variate number of feature map and length of context windows,
then concatenate the outputs for `Dense` layers
For example:
- feature_maps = [50, 100, 150, 200, 200, 200, 200]
- kernels = [1, 2, 3, 4, 5, 6, 7]
Parameters
----------
units : `int`
number of new features
delay_length : `int` (default=5)
length of time delayed context
delay_strides : `int` (default=1)
specifying the strides of time window
"""
def __init__(self,
units,
delay_length=5,
delay_strides=1,
activation='linear',
use_bias=False,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(TimeDelayConvTied, self).__init__(fn_layer_creator=lambda: Conv1D(
filters=units,
kernel_size=delay_length,
strides=delay_strides,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
),
delay_context=(0,),
pooling='none',
**kwargs)
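# Illustrative sketch (not part of the original module): the multi-branch
# setup suggested in the TimeDelayConvTied docstring, using its example
# feature-map counts and context lengths. Each branch is averaged over time
# before concatenation, since different delay_length values give different
# output lengths.
if __name__ == "__main__":
    demo_x = torch.Tensor(np.random.rand(4, 80, 23))
    branches = [TimeDelayConvTied(units=u, delay_length=k)
                for u, k in zip([50, 100, 150, 200, 200, 200, 200],
                                [1, 2, 3, 4, 5, 6, 7])]
    feats = [branch(demo_x).mean(dim=1) for branch in branches]
    demo_y = torch.cat(feats, dim=-1)  # (batch, 50 + 100 + ... + 200)
    print(demo_y.shape)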
| {
"repo_name": "imito/odin",
"path": "odin/networks_torch/time_delay.py",
"copies": "1",
"size": "7043",
"license": "mit",
"hash": 7988926047575369000,
"line_mean": 30.8687782805,
"line_max": 77,
"alpha_frac": 0.5861138719,
"autogenerated": false,
"ratio": 4.101921956901572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5188035828801572,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from qtpy import QtCore, QtWidgets
from glue.utils.qt import load_ui
from glue.utils import nonpartial
from glue.icons.qt import get_icon
from glue.core.state_objects import State, CallbackProperty
from glue.external.echo.qt import autoconnect_callbacks_to_qt
class SliceState(State):
label = CallbackProperty()
slider_label = CallbackProperty()
slider_unit = CallbackProperty()
slice_center = CallbackProperty()
use_world = CallbackProperty()
class SliceWidget(QtWidgets.QWidget):
slice_changed = QtCore.Signal(int)
def __init__(self, label='', world=None, lo=0, hi=10,
parent=None, world_unit=None,
world_warning=False):
super(SliceWidget, self).__init__(parent)
self.state = SliceState()
self.state.label = label
self.state.slice_center = (lo + hi) // 2
self._world = np.asarray(world)
self._world_warning = world_warning
self._world_unit = world_unit
self.ui = load_ui('data_slice_widget.ui', self,
directory=os.path.dirname(__file__))
autoconnect_callbacks_to_qt(self.state, self.ui)
font = self.text_warning.font()
font.setPointSize(int(font.pointSize() * 0.75))
self.text_warning.setFont(font)
self.button_first.setStyleSheet('border: 0px')
self.button_first.setIcon(get_icon('playback_first'))
self.button_prev.setStyleSheet('border: 0px')
self.button_prev.setIcon(get_icon('playback_prev'))
self.button_back.setStyleSheet('border: 0px')
self.button_back.setIcon(get_icon('playback_back'))
self.button_stop.setStyleSheet('border: 0px')
self.button_stop.setIcon(get_icon('playback_stop'))
self.button_forw.setStyleSheet('border: 0px')
self.button_forw.setIcon(get_icon('playback_forw'))
self.button_next.setStyleSheet('border: 0px')
self.button_next.setIcon(get_icon('playback_next'))
self.button_last.setStyleSheet('border: 0px')
self.button_last.setIcon(get_icon('playback_last'))
self.value_slice_center.setMinimum(lo)
self.value_slice_center.setMaximum(hi)
self.value_slice_center.valueChanged.connect(nonpartial(self.set_label_from_slider))
self.text_slider_label.setMinimumWidth(80)
self.state.slider_label = str(self.value_slice_center.value())
self.text_slider_label.editingFinished.connect(nonpartial(self.set_slider_from_label))
self._play_timer = QtCore.QTimer()
self._play_timer.setInterval(500)
self._play_timer.timeout.connect(nonpartial(self._play_slice))
self.button_first.clicked.connect(nonpartial(self._browse_slice, 'first'))
self.button_prev.clicked.connect(nonpartial(self._browse_slice, 'prev'))
self.button_back.clicked.connect(nonpartial(self._adjust_play, 'back'))
self.button_stop.clicked.connect(nonpartial(self._adjust_play, 'stop'))
self.button_forw.clicked.connect(nonpartial(self._adjust_play, 'forw'))
self.button_next.clicked.connect(nonpartial(self._browse_slice, 'next'))
self.button_last.clicked.connect(nonpartial(self._browse_slice, 'last'))
self.bool_use_world.toggled.connect(nonpartial(self.set_label_from_slider))
if world is None:
self.state.use_world = False
self.bool_use_world.hide()
else:
self.state.use_world = not world_warning
if world_unit:
self.state.slider_unit = world_unit
else:
self.state.slider_unit = ''
self._play_speed = 0
self.set_label_from_slider()
def set_label_from_slider(self):
value = self.state.slice_center
if self.state.use_world:
value = self._world[value]
if self._world_warning:
self.text_warning.show()
else:
self.text_warning.hide()
self.state.slider_unit = self._world_unit
else:
self.text_warning.hide()
self.state.slider_unit = ''
self.state.slider_label = str(value)
def set_slider_from_label(self):
# Ignore recursive calls - we do this rather than ignore_callback
# below when setting slider_label, otherwise we might be stopping other
# subscribers to that event from being correctly updated
if getattr(self, '_in_set_slider_from_label', False):
return
else:
self._in_set_slider_from_label = True
text = self.text_slider_label.text()
if self.state.use_world:
# Don't want to assume world is sorted, pick closest value
value = np.argmin(np.abs(self._world - float(text)))
self.state.slider_label = str(self._world[value])
else:
value = int(text)
self.value_slice_center.setValue(value)
self._in_set_slider_from_label = False
def _adjust_play(self, action):
if action == 'stop':
self._play_speed = 0
elif action == 'back':
if self._play_speed > 0:
self._play_speed = -1
else:
self._play_speed -= 1
elif action == 'forw':
if self._play_speed < 0:
self._play_speed = +1
else:
self._play_speed += 1
if self._play_speed == 0:
self._play_timer.stop()
else:
self._play_timer.start()
self._play_timer.setInterval(500 / abs(self._play_speed))
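# Example: two clicks on 'forw' give _play_speed == +2, so the timer fires
# every 500 / 2 = 250 ms and _play_slice steps forward one slice per tick;
# 'back' clicks make the speed negative and step backwards.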
def _play_slice(self):
if self._play_speed > 0:
self._browse_slice('next', play=True)
elif self._play_speed < 0:
self._browse_slice('prev', play=True)
def _browse_slice(self, action, play=False):
imin = self.value_slice_center.minimum()
imax = self.value_slice_center.maximum()
value = self.value_slice_center.value()
# If this was not called from _play_slice, we should stop the
# animation.
if not play:
self._adjust_play('stop')
if action == 'first':
value = imin
elif action == 'last':
value = imax
elif action == 'prev':
value = value - 1
if value < imin:
value = imax
elif action == 'next':
value = value + 1
if value > imax:
value = imin
else:
raise ValueError("Action should be one of first/prev/next/last")
self.value_slice_center.setValue(value)
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
widget = SliceWidget(label='BANANA')
widget.show()
widget = SliceWidget(world=[1, 2, 3, 4, 5, 6, 7], lo=1, hi=7)
widget.show()
app.exec_()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/common/qt/data_slice_widget.py",
"copies": "2",
"size": "6972",
"license": "bsd-3-clause",
"hash": 8313825844746829000,
"line_mean": 33.1764705882,
"line_max": 94,
"alpha_frac": 0.599827883,
"autogenerated": false,
"ratio": 3.752421959095802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5352249842095802,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from toolz import first
from .dispatch import dispatch
import datashape
import shutil
from blaze.utils import tmpfile
from odo import resource
try:
import tables as tb
from tables import Table
except ImportError:
Table = type(None)
__all__ = ['PyTables']
def dtype_to_pytables(dtype):
""" Convert NumPy dtype to PyTable descriptor
Examples
--------
>>> from tables import Int32Col, StringCol, Time64Col
>>> dt = np.dtype([('name', 'S7'), ('amount', 'i4'), ('time', 'M8[us]')])
>>> dtype_to_pytables(dt) # doctest: +SKIP
{'amount': Int32Col(shape=(), dflt=0, pos=1),
'name': StringCol(itemsize=7, shape=(), dflt='', pos=0),
'time': Time64Col(shape=(), dflt=0.0, pos=2)}
"""
d = {}
for pos, name in enumerate(dtype.names):
dt, _ = dtype.fields[name]
if issubclass(dt.type, np.datetime64):
tdtype = tb.Description({name: tb.Time64Col(pos=pos)}),
else:
tdtype = tb.descr_from_dtype(np.dtype([(name, dt)]))
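# The datetime branch above deliberately builds a 1-tuple (note the trailing
# comma) so that, like tb.descr_from_dtype -- which returns a
# (description, byteorder) pair -- both branches unwrap uniformly via first().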
el = first(tdtype)
getattr(el, name)._v_pos = pos
d.update(el._v_colobjects)
return d
def PyTables(path, datapath, dshape=None, **kwargs):
"""Create or open a ``tables.Table`` object.
Parameters
----------
path : str
Path to a PyTables HDF5 file.
datapath : str
The name of the node in the ``tables.File``.
dshape : str or datashape.DataShape
DataShape to use to create the ``Table``.
Returns
-------
t : tables.Table
Examples
--------
>>> from blaze.utils import tmpfile
>>> # create from scratch
>>> with tmpfile('.h5') as f:
... t = PyTables(f, '/bar',
... dshape='var * {volume: float64, planet: string[10, "A"]}')
... data = [(100.3, 'mars'), (100.42, 'jupyter')]
... t.append(data)
... t[:] # doctest: +SKIP
...
array([(100.3, b'mars'), (100.42, b'jupyter')],
dtype=[('volume', '<f8'), ('planet', 'S10')])
"""
def possibly_create_table(filename, dtype):
f = tb.open_file(filename, mode='a')
try:
if datapath not in f:
if dtype is None:
raise ValueError('dshape cannot be None and datapath not'
' in file')
else:
f.create_table('/', datapath.lstrip('/'), description=dtype)
finally:
f.close()
if dshape:
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if dshape[0] == datashape.var:
dshape = dshape.subshape[0]
dtype = dtype_to_pytables(datashape.to_numpy_dtype(dshape))
else:
dtype = None
if os.path.exists(path):
possibly_create_table(path, dtype)
else:
with tmpfile('.h5') as filename:
possibly_create_table(filename, dtype)
shutil.copyfile(filename, path)
return tb.open_file(path, mode='a').get_node(datapath)
@dispatch(Table)
def chunks(b, chunksize=2**15):
start = 0
n = len(b)
while start < n:
yield b[start:start + chunksize]
start += chunksize
@dispatch(Table, int)
def get_chunk(b, i, chunksize=2**15):
start = chunksize * i
stop = chunksize * (i + 1)
return b[start:stop]
@resource.register('.+\.h5')
def resource_pytables(path, datapath=None, **kwargs):
if not datapath:
return tb.open_file(path)
else:
return PyTables(path, datapath, **kwargs)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/pytables.py",
"copies": "1",
"size": "3639",
"license": "bsd-3-clause",
"hash": -7819087954397837000,
"line_mean": 26.7786259542,
"line_max": 83,
"alpha_frac": 0.5622423743,
"autogenerated": false,
"ratio": 3.5502439024390244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4612486276739024,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from glue.tests.helpers import make_file
from glue.core.data_factories.helpers import find_factory
from glue.core import data_factories as df
from glue.tests.helpers import requires_astrodendro
DATA = os.path.join(os.path.dirname(__file__), 'data')
@requires_astrodendro
@pytest.mark.parametrize('filename', ['dendro.fits', 'dendro_old.fits', 'dendro.hdf5'])
def test_is_dendro(filename):
from ..data_factory import is_dendro
assert is_dendro(os.path.join(DATA, filename))
@requires_astrodendro
@pytest.mark.parametrize('filename', ['dendro.fits', 'dendro_old.fits', 'dendro.hdf5'])
def test_find_factory(filename):
from ..data_factory import load_dendro
assert find_factory(os.path.join(DATA, filename)) is load_dendro
@requires_astrodendro
def test_identifier_heuristics(tmpdir):
filename = tmpdir.join('test.fits').strpath
from ..data_factory import is_dendro
from astropy.io import fits
hdulist = fits.HDUList()
hdulist.append(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
hdulist.writeto(filename)
assert not is_dendro(filename)
hdulist.append(fits.ImageHDU())
hdulist.writeto(filename, overwrite=True)
assert not is_dendro(filename)
hdulist[1].name = 'random'
hdulist.writeto(filename, overwrite=True)
assert not is_dendro(filename)
hdulist[1].name = ''
hdulist[0].data = np.array([1, 2, 3])
hdulist.writeto(filename, overwrite=True)
assert not is_dendro(filename)
hdulist[0].data = None
hdulist[1].data = np.ones((3, 4))
hdulist[2].data = np.ones((2, 4))
hdulist[3].data = np.ones((3, 5))
hdulist.writeto(filename, overwrite=True)
assert not is_dendro(filename)
hdulist[2].data = np.ones((3, 4))
hdulist.writeto(filename, overwrite=True)
assert not is_dendro(filename)
hdulist[3].data = np.ones(3)
hdulist.writeto(filename, overwrite=True)
assert is_dendro(filename)
@requires_astrodendro
def test_dendrogram_load():
from ..data_factory import load_dendro
data = b"""x\xda\xed\xda]K\xc2`\x18\xc6\xf1^\xbe\xc8}fA\xe4[X\x14\x1eX\x99<\x90S\xd8\x02O\x9f\xf2Q<\xd8&\xcf&\xe4\xb7\xcft\x82\xc9\xe6\x1be\x91\xff\xdf\xc9\xc5\xd8v\xc1vt\xeff\xaej\xb6\x9f\xeb"UI\xe1I^\xde\xc2\xa0\x17Z?\x928\x94\'\xe5\xb9\x12\xc5:\xe8j\xdb\x95T\xf7\xcak\xabNF\xdf\xcd\xa4O[\xab\xc7\xd2\xd5\xb1\x96x<4\xb2\x86S\xeb(W2\xfa\n\x93\xbe`\xe4\xbf\x1a+ao\xde<\xf0M\x10\r\xc2 J\xed\xabw\xbc\xba\xf3\x98\xf9\xbc[\x9b\x96\x01\x00\x00\xe0`|\x8e\x93\xaej9U\xc9\xa9f\xad1\x99\xa4%\xb7p:/\xca\xd7}#\xe6=\x9eM\xa5\xeb\xfaV\xcd\xcf\x95\xabo\x9e\x9f\x8b\xdb\xcf\xcf\xd3\xbebF_e\xfb\xf7\xd7~h\xbd8\xdeF\xf3\xfdP[\xed\x9b\xd8\xd8hE_cU\xdf\xd7\xe7\xed\xdbp4\x8c\x98\xef\x01\x00\x00\xf6\xeah\xe68\xc9\x93$O3\x8e\xe7\xd7\x01\x00\x00\x00\x07i\x9f\xfb\xe7r\x89\xfd3\xfbg\x00\x00\x80\x7f\xb1\x7fN\xdbA\x03\x00\x00\x00\xf8\xc5\xfd\xf3_\xff\xff\xb9t\xcd\xfe\x19\x00\x00\x00\x1b\xed\x9f\xcf\x96\xb2\x98\xe4m\x92\xe5$/\x93,d\xe4E\x92\xa5\x1d\xef?_:\xde\xf5\xfe;\xbe\x8c\x00\x00\x00\xf0\x13>\x00\x8e\xbe x"""
with make_file(data, 'fits', decompress=True) as fname:
dg, im = df.load_data(fname, factory=load_dendro)
assert_array_equal(im['intensity'], [1, 2, 3, 2, 3, 1])
assert_array_equal(im['structure'], [0, 0, 1, 0, 2, 0])
assert_array_equal(dg['parent'], [-1, 0, 0])
assert_array_equal(dg['height'], [3, 3, 3])
assert_array_equal(dg['peak'], [3, 3, 3])
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/plugins/dendro_viewer/tests/test_data_factory.py",
"copies": "2",
"size": "3608",
"license": "bsd-3-clause",
"hash": 7593548819647206000,
"line_mean": 36.9789473684,
"line_max": 998,
"alpha_frac": 0.699556541,
"autogenerated": false,
"ratio": 2.320257234726688,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4019813775726688,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pytest
@pytest.mark.filterwarnings("default")
def test_yield_tests_deprecation(testdir):
testdir.makepyfile(
"""
def func1(arg, arg2):
assert arg == arg2
def test_gen():
yield "m1", func1, 15, 3*5
yield "m2", func1, 42, 6*7
def test_gen2():
for k in range(10):
yield func1, 1, 1
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_yield_tests_deprecation.py:3:*yield tests are deprecated*",
"*test_yield_tests_deprecation.py:6:*yield tests are deprecated*",
"*2 passed*",
]
)
assert result.stdout.str().count("yield tests are deprecated") == 2
@pytest.mark.filterwarnings("default")
def test_funcarg_prefix_deprecation(testdir):
testdir.makepyfile(
"""
def pytest_funcarg__value():
return 10
def test_funcarg_prefix(value):
assert value == 10
"""
)
result = testdir.runpytest("-ra")
result.stdout.fnmatch_lines(
[
(
"*test_funcarg_prefix_deprecation.py:1: *pytest_funcarg__value: "
'declaring fixtures using "pytest_funcarg__" prefix is deprecated*'
),
"*1 passed*",
]
)
@pytest.mark.filterwarnings("default")
def test_pytest_setup_cfg_deprecated(testdir):
testdir.makefile(
".cfg",
setup="""
[pytest]
addopts = --verbose
""",
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*"]
)
@pytest.mark.filterwarnings("default")
def test_pytest_custom_cfg_deprecated(testdir):
testdir.makefile(
".cfg",
custom="""
[pytest]
addopts = --verbose
""",
)
result = testdir.runpytest("-c", "custom.cfg")
result.stdout.fnmatch_lines(
["*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*"]
)
def test_str_args_deprecated(tmpdir):
"""Deprecate passing strings to pytest.main(). Scheduled for removal in pytest-4.0."""
from _pytest.main import EXIT_NOTESTSCOLLECTED
warnings = []
class Collect(object):
def pytest_warning_captured(self, warning_message):
warnings.append(str(warning_message.message))
ret = pytest.main("%s -x" % tmpdir, plugins=[Collect()])
msg = (
"passing a string to pytest.main() is deprecated, "
"pass a list of arguments instead."
)
assert msg in warnings
assert ret == EXIT_NOTESTSCOLLECTED
def test_getfuncargvalue_is_deprecated(request):
pytest.deprecated_call(request.getfuncargvalue, "tmpdir")
@pytest.mark.filterwarnings("default")
def test_resultlog_is_deprecated(testdir):
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines(["*DEPRECATED path for machine-readable result log*"])
testdir.makepyfile(
"""
def test():
pass
"""
)
result = testdir.runpytest("--result-log=%s" % testdir.tmpdir.join("result.log"))
result.stdout.fnmatch_lines(
[
"*--result-log is deprecated and scheduled for removal in pytest 4.0*",
"*See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information*",
]
)
@pytest.mark.filterwarnings("always:Metafunc.addcall is deprecated")
def test_metafunc_addcall_deprecated(testdir):
testdir.makepyfile(
"""
def pytest_generate_tests(metafunc):
metafunc.addcall({'i': 1})
metafunc.addcall({'i': 2})
def test_func(i):
pass
"""
)
res = testdir.runpytest("-s")
assert res.ret == 0
res.stdout.fnmatch_lines(
["*Metafunc.addcall is deprecated*", "*2 passed, 2 warnings*"]
)
def test_terminal_reporter_writer_attr(pytestconfig):
"""Check that TerminalReporter._tw is also available as 'writer' (#2984)
This attribute is planned to be deprecated in 3.4.
"""
try:
import xdist # noqa
pytest.skip("xdist workers disable the terminal reporter plugin")
except ImportError:
pass
terminal_reporter = pytestconfig.pluginmanager.get_plugin("terminalreporter")
assert terminal_reporter.writer is terminal_reporter._tw
@pytest.mark.parametrize("plugin", ["catchlog", "capturelog"])
def test_pytest_catchlog_deprecated(testdir, plugin):
testdir.makepyfile(
"""
def test_func(pytestconfig):
pytestconfig.pluginmanager.register(None, 'pytest_{}')
""".format(
plugin
)
)
res = testdir.runpytest()
assert res.ret == 0
res.stdout.fnmatch_lines(
["*pytest-*log plugin has been merged into the core*", "*1 passed, 1 warnings*"]
)
def test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir):
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
testdir.makepyfile(
**{
"subdirectory/conftest.py": """
pytest_plugins=['capture']
"""
}
)
testdir.makepyfile(
"""
def test_func():
pass
"""
)
res = testdir.runpytest()
assert res.ret == 0
msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
res.stdout.fnmatch_lines(
"*subdirectory{sep}conftest.py:0: RemovedInPytest4Warning: {msg}*".format(
sep=os.sep, msg=msg
)
)
@pytest.mark.parametrize("use_pyargs", [True, False])
def test_pytest_plugins_in_non_top_level_conftest_deprecated_pyargs(
testdir, use_pyargs
):
"""When using --pyargs, do not emit the warning about non-top-level conftest warnings (#4039, #4044)"""
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
files = {
"src/pkg/__init__.py": "",
"src/pkg/conftest.py": "",
"src/pkg/test_root.py": "def test(): pass",
"src/pkg/sub/__init__.py": "",
"src/pkg/sub/conftest.py": "pytest_plugins=['capture']",
"src/pkg/sub/test_bar.py": "def test(): pass",
}
testdir.makepyfile(**files)
testdir.syspathinsert(testdir.tmpdir.join("src"))
args = ("--pyargs", "pkg") if use_pyargs else ()
res = testdir.runpytest(*args)
assert res.ret == 0
msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
if use_pyargs:
assert msg not in res.stdout.str()
else:
res.stdout.fnmatch_lines("*{msg}*".format(msg=msg))
def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_top_level_conftest(
testdir
):
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
subdirectory = testdir.tmpdir.join("subdirectory")
subdirectory.mkdir()
testdir.makeconftest(
"""
import warnings
warnings.filterwarnings('always', category=DeprecationWarning)
pytest_plugins=['capture']
"""
)
testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
testdir.makepyfile(
"""
def test_func():
pass
"""
)
res = testdir.runpytest_subprocess()
assert res.ret == 0
msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
res.stdout.fnmatch_lines(
"*subdirectory{sep}conftest.py:0: RemovedInPytest4Warning: {msg}*".format(
sep=os.sep, msg=msg
)
)
def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_false_positives(
testdir
):
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
subdirectory = testdir.tmpdir.join("subdirectory")
subdirectory.mkdir()
testdir.makeconftest(
"""
pass
"""
)
testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
testdir.makeconftest(
"""
import warnings
warnings.filterwarnings('always', category=DeprecationWarning)
pytest_plugins=['capture']
"""
)
testdir.makepyfile(
"""
def test_func():
pass
"""
)
res = testdir.runpytest_subprocess()
assert res.ret == 0
msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
assert msg not in res.stdout.str()
def test_call_fixture_function_deprecated():
"""Check if a warning is raised if a fixture function is called directly (#3661)"""
@pytest.fixture
def fix():
return 1
with pytest.deprecated_call():
assert fix() == 1
def test_pycollector_makeitem_is_deprecated():
from _pytest.python import PyCollector
from _pytest.warning_types import RemovedInPytest4Warning
class PyCollectorMock(PyCollector):
"""evil hack"""
def __init__(self):
self.called = False
def _makeitem(self, *k):
"""hack to disable the actual behaviour"""
self.called = True
collector = PyCollectorMock()
with pytest.warns(RemovedInPytest4Warning):
collector.makeitem("foo", "bar")
assert collector.called
| {
"repo_name": "davidszotten/pytest",
"path": "testing/deprecated_test.py",
"copies": "1",
"size": "9280",
"license": "mit",
"hash": 4941749283685641000,
"line_mean": 27.4662576687,
"line_max": 118,
"alpha_frac": 0.6139008621,
"autogenerated": false,
"ratio": 3.857024106400665,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9967948862106731,
"avg_score": 0.0005952212787867427,
"num_lines": 326
} |
from __future__ import absolute_import, division, print_function
import os
import stripe
from flask import Flask, request, redirect
stripe.api_key = os.environ.get("STRIPE_SECRET_KEY")
stripe.client_id = os.environ.get("STRIPE_CLIENT_ID")
app = Flask(__name__)
@app.route("/")
def index():
return '<a href="/authorize">Connect with Stripe</a>'
@app.route("/authorize")
def authorize():
url = stripe.OAuth.authorize_url(scope="read_only")
return redirect(url)
@app.route("/oauth/callback")
def callback():
code = request.args.get("code")
try:
resp = stripe.OAuth.token(grant_type="authorization_code", code=code)
except stripe.oauth_error.OAuthError as e:
return "Error: " + str(e)
return """
<p>Success! Account <code>{stripe_user_id}</code> is connected.</p>
<p>Click <a href="/deauthorize?stripe_user_id={stripe_user_id}">here</a> to
disconnect the account.</p>
""".format(
stripe_user_id=resp["stripe_user_id"]
)
@app.route("/deauthorize")
def deauthorize():
stripe_user_id = request.args.get("stripe_user_id")
try:
stripe.OAuth.deauthorize(stripe_user_id=stripe_user_id)
except stripe.oauth_error.OAuthError as e:
return "Error: " + str(e)
return """
<p>Success! Account <code>{stripe_user_id}</code> is disconnected.</p>
<p>Click <a href="/">here</a> to restart the OAuth flow.</p>
""".format(
stripe_user_id=stripe_user_id
)
if __name__ == "__main__":
app.run(port=int(os.environ.get("PORT", 5000)))
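# Illustrative notes, not part of the original example: to try the flow
# locally, export the two environment variables read at the top of this file
# and start the app, then open http://localhost:5000/ in a browser. The key
# values below are placeholders.
#
#   $ export STRIPE_SECRET_KEY=sk_test_xxx
#   $ export STRIPE_CLIENT_ID=ca_xxx
#   $ python oauth.py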
| {
"repo_name": "stripe/stripe-python",
"path": "examples/oauth.py",
"copies": "1",
"size": "1527",
"license": "mit",
"hash": 9106938337279194000,
"line_mean": 24.45,
"line_max": 77,
"alpha_frac": 0.6516044532,
"autogenerated": false,
"ratio": 3.1290983606557377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4280702813855738,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
# Stripe Python bindings
# API docs at http://stripe.com/docs/api
# Authors:
# Patrick Collison <patrick@stripe.com>
# Greg Brockman <gdb@stripe.com>
# Andrew Metcalf <andrew@stripe.com>
# Configuration variables
api_key = None
client_id = None
api_base = "https://api.stripe.com"
connect_api_base = "https://connect.stripe.com"
upload_api_base = "https://files.stripe.com"
api_version = None
verify_ssl_certs = True
proxy = None
default_http_client = None
app_info = None
enable_telemetry = True
max_network_retries = 0
ca_bundle_path = os.path.join(
os.path.dirname(__file__), "data", "ca-certificates.crt"
)
# Set to either 'debug' or 'info', controls console logging
log = None
# API resources
from stripe.api_resources import * # noqa
# OAuth
from stripe.oauth import OAuth # noqa
# Webhooks
from stripe.webhook import Webhook, WebhookSignature # noqa
# Sets some basic information about the running application that's sent along
# with API requests. Useful for plugin authors to identify their plugin when
# communicating with Stripe.
#
# Takes a name and optional version and plugin URL.
def set_app_info(name, partner_id=None, url=None, version=None):
global app_info
app_info = {
"name": name,
"partner_id": partner_id,
"url": url,
"version": version,
}
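# Illustrative usage sketch, not part of the original module: a plugin would
# typically identify itself once at startup. The name, version and URL below
# are placeholders; this helper is never invoked here.
def _example_set_app_info():
    set_app_info("ExamplePlugin", version="1.2.3",
                 url="https://example.com/plugin")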
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/__init__.py",
"copies": "1",
"size": "1399",
"license": "mit",
"hash": -7784308622320020000,
"line_mean": 24.4363636364,
"line_max": 77,
"alpha_frac": 0.7105075054,
"autogenerated": false,
"ratio": 3.2995283018867925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45100358072867924,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
try:
from inspect import getfullargspec
except ImportError: # Python 2.7
from inspect import getargspec as getfullargspec
from qtpy import QtWidgets
from qtpy import PYSIDE
from glue import core
from glue.config import link_function, link_helper
from glue.utils import nonpartial
from glue.utils.qt import load_ui, messagebox_on_error, update_combobox
from glue.utils.qt.widget_properties import CurrentComboTextProperty, CurrentComboDataProperty
__all__ = ['LinkEquation']
def get_function_name(item):
if hasattr(item, 'display') and item.display is not None:
return item.display
else:
return item.__name__
def function_label(function):
""" Provide a label for a function
:param function: A member from the glue.config.link_function registry
"""
args = getfullargspec(function.function)[0]
args = ', '.join(args)
output = function.output_labels
output = ', '.join(output)
label = "Link from %s to %s" % (args, output)
return label
def helper_label(helper):
""" Provide a label for a link helper
:param helper: A member from the glue.config.link_helper registry
"""
return helper.info
class ArgumentWidget(QtWidgets.QWidget):
def __init__(self, argument, parent=None):
super(ArgumentWidget, self).__init__(parent)
self.layout = QtWidgets.QHBoxLayout()
self.layout.setContentsMargins(1, 0, 1, 1)
self.setLayout(self.layout)
label = QtWidgets.QLabel(argument)
self._label = label
self._component_id = None
self.layout.addWidget(label)
self.editor = QtWidgets.QLineEdit()
self.editor.setReadOnly(True)
try:
self.editor.setPlaceholderText("Drag a component from above")
except AttributeError: # feature added in Qt 4.7
pass
self.layout.addWidget(self.editor)
self.setAcceptDrops(True)
@property
def component_id(self):
return self._component_id
@component_id.setter
def component_id(self, cid):
self._component_id = cid
self.editor.setText(str(cid))
@property
def label(self):
return self._label.text()
@label.setter
def label(self, label):
self._label.setText(label)
@property
def editor_text(self):
return self.editor.text()
def clear(self):
self.component_id = None
self.editor.clear()
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('application/py_instance'):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
obj = event.mimeData().data('application/py_instance')
if isinstance(obj, list):
obj = obj[0]
if not isinstance(obj, core.data.ComponentID):
event.ignore()
return
self.component_id = obj
event.accept()
class LinkEquation(QtWidgets.QWidget):
""" Interactively define ComponentLinks from existing functions
This widget inspects the calling signatures of helper functions,
and presents the user with an interface for assigning componentIDs
to the input and output arguments. It also generates ComponentLinks
from this information.
ComponentIDs are assigned to arguments via drag and drop. This
widget is used within the LinkEditor dialog
Usage::
widget = LinkEquation()
"""
category = CurrentComboTextProperty('_ui.category')
function = CurrentComboDataProperty('_ui.function')
def __init__(self, parent=None):
super(LinkEquation, self).__init__(parent)
# Set up mapping of function/helper name -> function/helper tuple. For the helpers, we use the 'display' name if available.
self._argument_widgets = []
self.spacer = None
self._output_widget = ArgumentWidget("")
# pyqt4 can't take self as second argument here
# for some reason. Manually embed
self._ui = load_ui('link_equation.ui', None,
directory=os.path.dirname(__file__))
l = QtWidgets.QHBoxLayout()
l.addWidget(self._ui)
self.setLayout(l)
self._init_widgets()
self._populate_category_combo()
self.category = 'General'
self._populate_function_combo()
self._connect()
self._setup_editor()
def set_result_visible(self, state):
self._ui.output_canvas.setVisible(state)
self._ui.output_label.setVisible(state)
def is_helper(self):
return self.function is not None and \
type(self.function).__name__ == 'LinkHelper'
def is_function(self):
return self.function is not None and \
type(self.function).__name__ == 'LinkFunction'
def _init_widgets(self):
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(1)
self._ui.input_canvas.setLayout(layout)
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(1, 0, 1, 1)
self._ui.output_canvas.setLayout(layout)
layout.addWidget(self._output_widget)
spacer = QtWidgets.QSpacerItem(5, 5,
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
layout.addItem(spacer)
font = self._ui.info.font()
font.setPointSize(font.pointSize() * 1.4)
self._ui.info.setFont(font)
@property
def signature(self):
""" Returns the ComponentIDs assigned to the input and output arguments
:rtype: tuple of (input, output). Input is a list of ComponentIDs.
output is a ComponentID
"""
inp = [a.component_id for a in self._argument_widgets]
out = self._output_widget.component_id
return inp, out
@signature.setter
def signature(self, inout):
inp, out = inout
for i, a in zip(inp, self._argument_widgets):
a.component_id = i
self._output_widget.component_id = out
@messagebox_on_error("Failed to create links")
def links(self):
""" Create ComponentLinks from the state of the widget
:rtype: list of ComponentLinks that can be created.
If no links can be created (e.g. because of missing input),
the empty list is returned
"""
inp, out = self.signature
if self.is_function():
using = self.function.function
if not all(inp) or not out:
return []
link = core.component_link.ComponentLink(inp, out, using)
return [link]
if self.is_helper():
helper = self.function.helper
if not all(inp):
return []
return helper(*inp)
def _update_add_enabled(self):
state = True
for a in self._argument_widgets:
state = state and a.component_id is not None
if self.is_function():
state = state and self._output_widget.component_id is not None
def _connect(self):
signal = self._ui.function.currentIndexChanged
signal.connect(nonpartial(self._setup_editor))
signal.connect(nonpartial(self._update_add_enabled))
self._output_widget.editor.textChanged.connect(nonpartial(self._update_add_enabled))
self._ui.category.currentIndexChanged.connect(self._populate_function_combo)
def clear_inputs(self):
for w in self._argument_widgets:
w.clear()
self._output_widget.clear()
def _setup_editor(self):
if self.is_function():
self._setup_editor_function()
else:
self._setup_editor_helper()
def _setup_editor_function(self):
""" Prepare the widget for the active function."""
assert self.is_function()
self.set_result_visible(True)
func = self.function.function
args = getfullargspec(func)[0]
label = function_label(self.function)
self._ui.info.setText(label)
self._output_widget.label = self.function.output_labels[0]
self._clear_input_canvas()
for a in args:
self._add_argument_widget(a)
self.spacer = QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self._ui.input_canvas.layout().addItem(self.spacer)
def _setup_editor_helper(self):
"""Setup the editor for the selected link helper"""
assert self.is_helper()
self.set_result_visible(False)
label = helper_label(self.function)
args = self.function.input_labels
self._ui.info.setText(label)
self._clear_input_canvas()
for a in args:
self._add_argument_widget(a)
self.spacer = QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self._ui.input_canvas.layout().addItem(self.spacer)
def _add_argument_widget(self, argument):
""" Create and add a single argument widget to the input canvas
        :param argument: The argument name (string)
"""
widget = ArgumentWidget(argument)
widget.editor.textChanged.connect(nonpartial(self._update_add_enabled))
self._ui.input_canvas.layout().addWidget(widget)
self._argument_widgets.append(widget)
def _clear_input_canvas(self):
""" Remove all widgets from the input canvas """
layout = self._ui.input_canvas.layout()
for a in self._argument_widgets:
layout.removeWidget(a)
a.close()
if not PYSIDE:
# PySide crashing here
layout.removeItem(self.spacer)
self._argument_widgets = []
def _populate_category_combo(self):
f = [f for f in link_function.members if len(f.output_labels) == 1]
categories = sorted(set(l.category for l in f + link_helper.members))
update_combobox(self._ui.category, list(zip(categories, categories)))
def _populate_function_combo(self):
""" Add name of functions to function combo box """
f = [f for f in link_function.members if len(f.output_labels) == 1]
functions = ((get_function_name(l[0]), l) for l in f + link_helper.members if l.category == self.category)
update_combobox(self._ui.function, functions)
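# Illustrative sketch, not part of the original module: ``function_label``
# only needs an object exposing ``function`` and ``output_labels``, so a
# hypothetical stand-in (this is not how glue registers link functions)
# shows the label it produces. The helper is never invoked here.
def _example_function_label():
    from collections import namedtuple
    FakeLink = namedtuple('FakeLink', 'function output_labels')

    def celsius_to_kelvin(celsius):
        return celsius + 273.15

    link = FakeLink(function=celsius_to_kelvin, output_labels=['kelvin'])
    return function_label(link)  # "Link from celsius to kelvin"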
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/link_editor/qt/link_equation.py",
"copies": "1",
"size": "10524",
"license": "bsd-3-clause",
"hash": 5079556409656396000,
"line_mean": 32.4095238095,
"line_max": 131,
"alpha_frac": 0.6222919042,
"autogenerated": false,
"ratio": 4.117370892018779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5239662796218779,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
base_dir = os.path.join(os.path.dirname(__file__), os.pardir)
about = {}
with open(os.path.join(base_dir, "virtualenv", "__about__.py")) as f:
exec(f.read(), about)
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = about["__title__"]
copyright = about["__copyright__"]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = release = about["__version__"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_rtd_theme:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "virtualenvdoc"
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"virtualenv",
"virtualenv Documentation",
about["__author__"],
1,
),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"https://docs.python.org/": None,
}
epub_theme = "epub"
| {
"repo_name": "ionelmc/virtualenv",
"path": "docs/conf.py",
"copies": "1",
"size": "2665",
"license": "mit",
"hash": -7539529149794524000,
"line_mean": 26.7604166667,
"line_max": 79,
"alpha_frac": 0.6600375235,
"autogenerated": false,
"ratio": 3.9191176470588234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5079155170558823,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os.path as op
import keras
from .due import due, Doi # noqa
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# And calls this with duecredit (as in `python -m duecredit script.py`):
# due.cite(Doi("XXX"),
# description="",
# tags=["", ""],
# path='XXX')
class LossHistory(keras.callbacks.Callback):
def __init__(self, log_path, loss_name, valid_name, pid):
keras.callbacks.Callback.__init__(self)
self.log_path = log_path
self.loss_name = loss_name
self.valid_name = valid_name
        self.pid = pid
def on_train_begin(self, logs={}):
self.losses = []
self.valid = []
self.lastiter = 0
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get(self.loss_name))
self.lastiter = len(self.losses) - 1
self.valid.append([self.lastiter, logs.get(self.valid_name)])
        # join relative to log_path; a leading slash would make os.path.join
        # discard the log_path prefix
        with open(op.join(self.log_path, "%d" % self.pid,
                          "history.txt"), "w") as fo:
strings = ["train\t%d\t%.4f" % (i, x) for i, x in
enumerate(self.losses)]
fo.write("\n".join(strings) + "\n")
strings = ["valid\t%d\t%.4f" % (i, x) for i, x in self.valid]
fo.write("\n".join(strings) + "\n")
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get(self.loss_name))
self.lastiter = len(self.losses) - 1
        with open(op.join(self.log_path, "%d" % self.pid,
                          "history.txt"), "w") as fo:
strings = ["train\t%d\t%.4f" % (i, x) for i, x in
enumerate(self.losses)]
fo.write("\n".join(strings) + "\n")
strings = ["valid\t%d\t%.4f" % (i, x) for i, x in self.valid]
fo.write("\n".join(strings) + "\n")
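# Illustrative usage sketch, not part of the original module: the callback is
# handed to ``model.fit``. The model, data and log directory are placeholders,
# 'loss'/'val_loss' are the standard Keras log keys, and the
# <log_path>/<pid>/ directory must already exist. Never invoked here.
def _example_fit_with_history(model, x_train, y_train, x_val, y_val):
    history = LossHistory("/tmp/keratin_logs", "loss", "val_loss", pid=0)
    model.fit(x_train, y_train, validation_data=(x_val, y_val),
              callbacks=[history])
    return history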
| {
"repo_name": "uw-biomedical-ml/keratin",
"path": "keratin/keratin.py",
"copies": "1",
"size": "1984",
"license": "mit",
"hash": 5516094152929797000,
"line_mean": 37.1538461538,
"line_max": 77,
"alpha_frac": 0.5483870968,
"autogenerated": false,
"ratio": 3.273927392739274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4322314489539274,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os.path
from glob import glob
from io import BytesIO
from numbers import Number
import numpy as np
from .. import Dataset, backends, conventions
from ..core import indexing
from ..core.combine import auto_combine
from ..core.pycompat import basestring, path_type
from ..core.utils import close_on_error, is_remote_uri
from .common import (
HDF5_LOCK, ArrayWriter, CombinedLock, get_scheduler, get_scheduler_lock)
DATAARRAY_NAME = '__xarray_dataarray_name__'
DATAARRAY_VARIABLE = '__xarray_dataarray_variable__'
def _get_default_engine(path, allow_remote=False):
if allow_remote and is_remote_uri(path): # pragma: no cover
try:
import netCDF4
engine = 'netcdf4'
except ImportError:
try:
import pydap # flake8: noqa
engine = 'pydap'
except ImportError:
raise ValueError('netCDF4 or pydap is required for accessing '
'remote datasets via OPeNDAP')
else:
try:
import netCDF4 # flake8: noqa
engine = 'netcdf4'
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf # flake8: noqa
engine = 'scipy'
except ImportError:
raise ValueError('cannot read or write netCDF files without '
'netCDF4-python or scipy installed')
return engine
def _normalize_path(path):
if is_remote_uri(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def _default_lock(filename, engine):
if filename.endswith('.gz'):
lock = False
else:
if engine is None:
engine = _get_default_engine(filename, allow_remote=True)
if engine == 'netcdf4':
if is_remote_uri(filename):
lock = False
else:
# TODO: identify netcdf3 files and don't use the global lock
# for them
lock = HDF5_LOCK
elif engine in {'h5netcdf', 'pynio'}:
lock = HDF5_LOCK
else:
lock = False
return lock
def _validate_dataset_names(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_name(name):
if isinstance(name, basestring):
if not name:
raise ValueError('Invalid name for DataArray or Dataset key: '
'string must be length 1 or greater for '
'serialization to netCDF files')
elif name is not None:
raise TypeError('DataArray.name or Dataset key must be either a '
'string or None for serialization to netCDF files')
for k in dataset.variables:
check_name(k)
def _validate_attrs(dataset):
"""`attrs` must have a string key and a value which is either: a number
a string, an ndarray or a list/tuple of numbers/strings.
"""
def check_attr(name, value):
if isinstance(name, basestring):
if not name:
raise ValueError('Invalid name for attr: string must be '
'length 1 or greater for serialization to '
'netCDF files')
else:
raise TypeError("Invalid name for attr: {} must be a string for "
"serialization to netCDF files".format(name))
if not isinstance(value, (basestring, Number, np.ndarray, np.number,
list, tuple)):
raise TypeError('Invalid value for attr: {} must be a number '
'string, ndarray or a list/tuple of '
'numbers/strings for serialization to netCDF '
'files'.format(value))
# Check attrs on the dataset itself
for k, v in dataset.attrs.items():
check_attr(k, v)
# Check attrs on each variable within the dataset
for variable in dataset.variables.values():
for k, v in variable.attrs.items():
check_attr(k, v)
def _protect_dataset_variables_inplace(dataset, cache):
for name, variable in dataset.variables.items():
if name not in variable.dims:
# no need to protect IndexVariable objects
data = indexing.CopyOnWriteArray(variable._data)
if cache:
data = indexing.MemoryCachedArray(data)
variable.data = data
def _get_lock(engine, scheduler, format, path_or_file):
""" Get the lock(s) that apply to a particular scheduler/engine/format"""
locks = []
if format in ['NETCDF4', None] and engine in ['h5netcdf', 'netcdf4']:
locks.append(HDF5_LOCK)
locks.append(get_scheduler_lock(scheduler, path_or_file))
# When we have more than one lock, use the CombinedLock wrapper class
lock = CombinedLock(locks) if len(locks) > 1 else locks[0]
return lock
def _finalize_store(write, store):
""" Finalize this store by explicitly syncing and closing"""
del write # ensure writing is done first
store.sync()
store.close()
def open_dataset(filename_or_obj, group=None, decode_cf=True,
mask_and_scale=True, decode_times=True, autoclose=False,
concat_characters=True, decode_coords=True, engine=None,
chunks=None, lock=None, cache=None, drop_variables=None):
"""Load and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). File-like objects are opened
with scipy.io.netcdf (only netCDF3 supported).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
autoclose : bool, optional
If True, automatically close files to avoid OS Error of too many files
being open. However, this option doesn't work with streams, e.g.,
BytesIO.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio'}, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
        If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks={}`` loads the dataset with dask using a single
chunk for all arrays.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a global lock is
used when reading data from netCDF files with the netcdf4 and h5netcdf
engines to avoid issues with concurrent access when using dask's
multithreaded backend.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_mfdataset
"""
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
if cache is None:
cache = chunks is None
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store, mask_and_scale=mask_and_scale, decode_times=decode_times,
concat_characters=concat_characters, decode_coords=decode_coords,
drop_variables=drop_variables)
_protect_dataset_variables_inplace(ds, cache)
if chunks is not None:
from dask.base import tokenize
# if passed an actual file path, augment the token with
# the file modification time
if (isinstance(filename_or_obj, basestring) and
not is_remote_uri(filename_or_obj)):
mtime = os.path.getmtime(filename_or_obj)
else:
mtime = None
token = tokenize(filename_or_obj, mtime, group, decode_cf,
mask_and_scale, decode_times, concat_characters,
decode_coords, engine, chunks, drop_variables)
name_prefix = 'open_dataset-%s' % token
ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token,
lock=lock)
ds2._file_obj = ds._file_obj
else:
ds2 = ds
# protect so that dataset store isn't necessarily closed, e.g.,
# streams like BytesIO can't be reopened
# datastore backend is responsible for determining this capability
if store._autoclose:
store.close()
return ds2
if isinstance(filename_or_obj, path_type):
filename_or_obj = str(filename_or_obj)
if isinstance(filename_or_obj, backends.AbstractDataStore):
store = filename_or_obj
elif isinstance(filename_or_obj, basestring):
if (isinstance(filename_or_obj, bytes) and
filename_or_obj.startswith(b'\x89HDF')):
raise ValueError('cannot read netCDF4/HDF5 file images')
elif (isinstance(filename_or_obj, bytes) and
filename_or_obj.startswith(b'CDF')):
# netCDF3 file images are handled by scipy
pass
elif isinstance(filename_or_obj, basestring):
filename_or_obj = _normalize_path(filename_or_obj)
if filename_or_obj.endswith('.gz'):
if engine is not None and engine != 'scipy':
raise ValueError('can only read gzipped netCDF files with '
"default engine or engine='scipy'")
else:
engine = 'scipy'
if engine is None:
engine = _get_default_engine(filename_or_obj,
allow_remote=True)
if engine == 'netcdf4':
store = backends.NetCDF4DataStore.open(filename_or_obj,
group=group,
autoclose=autoclose)
elif engine == 'scipy':
store = backends.ScipyDataStore(filename_or_obj,
autoclose=autoclose)
elif engine == 'pydap':
store = backends.PydapDataStore.open(filename_or_obj)
elif engine == 'h5netcdf':
store = backends.H5NetCDFStore(filename_or_obj, group=group,
autoclose=autoclose)
elif engine == 'pynio':
store = backends.NioDataStore(filename_or_obj,
autoclose=autoclose)
else:
raise ValueError('unrecognized engine for open_dataset: %r'
% engine)
if lock is None:
lock = _default_lock(filename_or_obj, engine)
with close_on_error(store):
return maybe_decode_store(store, lock)
else:
if engine is not None and engine != 'scipy':
raise ValueError('can only read file-like objects with '
"default engine or engine='scipy'")
# assume filename_or_obj is a file-like object
store = backends.ScipyDataStore(filename_or_obj)
return maybe_decode_store(store)
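# Illustrative usage sketch, not part of the original module: the file name,
# variable name and chunk size below are placeholders. Never invoked here.
def _example_open_dataset(path='observations.nc'):
    # open lazily, chunking a hypothetical 'time' dimension with dask
    ds = open_dataset(path, decode_times=True, chunks={'time': 365})
    return ds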
def open_dataarray(filename_or_obj, group=None, decode_cf=True,
mask_and_scale=True, decode_times=True, autoclose=False,
concat_characters=True, decode_coords=True, engine=None,
chunks=None, lock=None, cache=None, drop_variables=None):
"""Open an DataArray from a netCDF file containing a single data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). File-like objects are opened
with scipy.io.netcdf (only netCDF3 supported).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
autoclose : bool, optional
If True, automatically close files to avoid OS Error of too many files
being open. However, this option doesn't work with streams, e.g.,
BytesIO.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio'}, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
        If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a global lock is
used when reading data from netCDF files with the netcdf4 and h5netcdf
engines to avoid issues with concurrent access when using dask's
multithreaded backend.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(filename_or_obj, group=group, decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times, autoclose=autoclose,
concat_characters=concat_characters,
decode_coords=decode_coords, engine=engine,
chunks=chunks, lock=lock, cache=cache,
drop_variables=drop_variables)
if len(dataset.data_vars) != 1:
raise ValueError('Given file dataset contains more than one data '
'variable. Please read with xarray.open_dataset and '
'then select the variable you want.')
else:
data_array, = dataset.data_vars.values()
data_array._file_obj = dataset._file_obj
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array
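# Illustrative usage sketch, not part of the original module: the path is a
# placeholder and must point at a file with exactly one data variable.
# Never invoked here.
def _example_open_dataarray(path='single_variable.nc'):
    return open_dataarray(path)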
class _MultiFileCloser(object):
def __init__(self, file_objs):
self.file_objs = file_objs
def close(self):
for f in self.file_objs:
f.close()
_CONCAT_DIM_DEFAULT = '__infer_concat_dim__'
def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
compat='no_conflicts', preprocess=None, engine=None,
lock=None, data_vars='all', coords='different',
autoclose=False, parallel=False, **kwargs):
"""Open multiple files as a single dataset.
Requires dask to be installed. See documentation for details on dask [1].
Attributes from the first dataset file are used for the combined dataset.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/*.nc" or an explicit
list of files to open. Paths can be given as strings or as pathlib
Paths.
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
If int, chunk each dimension by ``chunks``.
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see the
full documentation for more details [2].
concat_dim : None, str, DataArray or Index, optional
Dimension to concatenate files along. This argument is passed on to
:py:func:`xarray.auto_combine` along with the dataset objects. You only
need to provide this argument if the dimension along which you want to
concatenate is not a dimension in the original datasets, e.g., if you
want to stack a collection of 2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio'}, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
autoclose : bool, optional
If True, automatically close files to avoid OS Error of too many files
being open. However, this option doesn't work with streams, e.g.,
BytesIO.
lock : False, True or threading.Lock, optional
This argument is passed on to :py:func:`dask.array.from_array`. By
default, a per-variable lock is used when reading data from netCDF
files with the netcdf4 and h5netcdf engines to avoid issues with
concurrent access when using dask's multithreaded backend.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
    coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
        in addition to the 'minimal' coordinates.
parallel : bool, optional
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
See Also
--------
auto_combine
open_dataset
References
----------
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
"""
if isinstance(paths, basestring):
if is_remote_uri(paths):
raise ValueError(
'cannot do wild-card matching for paths that are remote URLs: '
'{!r}. Instead, supply paths as an explicit list of strings.'
.format(paths))
paths = sorted(glob(paths))
else:
paths = [str(p) if isinstance(p, path_type) else p for p in paths]
if not paths:
raise IOError('no files to open')
if lock is None:
lock = _default_lock(paths[0], engine)
open_kwargs = dict(engine=engine, chunks=chunks or {}, lock=lock,
autoclose=autoclose, **kwargs)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
file_objs = [getattr_(ds, '_file_obj') for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, file_objs = dask.compute(datasets, file_objs)
# close datasets in case of a ValueError
try:
if concat_dim is _CONCAT_DIM_DEFAULT:
combined = auto_combine(datasets, compat=compat,
data_vars=data_vars, coords=coords)
else:
combined = auto_combine(datasets, concat_dim=concat_dim,
compat=compat,
data_vars=data_vars, coords=coords)
except ValueError:
for ds in datasets:
ds.close()
raise
combined._file_obj = _MultiFileCloser(file_objs)
combined.attrs = datasets[0].attrs
return combined
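# Illustrative usage sketch, not part of the original module: the glob
# pattern, dimension name and variable name are placeholders. Never invoked
# here.
def _example_open_mfdataset(pattern='data/temps_*.nc'):
    # concatenate matching files along a hypothetical 'time' dimension,
    # keeping only a hypothetical 'temperature' variable from each file
    return open_mfdataset(pattern, concat_dim='time',
                          preprocess=lambda ds: ds[['temperature']])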
WRITEABLE_STORES = {'netcdf4': backends.NetCDF4DataStore.open,
'scipy': backends.ScipyDataStore,
'h5netcdf': backends.H5NetCDFStore}
def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
engine=None, writer=None, encoding=None, unlimited_dims=None,
compute=True):
"""This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``writer`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, path_type):
path_or_file = str(path_or_file)
if encoding is None:
encoding = {}
if path_or_file is None:
if engine is None:
engine = 'scipy'
elif engine != 'scipy':
raise ValueError('invalid engine for creating bytes with '
'to_netcdf: %r. Only the default engine '
"or engine='scipy' is supported" % engine)
elif isinstance(path_or_file, basestring):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
else: # file-like object
engine = 'scipy'
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
try:
store_open = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError('unrecognized engine for to_netcdf: %r' % engine)
if format is not None:
format = format.upper()
# if a writer is provided, store asynchronously
sync = writer is None
# handle scheduler specific logic
scheduler = get_scheduler()
if (dataset.chunks and scheduler in ['distributed', 'multiprocessing'] and
engine != 'netcdf4'):
raise NotImplementedError("Writing netCDF files with the %s backend "
"is not currently supported with dask's %s "
"scheduler" % (engine, scheduler))
lock = _get_lock(engine, scheduler, format, path_or_file)
autoclose = (dataset.chunks and
scheduler in ['distributed', 'multiprocessing'])
target = path_or_file if path_or_file is not None else BytesIO()
store = store_open(target, mode, format, group, writer,
autoclose=autoclose, lock=lock)
if unlimited_dims is None:
unlimited_dims = dataset.encoding.get('unlimited_dims', None)
if isinstance(unlimited_dims, basestring):
unlimited_dims = [unlimited_dims]
try:
dataset.dump_to_store(store, sync=sync, encoding=encoding,
unlimited_dims=unlimited_dims, compute=compute)
if path_or_file is None:
return target.getvalue()
finally:
if sync and isinstance(path_or_file, basestring):
store.close()
if not compute:
import dask
return dask.delayed(_finalize_store)(store.delayed_store, store)
if not sync:
return store
def save_mfdataset(datasets, paths, mode='w', format=None, groups=None,
engine=None, compute=True):
"""Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of xarray.Dataset
List of datasets to save.
paths : list of str or list of Paths
List of paths to which to save each corresponding dataset.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
these locations will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format='NETCDF4'). The groups will be created
if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> years, datasets = zip(*ds.groupby('time.year'))
>>> paths = ['%s.nc' % y for y in years]
>>> xr.save_mfdataset(datasets, paths)
"""
if mode == 'w' and len(set(paths)) < len(paths):
raise ValueError("cannot use mode='w' when writing multiple "
'datasets to the same path')
for obj in datasets:
if not isinstance(obj, Dataset):
raise TypeError('save_mfdataset only supports writing Dataset '
                            'objects, received type %s' % type(obj))
if groups is None:
groups = [None] * len(datasets)
if len(set([len(datasets), len(paths), len(groups)])) > 1:
raise ValueError('must supply lists of the same length for the '
'datasets, paths and groups arguments to '
'save_mfdataset')
writer = ArrayWriter() if compute else None
stores = [to_netcdf(ds, path, mode, format, group, engine, writer,
compute=compute)
for ds, path, group in zip(datasets, paths, groups)]
if not compute:
import dask
return dask.delayed(stores)
try:
delayed = writer.sync(compute=compute)
for store in stores:
store.sync()
finally:
for store in stores:
store.close()
def to_zarr(dataset, store=None, mode='w-', synchronizer=None, group=None,
encoding=None, compute=True):
"""This function creates an appropriate datastore for writing a dataset to
    a zarr store.
See `Dataset.to_zarr` for full API docs.
"""
if isinstance(store, path_type):
store = str(store)
if encoding is None:
encoding = {}
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
store = backends.ZarrStore.open_group(store=store, mode=mode,
synchronizer=synchronizer,
group=group, writer=None)
# I think zarr stores should always be sync'd immediately
# TODO: figure out how to properly handle unlimited_dims
dataset.dump_to_store(store, sync=True, encoding=encoding, compute=compute)
if not compute:
import dask
return dask.delayed(_finalize_store)(store.delayed_store, store)
return store
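# Illustrative usage sketch, not part of the original module: these writers
# are normally reached through Dataset.to_netcdf / Dataset.to_zarr; the
# target paths are placeholders. Never invoked here.
def _example_write(dataset):
    to_netcdf(dataset, 'out.nc', mode='w', engine='netcdf4')
    return to_zarr(dataset, store='out.zarr', mode='w-')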
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/api.py",
"copies": "1",
"size": "35346",
"license": "apache-2.0",
"hash": 8544805690203871000,
"line_mean": 41.381294964,
"line_max": 80,
"alpha_frac": 0.6264923895,
"autogenerated": false,
"ratio": 4.4555653598890705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.558205774938907,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os.path
import datetime
from warnings import warn
import copy
import csv
import sqlite3
import numpy as np
from astropy.io import fits
import pandas
from sunpy.time import parse_time
from sunpy import config
from sunpy.util.net import check_download_file
from sunpy import lightcurve
from sunpy.extern.six.moves import urllib
LYTAF_REMOTE_PATH = "http://proba2.oma.be/lyra/data/lytaf/"
LYTAF_PATH = config.get("downloads", "download_dir")
def remove_lytaf_events_from_lightcurve(lc, artifacts=None,
return_artifacts=False,
lytaf_path=None,
force_use_local_lytaf=False):
"""
Removes periods of LYRA artifacts defined in LYTAF from a LYRALightCurve.
Parameters
----------
lc : `sunpy.lightcurve.LightCurve`
artifacts : list of strings
Contain the artifact types to be removed. For list of artifact types
see reference [1]. For example, if user wants to remove only large
angle rotations, listed at reference [1] as LAR, let artifacts=["LAR"].
        This argument is required; a missing or empty value raises ValueError.
return_artifacts : `bool`
Set to True to return a `numpy.recarray` containing the start time, end
time and type of all artifacts removed.
Default=False
lytaf_path : `str`
directory path where the LYRA annotation files are stored.
force_use_local_lytaf : `bool`
Ensures current local version of lytaf files are not replaced by
up-to-date online versions even if current local lytaf files do not
cover entire input time range etc.
Default=False
Returns
-------
lc_new : `sunpy.lightcurve.LightCurve`
copy of input LYRALightCurve with periods corresponding to artifacts
removed.
artifact_status : `dict`
List of 4 variables containing information on what artifacts were
found, removed, etc. from the time series.
artifact_status["lytaf"] = artifacts found : `numpy.recarray`
The full LYRA annotation file for the time series time range
output by get_lytaf_events().
artifact_status["removed"] = artifacts removed : `numpy.recarray`
        Artifacts which were found and removed from the time series.
artifact_status["not_removed"] = artifacts found but not removed :
`numpy.recarray`
Artifacts which were found but not removed as they were not
included when user defined artifacts kwarg.
artifact_status["not_found"] = artifacts not found : `list` of strings
Artifacts listed to be removed by user when defining
artifacts kwarg which were not found in time series time range.
References
----------
[1] http://proba2.oma.be/data/TARDIS
Examples
--------
    Remove LARs (Large Angle Rotations) from a LYRALightCurve for 2-Dec-2014:
    >>> from sunpy import lightcurve
    >>> lyralc = lightcurve.LYRALightCurve.create("2014-12-02")
    >>> lc_nolars = remove_lytaf_events_from_lightcurve(lyralc, artifacts=["LAR"])
    To also retrieve information on the artifacts during that day:
    >>> lc_nolars, artifact_status = remove_lytaf_events_from_lightcurve(
    ...     lyralc, artifacts=["LAR"], return_artifacts=True)
"""
# Check that input argument is of correct type
if not lytaf_path:
lytaf_path = LYTAF_PATH
if not isinstance(lc, lightcurve.LightCurve):
raise TypeError("lc must be a LightCurve object.")
# Remove artifacts from time series
data_columns = lc.data.columns
time, channels, artifact_status = _remove_lytaf_events(
lc.data.index,
channels=[np.asanyarray(lc.data[col]) for col in data_columns],
artifacts=artifacts, return_artifacts=True, lytaf_path=lytaf_path,
force_use_local_lytaf=force_use_local_lytaf)
    # Create a new copy of the lightcurve and replace its data with
# artifact-free time series.
lc_new = copy.deepcopy(lc)
lc_new.data = pandas.DataFrame(
index=time, data=dict((col, channels[i])
for i, col in enumerate(data_columns)))
if return_artifacts:
return lc_new, artifact_status
else:
return lc_new
def _remove_lytaf_events(time, channels=None, artifacts=None,
return_artifacts=False, fitsfile=None,
csvfile=None, filecolumns=None,
lytaf_path=None, force_use_local_lytaf=False):
"""
Removes periods of LYRA artifacts from a time series.
This functions removes periods corresponding to certain artifacts recorded
in the LYRA annotation file from an array of times given by the time input.
If a list of arrays of other properties is supplied through the channels
kwarg, then the relevant values from these arrays are also removed. This
is done by assuming that each element in each array supplied corresponds to
the time in the same index in time array. The artifacts to be removed are
given via the artifacts kwarg. The default is "all", meaning that all
artifacts will be removed. However, a subset of artifacts can be removed
by supplying a list of strings of the desired artifact types.
Parameters
----------
time : `numpy.ndarray` of `datetime.datetime`
Gives the times of the timeseries.
channels : `list` of `numpy.array` convertible to float64.
Contains arrays of the irradiances taken at the times in the time
variable. Each element in the list must have the same number of
elements as time.
artifacts : `list` of strings
Contain the artifact types to be removed. For list of artifact types
see reference [1]. For example, if user wants to remove only large
angle rotations, listed at reference [1] as LAR, let artifacts=["LAR"].
        This argument is required; a missing or empty value raises ValueError.
return_artifacts : `bool`
Set to True to return a numpy recarray containing the start time, end
time and type of all artifacts removed.
Default=False
fitsfile : `str`
file name (including file path and suffix, .fits) of output fits file
which is generated if this kwarg is not None.
Default=None, i.e. no fits file is output.
csvfile : `str`
file name (including file path and suffix, .csv) of output csv file
which is generated if this kwarg is not None.
Default=None, i.e. no csv file is output.
filecolumns : `list` of strings
Gives names of columns of any output files produced. Although
initially set to None above, the default is in fact
["time", "channel0", "channel1",..."channelN"]
where N is the number of irradiance arrays in the channels input
(assuming 0-indexed counting).
lytaf_path : `str`
directory path where the LYRA annotation files are stored.
force_use_local_lytaf : `bool`
Ensures current local version of lytaf files are not replaced by
up-to-date online versions even if current local lytaf files do not
cover entire input time range etc.
Default=False
Returns
-------
clean_time : `numpy.ndarray` of `datetime.datetime`
time array with artifact periods removed.
clean_channels : `list` ndarrays/array-likes convertible to float64
list of irradiance arrays with artifact periods removed.
artifact_status : `dict`
List of 4 variables containing information on what artifacts were
found, removed, etc. from the time series.
artifact_status["lytaf"] = artifacts found : `numpy.recarray`
The full LYRA annotation file for the time series time range
output by get_lytaf_events().
artifact_status["removed"] = artifacts removed : `numpy.recarray`
        Artifacts which were found and removed from the time series.
artifact_status["not_removed"] = artifacts found but not removed :
`numpy.recarray`
Artifacts which were found but not removed as they were not
included when user defined artifacts kwarg.
artifact_status["not_found"] = artifacts not found : `list` of strings
            Artifact types listed by the user in the artifacts kwarg which
            were not found in the time series time range.
References
----------
[1] http://proba2.oma.be/data/TARDIS
    Examples
    --------
    Sample data for example
    >>> import numpy as np
    >>> from datetime import datetime, timedelta
    >>> time = np.array([datetime(2013, 2, 1) + timedelta(minutes=i)
    ...                  for i in range(120)])
    >>> channel_1 = np.zeros(len(time)) + 0.4
    >>> channel_2 = np.zeros(len(time)) + 0.1
    Remove LARs (Large Angle Rotations) from the time series.
    >>> time_clean, channels_clean = _remove_lytaf_events(
    ...     time, channels=[channel_1, channel_2], artifacts=['LAR'])
"""
# Check inputs
if not lytaf_path:
lytaf_path = LYTAF_PATH
if channels and type(channels) is not list:
raise TypeError("channels must be None or a list of numpy arrays "
"of dtype 'float64'.")
if not artifacts:
raise ValueError("User has supplied no artifacts to remove.")
if type(artifacts) is str:
artifacts = [artifacts]
if not all(isinstance(artifact_type, str) for artifact_type in artifacts):
raise TypeError("All elements in artifacts must in strings.")
all_lytaf_event_types = get_lytaf_event_types(lytaf_path=lytaf_path,
print_event_types=False)
for artifact in artifacts:
if artifact not in all_lytaf_event_types:
print(all_lytaf_event_types)
raise ValueError("{0} is not a valid artifact type. See above.".format(artifact))
# Define outputs
clean_time = np.array([parse_time(t) for t in time])
clean_channels = copy.deepcopy(channels)
artifacts_not_found = []
# Get LYTAF file for given time range
lytaf = get_lytaf_events(time[0], time[-1], lytaf_path=lytaf_path,
force_use_local_lytaf=force_use_local_lytaf)
# Find events in lytaf which are to be removed from time series.
artifact_indices = np.empty(0, dtype="int64")
for artifact_type in artifacts:
indices = np.where(lytaf["event_type"] == artifact_type)[0]
# If none of a given type of artifact is found, record this
# type in artifact_not_found list.
if len(indices) == 0:
artifacts_not_found.append(artifact_type)
else:
# Else, record the indices of the artifacts of this type
artifact_indices = np.concatenate((artifact_indices, indices))
artifact_indices.sort()
# Remove relevant artifacts from timeseries. If none of the
# artifacts the user wanted removed were found, raise a warning and
# continue with code.
if not len(artifact_indices):
warn("None of user supplied artifacts were found.")
artifacts_not_found = artifacts
else:
# Remove periods corresponding to artifacts from flux and time
# arrays.
bad_indices = np.empty(0, dtype="int64")
all_indices = np.arange(len(time))
for index in artifact_indices:
bad_period = np.logical_and(time >= lytaf["begin_time"][index],
time <= lytaf["end_time"][index])
bad_indices = np.append(bad_indices, all_indices[bad_period])
clean_time = np.delete(clean_time, bad_indices)
if channels:
for i, f in enumerate(clean_channels):
clean_channels[i] = np.delete(f, bad_indices)
    # If return_artifacts kwarg is True, return a dictionary containing
    # information on what artifacts were found, removed, etc. See docstring.
if return_artifacts:
artifact_status = {"lytaf": lytaf,
"removed": lytaf[artifact_indices],
"not_removed": np.delete(lytaf, artifact_indices),
"not_found": artifacts_not_found}
# Output FITS file if fits kwarg is set
if fitsfile:
# Create time array of time strings rather than datetime objects
# and verify filecolumns have been correctly input. If None,
# generate generic filecolumns (see docstring of function called
        # below).
string_time, filecolumns = _prep_columns(time, channels, filecolumns)
# Prepare column objects.
cols = [fits.Column(name=filecolumns[0], format="26A",
array=string_time)]
if channels:
for i, f in enumerate(channels):
cols.append(fits.Column(name=filecolumns[i+1], format="D",
array=f))
coldefs = fits.ColDefs(cols)
tbhdu = fits.new_table(coldefs)
hdu = fits.PrimaryHDU()
tbhdulist = fits.HDUList([hdu, tbhdu])
# Write data to fits file.
tbhdulist.writeto(fitsfile)
# Output csv file if csv kwarg is set.
if csvfile:
# Create time array of time strings rather than datetime objects
# and verify filecolumns have been correctly input. If None,
# generate generic filecolumns (see docstring of function called
        # below).
string_time, filecolumns = _prep_columns(time, channels, filecolumns)
# Open and write data to csv file.
with open(csvfile, 'w') as openfile:
csvwriter = csv.writer(openfile, delimiter=';')
# Write header.
csvwriter.writerow(filecolumns)
# Write data.
if not channels:
for i in range(len(time)):
csvwriter.writerow(string_time[i])
else:
for i in range(len(time)):
row = [string_time[i]]
for f in channels:
row.append(f[i])
csvwriter.writerow(row)
# Return values.
if return_artifacts:
if not channels:
return clean_time, artifact_status
else:
return clean_time, clean_channels, artifact_status
else:
if not channels:
return clean_time
else:
return clean_time, clean_channels
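# Hedged usage sketch (illustrative addition, not part of the upstream module):
# remove LAR periods from two synthetic irradiance channels, assuming the
# LYTAF annotation databases are available at (or downloadable to) lytaf_path.
def _example_remove_lar_events(lytaf_path=None):
    import datetime
    import numpy as np
    times = np.array([datetime.datetime(2013, 2, 1) +
                      datetime.timedelta(minutes=i) for i in range(120)])
    channel_1 = np.zeros(len(times)) + 0.4
    channel_2 = np.zeros(len(times)) + 0.1
    # Returns the cleaned time array and the cleaned channel list.
    return _remove_lytaf_events(times, channels=[channel_1, channel_2],
                                artifacts=["LAR"], lytaf_path=lytaf_path)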
def get_lytaf_events(start_time, end_time, lytaf_path=None,
combine_files=("lyra", "manual", "ppt", "science"),
csvfile=None, force_use_local_lytaf=False):
"""
Extracts combined lytaf file for given time range.
Given a time range defined by start_time and end_time, this function
extracts the segments of each LYRA annotation file and combines them.
Parameters
----------
start_time : `datetime.datetime` or `str`
Start time of period for which annotation file is required.
end_time : `datetime.datetime` or `str`
End time of period for which annotation file is required.
lytaf_path : `str`
directory path where the LYRA annotation files are stored.
combine_files : `tuple` of strings
States which LYRA annotation files are to be combined.
Default is all four, i.e. lyra, manual, ppt, science.
See Notes section for an explanation of each.
force_use_local_lytaf : `bool`
Ensures current local version of lytaf files are not replaced by
up-to-date online versions even if current local lytaf files do not
cover entire input time range etc.
Default=False
Returns
-------
lytaf : `numpy.recarray`
Containing the various parameters stored in the LYTAF files.
Notes
-----
There are four LYRA annotation files which mark different types of events
or artifacts in the data. They are named annotation_suffix.db where
suffix is a variable equalling either lyra, manual, ppt, or science.
annotation_lyra.db : contains entries regarding possible effects to
the data due to normal operation of LYRA instrument.
annotation_manual.db : contains entries regarding possible effects
to the data due to unusual or manually logged events.
annotation_ppt.db : contains entries regarding possible effects to
the data due to pointing or positioning of PROBA2.
annotation_science.db : contains events in the data scientifically
interesting, e.g. GOES flares.
References
----------
Further documentation: http://proba2.oma.be/data/TARDIS
Examples
--------
Get all events in the LYTAF files for January 2014
>>> from sunpy.instr.lyra import get_lytaf_events
>>> lytaf = get_lytaf_events('2014-01-01', '2014-02-01')
"""
# Check inputs
# Check lytaf path
if not lytaf_path:
lytaf_path = LYTAF_PATH
# Check start_time and end_time is a date string or datetime object
start_time = parse_time(start_time)
end_time = parse_time(end_time)
# Check combine_files contains correct inputs
if not all(suffix in ["lyra", "manual", "ppt", "science"]
for suffix in combine_files):
raise ValueError("Elements in combine_files must be strings equalling "
"'lyra', 'manual', 'ppt', or 'science'.")
# Remove any duplicates from combine_files input
combine_files = list(set(combine_files))
combine_files.sort()
# Convert input times to UNIX timestamp format since this is the
# time format in the annotation files
start_time_uts = (start_time - datetime.datetime(1970, 1, 1)).total_seconds()
end_time_uts = (end_time - datetime.datetime(1970, 1, 1)).total_seconds()
# Define numpy record array which will hold the information from
# the annotation file.
lytaf = np.empty((0,), dtype=[("insertion_time", object),
("begin_time", object),
("reference_time", object),
("end_time", object),
("event_type", object),
("event_definition", object)])
# Access annotation files
for suffix in combine_files:
# Check database files are present
dbname = "annotation_{0}.db".format(suffix)
check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
# Open SQLITE3 annotation files
connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
# Create cursor to manipulate data in annotation file
cursor = connection.cursor()
# Check if lytaf file spans the start and end times defined by
# user. If not, download newest version.
# First get start time of first event and end time of last
# event in lytaf.
cursor.execute("select begin_time from event order by begin_time asc "
"limit 1;")
db_first_begin_time = cursor.fetchone()[0]
db_first_begin_time = datetime.datetime.fromtimestamp(db_first_begin_time)
cursor.execute("select end_time from event order by end_time desc "
"limit 1;")
db_last_end_time = cursor.fetchone()[0]
db_last_end_time = datetime.datetime.fromtimestamp(db_last_end_time)
# If lytaf does not include entire input time range...
if not force_use_local_lytaf:
if end_time > db_last_end_time or start_time < db_first_begin_time:
# ...close lytaf file...
cursor.close()
connection.close()
# ...Download latest lytaf file...
check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path,
replace=True)
# ...and open new version of lytaf database.
connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
cursor = connection.cursor()
# Select and extract the data from event table within file within
# given time range
cursor.execute("select insertion_time, begin_time, reference_time, "
"end_time, eventType_id from event where end_time >= "
"{0} and begin_time <= "
"{1}".format(start_time_uts, end_time_uts))
event_rows = cursor.fetchall()
# Select and extract the event types from eventType table
cursor.row_factory = sqlite3.Row
cursor.execute("select * from eventType")
eventType_rows = cursor.fetchall()
eventType_id = []
eventType_type = []
eventType_definition = []
for eventType_row in eventType_rows:
eventType_id.append(eventType_row["id"])
eventType_type.append(eventType_row["type"])
eventType_definition.append(eventType_row["definition"])
# Enter desired information into the lytaf numpy record array
for event_row in event_rows:
id_index = eventType_id.index(event_row[4])
lytaf = np.append(lytaf,
np.array((datetime.datetime.utcfromtimestamp(event_row[0]),
datetime.datetime.utcfromtimestamp(event_row[1]),
datetime.datetime.utcfromtimestamp(event_row[2]),
datetime.datetime.utcfromtimestamp(event_row[3]),
eventType_type[id_index],
eventType_definition[id_index]), dtype=lytaf.dtype))
# Close file
cursor.close()
connection.close()
# Sort lytaf in ascending order of begin time
np.recarray.sort(lytaf, order="begin_time")
# If csvfile kwarg is set, write out lytaf to csv file
if csvfile:
# Open and write data to csv file.
with open(csvfile, 'w') as openfile:
csvwriter = csv.writer(openfile, delimiter=';')
# Write header.
csvwriter.writerow(lytaf.dtype.names)
# Write data.
for row in lytaf:
new_row = []
new_row.append(row[0].strftime("%Y-%m-%dT%H:%M:%S"))
new_row.append(row[1].strftime("%Y-%m-%dT%H:%M:%S"))
new_row.append(row[2].strftime("%Y-%m-%dT%H:%M:%S"))
new_row.append(row[3].strftime("%Y-%m-%dT%H:%M:%S"))
new_row.append(row[4])
new_row.append(row[5])
csvwriter.writerow(new_row)
return lytaf
def get_lytaf_event_types(lytaf_path=None, print_event_types=True):
"""Prints the different event types in the each of the LYTAF databases.
Parameters
----------
lytaf_path : `str`
Path location where LYTAF files are stored.
Default = LYTAF_PATH defined above.
print_event_types : `bool`
If True, prints the artifacts in each lytaf database to screen.
Returns
-------
all_event_types : `list`
List of all events types in all lytaf databases.
"""
    # Set lytaf_path if not done by user
if not lytaf_path:
lytaf_path = LYTAF_PATH
suffixes = ["lyra", "manual", "ppt", "science"]
all_event_types = []
# For each database file extract the event types and print them.
if print_event_types:
print("\nLYTAF Event Types\n-----------------\n")
for suffix in suffixes:
dbname = "annotation_{0}.db".format(suffix)
# Check database file exists, else download it.
check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
# Open SQLITE3 LYTAF files
connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
# Create cursor to manipulate data in annotation file
cursor = connection.cursor()
cursor.execute("select type from eventType;")
event_types = cursor.fetchall()
all_event_types.append(event_types)
if print_event_types:
print("----------------\n{0} database\n----------------"
.format(suffix))
for event_type in event_types:
print(str(event_type[0]))
print(" ")
# Unpack event types in all_event_types into single list
all_event_types = [event_type[0] for event_types in all_event_types
for event_type in event_types]
return all_event_types
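# Hedged usage sketch (illustrative addition, not part of the upstream module):
# query the combined annotation files for January 2014 and list the event
# types they define, assuming the LYTAF databases are available locally or
# can be downloaded.
def _example_query_lytaf(lytaf_path=None):
    lytaf = get_lytaf_events('2014-01-01', '2014-02-01', lytaf_path=lytaf_path)
    event_types = get_lytaf_event_types(lytaf_path=lytaf_path,
                                        print_event_types=False)
    return lytaf, event_types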
def download_lytaf_database(lytaf_dir=''):
"""download latest Proba2 pointing database from Proba2 Science Center"""
url = 'http://proba2.oma.be/lyra/data/lytaf/annotation_ppt.db'
destination = os.path.join(lytaf_dir, 'annotation_ppt.db')
urllib.request.urlretrieve(url, destination)
return
def split_series_using_lytaf(timearray, data, lytaf):
"""
Proba-2 analysis code for splitting up LYRA timeseries around locations
where LARs (and other data events) are observed.
Parameters
----------
timearray : `numpy.ndarray` of times understood by `sunpy.time.parse_time`
function.
data : `numpy.array` corresponding to the given time array
lytaf : `numpy.recarray`
Events obtained from querying LYTAF database using
lyra.get_lytaf_events().
    Returns
    -------
output : `list` of dictionaries
Each dictionary contains a sub-series corresponding to an interval of
'good data'.
"""
n = len(timearray)
mask = np.ones(n)
el = len(lytaf)
# make the input time array a list of datetime objects
datetime_array = []
for tim in timearray:
datetime_array.append(parse_time(tim))
# scan through each entry retrieved from the LYTAF database
for j in range(0, el):
# want to mark all times with events as bad in the mask, i.e. = 0
start_dt = lytaf['begin_time'][j]
end_dt = lytaf['end_time'][j]
# find the start and end indices for each event
start_ind = np.searchsorted(datetime_array, start_dt)
end_ind = np.searchsorted(datetime_array, end_dt)
# append the mask to mark event as 'bad'
mask[start_ind:end_ind] = 0
diffmask = np.diff(mask)
tmp_discontinuity = np.where(diffmask != 0.)
# disc contains the indices of mask where there are discontinuities
disc = tmp_discontinuity[0]
if len(disc) == 0:
print('No events found within time series interval. '
'Returning original series.')
return [{'subtimes': datetime_array, 'subdata': data}]
# -1 in diffmask means went from good data to bad
# +1 means went from bad data to good
# want to get the data between a +1 and the next -1
# if the first discontinuity is a -1 then the start of the series was good.
if diffmask[disc[0]] == -1.0:
# make sure we can always start from disc[0] below
disc = np.insert(disc, 0, 0)
split_series = []
limit = len(disc)
# now extract the good data regions and ignore the bad ones
for h in range(0, limit, 2):
if h == limit-1:
# can't index h+1 here. Go to end of series
subtimes = datetime_array[disc[h]:-1]
subdata = data[disc[h]:-1]
subseries = {'subtimes':subtimes, 'subdata':subdata}
split_series.append(subseries)
else:
subtimes = datetime_array[disc[h]:disc[h+1]]
subdata = data[disc[h]:disc[h+1]]
subseries = {'subtimes':subtimes, 'subdata':subdata}
split_series.append(subseries)
return split_series
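# Hedged usage sketch (illustrative addition, not part of the upstream module):
# split a synthetic flat light curve around whatever LYTAF events fall inside
# its one-day interval, assuming the annotation databases are reachable.
def _example_split_series(lytaf_path=None):
    import datetime
    import numpy as np
    times = [datetime.datetime(2014, 1, 1) + datetime.timedelta(minutes=i)
             for i in range(1440)]
    data = np.ones(len(times))
    lytaf = get_lytaf_events(times[0], times[-1], lytaf_path=lytaf_path)
    return split_series_using_lytaf(times, data, lytaf)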
def _lytaf_event2string(integers):
if type(integers) == int:
integers = [integers]
#else:
# n=len(integers)
out = []
for i in integers:
if i == 1:
out.append('LAR')
if i == 2:
out.append('N/A')
if i == 3:
out.append('UV occult.')
if i == 4:
out.append('Vis. occult.')
if i == 5:
out.append('Offpoint')
if i == 6:
out.append('SAA')
if i == 7:
out.append('Auroral zone')
if i == 8:
out.append('Moon in LYRA')
if i == 9:
out.append('Moon in SWAP')
if i == 10:
out.append('Venus in LYRA')
if i == 11:
out.append('Venus in SWAP')
return out
def _prep_columns(time, channels=None, filecolumns=None):
"""
Checks and prepares data to be written out to a file.
Firstly, this function converts the elements of time, whose entries are
assumed to be datetime objects, to time strings. Secondly, it checks
whether the number of elements in an input list of column names,
filecolumns, is equal to the number of arrays in the list, channels.
If not, a ValueError is raised. If however filecolumns equals None, a
filenames list is generated equal to ["time", "channel0", "channel1",...,
"channelN"] where N is the number of arrays in the list, channels
(assuming 0-indexed counting).
"""
# Convert time which contains datetime objects to time strings.
string_time = np.array([t.strftime("%Y-%m-%dT%H:%M:%S.%f") for t in time])
# If filenames is given...
if filecolumns:
# ...check all the elements are strings...
if all(isinstance(column, str) for column in filecolumns) is False:
raise TypeError("All elements in filecolumns must by strings.")
# ...and that there are the same number of elements as there
# are arrays in channels, plus 1 for a time array. Otherwise
# raise a ValueError.
if channels:
ncol = 1 + len(channels)
else:
ncol = 1
if len(filecolumns) != ncol:
raise ValueError("Number of elements in filecolumns must be "
"equal to the number of input data arrays, "
"i.e. time + elements in channels.")
# If filenames not given, create a list of columns names of the
# form: ["time", "channel0", "channel1",...,"channelN"] where N
# is the number of arrays in channels (assuming 0-indexed counting).
else:
if channels:
filecolumns = ["channel{0}".format(fluxnum)
for fluxnum in range(len(channels))]
filecolumns.insert(0, "time")
else:
filecolumns = ["time"]
return string_time, filecolumns
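# Hedged usage sketch (illustrative addition, not part of the upstream module):
# the default column names _prep_columns generates for a time array plus two
# irradiance channels.
def _example_prep_columns():
    import datetime
    import numpy as np
    times = [datetime.datetime(2014, 1, 1, 0, m) for m in range(3)]
    channels = [np.zeros(3), np.ones(3)]
    string_time, cols = _prep_columns(times, channels)
    # cols is ["time", "channel0", "channel1"]
    return string_time, cols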
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/instr/lyra.py",
"copies": "1",
"size": "30619",
"license": "bsd-2-clause",
"hash": 8336615997560095000,
"line_mean": 40.4891598916,
"line_max": 93,
"alpha_frac": 0.6130833796,
"autogenerated": false,
"ratio": 4.093449197860963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004645845460382737,
"num_lines": 738
} |
from __future__ import absolute_import, division, print_function
import os.path
import sys
from Cython.Build import cythonize
from Cython.Distutils import extension
from echomesh.base import Path
from echomesh.base import Platform
_DEBUG_FLAG = '--debug'
DEBUG = _DEBUG_FLAG in sys.argv
if DEBUG:
sys.argv.remove(_DEBUG_FLAG)
VERBOSE = True
class Config(object):
def __init__(self):
self.debug = DEBUG
self.verbose = VERBOSE
self.module_name = 'cechomesh'
self.library_name = '%s.so' % self.module_name
pyx_files = ['cechomesh.pyx']
libraries = ['echomesh', 'pthread', 'glog']
extra_compile_args = (
'-I. -fmessage-length=0 -std=c++11 '
' -IJuceLibraryCode -Ibuild/include -Wno-unused-function ')
if DEBUG:
extra_args = {'cython_gdb': True, 'pyrex_gdb': True}
extra_compile_args += '-O0 -g -D_DEBUG=1 -DDEBUG=1 '
extra_link_args = '-g '
else:
extra_args = {}
extra_compile_args += '-O2 '
extra_link_args = '-flto '
if Platform.PLATFORM == Platform.MAC:
extra_link_args += (
'-framework Cocoa -framework WebKit -framework CoreMidi ')
extra_compile_args += '-stdlib=libc++ -x c++ -arch x86_64 '
if DEBUG:
echomesh_lib = 'Builds/MacOSX/build/Debug'
else:
echomesh_lib = 'Builds/MacOSX/build/Release'
elif Platform.PLATFORM == Platform.UBUNTU:
extra_link_args += (
'-lc++ -L/usr/X11R6/lib/ -lX11 -lXext -lXinerama -lasound '
'-ldl -lfreetype -lrt -lechomesh')
extra_compile_args += '-stdlib=libc++ -arch x86_64 -x c++ '
if DEBUG:
echomesh_lib = 'Builds/Linux/build/Debug'
else:
echomesh_lib = 'Builds/Linux/build'
elif Platform.PLATFORM == Platform.RASPBERRY_PI:
extra_link_args += (
'-L/usr/X11R6/lib/ -lX11 -lXext -lXinerama -lasound '
'-ldl -lfreetype -lrt -lechomesh')
echomesh_lib = 'Builds/Linux/build'
else:
raise Exception("Don't understand platform %s" % Platform.PLATFORM)
extra_compile_args = extra_compile_args.split()
extra_link_args = extra_link_args.split()
self.bin_dir = Path.LIBRARY_PATH
lib_dirs = ['build/lib', echomesh_lib]
ext = extension.Extension(
self.module_name,
pyx_files,
library_dirs=lib_dirs,
libraries=libraries,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
**extra_args)
self.modules = cythonize([ext], **extra_args)
self.c_library = os.path.join(echomesh_lib, 'libechomesh.a')
CONFIG = Config()
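# Hedged usage sketch (illustrative addition, not part of the upstream module):
# downstream build scripts would typically read the prepared Cython extension
# list and naming details from the module-level CONFIG instance.
def _example_inspect_config():
    return CONFIG.module_name, CONFIG.library_name, CONFIG.bin_dir, CONFIG.modules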
| {
"repo_name": "rec/echomesh",
"path": "code/python/echomesh/build/BuildConfig.py",
"copies": "1",
"size": "2875",
"license": "mit",
"hash": 1014947364197734700,
"line_mean": 29.585106383,
"line_max": 79,
"alpha_frac": 0.5648695652,
"autogenerated": false,
"ratio": 3.4307875894988067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44956571546988067,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os.path
import sys
import yaml
__all__ = ['ManifestChart']
MANIFEST_FILES = ["Chart.yaml", "Chart.yml"]
class ManifestChart(dict):
    def __init__(self, package=None, values=None):
        super(ManifestChart, self).__init__()
self.values = values
if package is None:
self._load_from_path()
else:
self._load_yaml(package.manifest)
def _load_yaml(self, yamlstr):
try:
self.update(yaml.load(yamlstr))
except yaml.YAMLError as exc:
print("Error in configuration file:", sys.stderr)
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark # pylint: disable=no-member
print("Error position: (%s:%s)" % (mark.line + 1, mark.column + 1), sys.stderr)
raise exc
def _load_from_path(self):
mfile = None
for f in MANIFEST_FILES:
if os.path.exists(f):
mfile = f
break
if mfile is not None:
with open(mfile) as f:
self._load_yaml(f.read())
else:
raise IOError("Error: Chart not found.")
@property
def keywords(self):
return self.get("keywords", [])
@property
def engine(self):
return self.get("engine", "gotpl")
@property
def home(self):
return self.get("home", "")
@property
def description(self):
return self.get("description", "")
@property
def version(self):
return self.get("version", "")
@property
def maintainers(self):
return self.get("maintainers", [])
@property
def sources(self):
return self.get("sources", [])
@property
def name(self):
return self.get("name", [])
def metadata(self):
return {"maintainers": self.maintainers, "source": self.sources}
| {
"repo_name": "app-registry/appr",
"path": "appr/formats/helm/manifest_chart.py",
"copies": "2",
"size": "1983",
"license": "apache-2.0",
"hash": -3774338324030189600,
"line_mean": 23.4814814815,
"line_max": 95,
"alpha_frac": 0.5511850731,
"autogenerated": false,
"ratio": 4.006060606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5557245679160606,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os.path
import yaml
from appr.formats.appr.manifest import ManifestBase
from appr.pack import all_files
from appr.render_jsonnet import RenderJsonnet, yaml_to_jsonnet
__all__ = ['ManifestJsonnet']
MANIFEST_FILES = ['manifest.jsonnet', 'manifest.yaml', 'manifest.yml', 'kpm-manifest.jsonnet']
class ManifestJsonnet(ManifestBase):
def __init__(self, package=None, tla_codes=None):
self.tla_codes = tla_codes
if package is not None:
self._load_from_package(package)
else:
self._load_from_path()
super(ManifestJsonnet, self).__init__()
def _isjsonnet(self, package):
if "manifest.yaml" in package.files:
return False
elif "manifest.jsonnet" in package.files or "appr.jsonnet":
return True
else:
raise RuntimeError("Unknown manifest format")
def _load_from_package(self, package):
if self._isjsonnet(package):
self._load_jsonnet(package.manifest, package.files)
else:
self._load_yaml(package.manifest, package.files)
    def _load_from_path(self):
        mfile = None
        for filepath in MANIFEST_FILES:
            if os.path.exists(filepath):
                mfile = filepath
                break
        if mfile is None:
            raise IOError("Error: manifest not found.")
        _, ext = os.path.splitext(mfile)
with open(mfile) as f:
auth_files = all_files()
files = dict(zip(auth_files, [None] * len(auth_files)))
if ext == '.jsonnet':
self._load_jsonnet(f.read(), files)
else:
self._load_yaml(f.read(), files)
def _load_jsonnet(self, jsonnetstr, files):
k = RenderJsonnet(files)
r = k.render_jsonnet(jsonnetstr, self.tla_codes)
self.update(r)
def _load_yaml(self, yamlstr, files):
try:
jsonnetstr = yaml_to_jsonnet(yamlstr, self.tla_codes)
files['manifest.jsonnet'] = jsonnetstr
self._load_jsonnet(jsonnetstr, files)
except yaml.YAMLError as exc:
print('Error in configuration file:')
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark # pylint: disable=E1101
print('Error position: (%s:%s)' % (mark.line + 1, mark.column + 1))
raise exc
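# Hedged usage sketch (illustrative addition, not part of the upstream module):
# render the manifest found in the current working directory; assumes a
# manifest.jsonnet or manifest.yaml is present where the snippet is run.
def _example_load_manifest():
    manifest = ManifestJsonnet()
    return dict(manifest)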
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/formats/appr/manifest_jsonnet.py",
"copies": "2",
"size": "2349",
"license": "apache-2.0",
"hash": -1232279893608426800,
"line_mean": 33.0434782609,
"line_max": 94,
"alpha_frac": 0.5917411665,
"autogenerated": false,
"ratio": 3.7285714285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5320312595071428,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os, sys, argparse
import urllib
import tflearn
from tflearn.data_utils import *
parser = argparse.ArgumentParser(description=
'Pass a text file to generate LSTM output')
parser.add_argument('filename')
parser.add_argument('-t','--temp', help=
'Defaults to displaying multiple temperature outputs which is suggested.' +
' If temp is specified, a value of 0.0 to 2.0 is recommended.' +
' Temperature is the novelty or' +
' riskiness of the generated output. A value closer to 0 will result' +
' in output closer to the input, so higher is riskier.',
required=False, nargs=1, type=float)
parser.add_argument('-l','--length', help=
'Optional length of text sequences to analyze. Defaults to 25.',
required=False, default=25, nargs=1, type=int)
args = vars(parser.parse_args())
path = args['filename']
if args['temp'] and args['temp'][0] is not None:
temp = args['temp'][0]
print("Temperature set to", temp)
if temp > 2 or temp < 0:
print("Temperature out of suggested range. Suggested temp range is 0.0-2.0")
else:
print("Will display multiple temperature outputs")
if args['length'] != 25:
maxlen = args['length'][0] # default 25 is set in .add_argument above if not set by user
print("Sequence max length set to ", maxlen)
else:
maxlen = args['length']
model_name=path.split('.')[0] # create model name from textfile input
if not os.path.isfile(path):
print("Couldn't find the text file. Are you sure the you passed is correct?")
X, Y, char_idx = \
textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3)
g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
learning_rate=0.001)
m = tflearn.SequenceGenerator(g, dictionary=char_idx,
seq_maxlen=maxlen,
clip_gradients=5.0,
checkpoint_path='model_'+ model_name)
for i in range(50):
seed = random_sequence_from_textfile(path, maxlen)
m.fit(X, Y, validation_set=0.1, batch_size=128,
n_epoch=1, run_id=model_name)
print("-- TESTING...")
if args['temp'] is not None:
temp = args['temp'][0]
print("-- Test with temperature of %s --" % temp)
print(m.generate(600, temperature=temp, seq_seed=seed))
else:
print("-- Test with temperature of 1.0 --")
print(m.generate(600, temperature=1.0, seq_seed=seed))
print("-- Test with temperature of 0.5 --")
print(m.generate(600, temperature=0.5, seq_seed=seed))
| {
"repo_name": "hashware/tflearn-learn",
"path": "examples/nlp/lstm_generator_textfile.py",
"copies": "2",
"size": "2983",
"license": "mit",
"hash": 7709433194300414000,
"line_mean": 37.7402597403,
"line_max": 92,
"alpha_frac": 0.6473348978,
"autogenerated": false,
"ratio": 3.4208715596330275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5068206457433028,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
from ..expr import (Expr, Symbol, Field, Arithmetic, Math,
Date, Time, DateTime, Millisecond, Microsecond, broadcast,
sin, cos, Map, UTCFromTimestamp, DateTimeTruncate, symbol,
USub, Not)
from ..expr import math as expr_math
from ..expr.expressions import valid_identifier
from ..dispatch import dispatch
from . import pydatetime
import datetime
import math
import toolz
import itertools
funcnames = ('func_%d' % i for i in itertools.count())
def parenthesize(s):
if ' ' in s:
return '(%s)' % s
else:
return s
def print_python(leaves, expr):
""" Print expression to be evaluated in Python
>>> from blaze.expr import ceil, sin
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> print_python([t], t.x + t.y)
('t[0] + t[1]', {})
Supports mathematical and datetime access
>>> print_python([t], sin(t.x) > ceil(t.y)) # doctest: +SKIP
('math.sin(t[0]) > math.ceil(t[1])', {'math':<module 'math'>})
>>> print_python([t], t.when.day + 1)
('t[3].day + 1', {})
Specify leaves of the expression to control level of printing
>>> print_python([t.x, t.y], t.x + t.y)
('x + y', {})
Returns
-------
    s: string
        An evaluable string
    scope: dict
        A namespace to be given to eval
"""
if isinstance(expr, Expr) and any(expr.isidentical(lf) for lf in leaves):
return valid_identifier(expr._name), {}
return _print_python(expr, leaves=leaves)
@dispatch(object)
def _print_python(expr, leaves=None):
return repr(expr), {}
@dispatch((datetime.datetime, datetime.date))
def _print_python(expr, leaves=None):
return repr(expr), {'datetime': datetime, 'Timestamp': pd.Timestamp}
@dispatch(Symbol)
def _print_python(expr, leaves=None):
return valid_identifier(expr._name), {}
@dispatch(Field)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
index = expr._child.fields.index(expr._name)
return '%s[%d]' % (parenthesize(child), index), scope
@dispatch(Arithmetic)
def _print_python(expr, leaves=None):
lhs, left_scope = print_python(leaves, expr.lhs)
rhs, right_scope = print_python(leaves, expr.rhs)
return ('%s %s %s' % (parenthesize(lhs),
expr.symbol,
parenthesize(rhs)),
toolz.merge(left_scope, right_scope))
@dispatch(USub)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return '%s%s' % (expr.symbol, parenthesize(child)), scope
@dispatch(Not)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return 'not %s' % parenthesize(child), scope
@dispatch(Math)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('math.%s(%s)' % (type(expr).__name__, child),
toolz.merge(scope, {'math': math}))
@dispatch(expr_math.abs)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('abs(%s)' % child, scope)
@dispatch(Date)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.date()' % parenthesize(child), scope)
@dispatch(Time)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.time()' % parenthesize(child), scope)
@dispatch(Millisecond)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.microsecond // 1000' % parenthesize(child), scope)
@dispatch(UTCFromTimestamp)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('datetime.datetime.utcfromtimestamp(%s)' % parenthesize(child),
toolz.merge({'datetime': datetime}, scope))
@dispatch(DateTime)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
attr = type(expr).__name__.lower()
return ('%s.%s' % (parenthesize(child), attr), scope)
@dispatch(DateTimeTruncate)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
scope['truncate'] = pydatetime.truncate
return ('truncate(%s, %s, "%s")' % (child, expr.measure, expr.unit),
scope)
@dispatch(Map)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
funcname = next(funcnames)
return ('%s(%s)' % (funcname, child),
toolz.assoc(scope, funcname, expr.func))
@dispatch(Expr)
def _print_python(expr, leaves=None):
raise NotImplementedError("Do not know how to write expressions of type %s"
" to Python code" % type(expr).__name__)
def funcstr(leaves, expr):
""" Lambda string for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> funcstr([t], t.x + t.y)
('lambda t: t[0] + t[1]', {})
>>> funcstr([t.x, t.y], t.x + t.y)
('lambda x, y: x + y', {})
Also returns scope for libraries like math or datetime
>>> funcstr([t.x, t.y], sin(t.x) + t.y) # doctest: +SKIP
('lambda x, y: math.sin(x) + y', {'math': <module 'math'>})
>>> from datetime import date
>>> funcstr([t.x, t.y, t.when], t.when.date > date(2001, 12, 25)) #doctest: +SKIP
('lambda x, y, when: when.day > datetime.date(2001, 12, 25)', {'datetime': <module 'datetime'>})
"""
result, scope = print_python(leaves, expr)
leaf_names = [print_python([leaf], leaf)[0] for leaf in leaves]
return 'lambda %s: %s' % (', '.join(leaf_names),
result), scope
def lambdify(leaves, expr):
""" Lambda for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> f = lambdify([t], t.x + t.y)
>>> f((1, 10, 100, ''))
11
>>> f = lambdify([t.x, t.y, t.z, t.when], t.x + cos(t.y))
>>> f(1, 0, 100, '')
2.0
"""
s, scope = funcstr(leaves, expr)
return eval(s, scope)
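# Hedged usage sketch (illustrative addition, not part of the upstream module):
# turn a blaze expression into a plain Python callable and evaluate it on a
# tuple, mirroring the doctests above.
def _example_lambdify():
    t = symbol('t', '{x: int, y: int}')
    f = lambdify([t], t.x + t.y * 2)
    return f((1, 10))  # 1 + 10 * 2 == 21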
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/compute/pyfunc.py",
"copies": "3",
"size": "6148",
"license": "bsd-3-clause",
"hash": 4429572772047655000,
"line_mean": 29.8944723618,
"line_max": 100,
"alpha_frac": 0.6107677293,
"autogenerated": false,
"ratio": 3.2615384615384615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5372306190838462,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
from ..expr import (Expr, Symbol, Field, Arithmetic, UnaryMath, BinaryMath,
Date, Time, DateTime, Millisecond, Microsecond, broadcast,
sin, cos, Map, UTCFromTimestamp, DateTimeTruncate, symbol,
USub, Not, notnull, greatest, least, atan2, Like)
from ..expr import math as expr_math
from ..expr.expressions import valid_identifier
from ..dispatch import dispatch
from . import pydatetime
import numpy as np
import datetime
import fnmatch
import math
import toolz
import itertools
funcnames = ('func_%d' % i for i in itertools.count())
def parenthesize(s):
if ' ' in s:
return '(%s)' % s
else:
return s
def print_python(leaves, expr):
""" Print expression to be evaluated in Python
>>> from blaze.expr import ceil, sin
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> print_python([t], t.x + t.y)
('t[0] + t[1]', {})
Supports mathematical and datetime access
>>> print_python([t], sin(t.x) > ceil(t.y)) # doctest: +SKIP
('math.sin(t[0]) > math.ceil(t[1])', {'math':<module 'math'>})
>>> print_python([t], t.when.day + 1)
('t[3].day + 1', {})
Specify leaves of the expression to control level of printing
>>> print_python([t.x, t.y], t.x + t.y)
('x + y', {})
Returns
-------
    s: string
        An evaluable string
    scope: dict
        A namespace to be given to eval
"""
if isinstance(expr, Expr) and any(expr.isidentical(lf) for lf in leaves):
return valid_identifier(expr._name), {}
return _print_python(expr, leaves=leaves)
@dispatch(object)
def _print_python(expr, leaves=None):
return repr(expr), {}
@dispatch((datetime.datetime, datetime.date))
def _print_python(expr, leaves=None):
return repr(expr), {'datetime': datetime, 'Timestamp': pd.Timestamp}
@dispatch(Symbol)
def _print_python(expr, leaves=None):
return valid_identifier(expr._name), {}
@dispatch(Field)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
index = expr._child.fields.index(expr._name)
return '%s[%d]' % (parenthesize(child), index), scope
@dispatch(Arithmetic)
def _print_python(expr, leaves=None):
lhs, left_scope = print_python(leaves, expr.lhs)
rhs, right_scope = print_python(leaves, expr.rhs)
return ('%s %s %s' % (parenthesize(lhs),
expr.symbol,
parenthesize(rhs)),
toolz.merge(left_scope, right_scope))
@dispatch(USub)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return '%s%s' % (expr.symbol, parenthesize(child)), scope
@dispatch(Not)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return 'not %s' % parenthesize(child), scope
@dispatch(UnaryMath)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('np.%s(%s)' % (type(expr).__name__, child),
toolz.merge(scope, {'np': np}))
@dispatch(BinaryMath)
def _print_python(expr, leaves=None):
lhs, scope_lhs = print_python(leaves, expr.lhs)
rhs, scope_rhs = print_python(leaves, expr.rhs)
return ('np.%s(%s, %s)' % (type(expr).__name__, lhs, rhs),
toolz.merge(scope_lhs, scope_rhs, {'np': np}))
@dispatch(atan2)
def _print_python(expr, leaves=None):
lhs, scope_lhs = print_python(leaves, expr.lhs)
rhs, scope_rhs = print_python(leaves, expr.rhs)
return ('np.arctan2(%s, %s)' % (lhs, rhs),
toolz.merge(scope_lhs, scope_rhs, {'np': np}))
@dispatch(greatest)
def _print_python(expr, leaves=None):
lhs, scope_lhs = print_python(leaves, expr.lhs)
rhs, scope_rhs = print_python(leaves, expr.rhs)
return 'max(%s, %s)' % (lhs, rhs), toolz.merge(scope_lhs, scope_rhs)
@dispatch(least)
def _print_python(expr, leaves=None):
lhs, scope_lhs = print_python(leaves, expr.lhs)
rhs, scope_rhs = print_python(leaves, expr.rhs)
return 'min(%s, %s)' % (lhs, rhs), toolz.merge(scope_lhs, scope_rhs)
@dispatch(expr_math.abs)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('abs(%s)' % child, scope)
@dispatch(Date)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.date()' % parenthesize(child), scope)
@dispatch(Time)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.time()' % parenthesize(child), scope)
@dispatch(Millisecond)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.microsecond // 1000' % parenthesize(child), scope)
@dispatch(UTCFromTimestamp)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('datetime.datetime.utcfromtimestamp(%s)' % parenthesize(child),
toolz.merge({'datetime': datetime}, scope))
@dispatch(DateTime)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
attr = type(expr).__name__.lower()
return ('%s.%s' % (parenthesize(child), attr), scope)
@dispatch(DateTimeTruncate)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
scope['truncate'] = pydatetime.truncate
return ('truncate(%s, %s, "%s")' % (child, expr.measure, expr.unit),
scope)
@dispatch(Map)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
funcname = next(funcnames)
return ('%s(%s)' % (funcname, child),
toolz.assoc(scope, funcname, expr.func))
@dispatch(notnull)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('notnull(%s)' % child,
toolz.merge(scope, dict(notnull=lambda x: x is not None)))
@dispatch(Like)
def _print_python(expr, leaves):
child, scope = print_python(leaves, expr._child)
return (
'fnmatch(%s, %r)' % (child, expr.pattern),
toolz.merge(scope, dict(fnmatch=fnmatch.fnmatch))
)
@dispatch(Expr)
def _print_python(expr, leaves=None):
raise NotImplementedError("Do not know how to write expressions of type %s"
" to Python code" % type(expr).__name__)
def funcstr(leaves, expr):
""" Lambda string for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> funcstr([t], t.x + t.y)
('lambda t: t[0] + t[1]', {})
>>> funcstr([t.x, t.y], t.x + t.y)
('lambda x, y: x + y', {})
Also returns scope for libraries like math or datetime
>>> funcstr([t.x, t.y], sin(t.x) + t.y) # doctest: +SKIP
('lambda x, y: math.sin(x) + y', {'math': <module 'math'>})
>>> from datetime import date
>>> funcstr([t.x, t.y, t.when], t.when.date > date(2001, 12, 25)) #doctest: +SKIP
('lambda x, y, when: when.day > datetime.date(2001, 12, 25)', {'datetime': <module 'datetime'>})
"""
result, scope = print_python(leaves, expr)
leaf_names = [print_python([leaf], leaf)[0] for leaf in leaves]
return 'lambda %s: %s' % (', '.join(leaf_names),
result), scope
def lambdify(leaves, expr):
""" Lambda for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> f = lambdify([t], t.x + t.y)
>>> f((1, 10, 100, ''))
11
>>> f = lambdify([t.x, t.y, t.z, t.when], t.x + cos(t.y))
>>> f(1, 0, 100, '')
2.0
"""
s, scope = funcstr(leaves, expr)
return eval(s, scope)
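# Hedged usage sketch (illustrative addition, not part of the upstream module):
# the dispatchers above add two-argument helpers such as ``greatest``; this
# shows the Python source generated for one of them.
def _example_print_greatest():
    t = symbol('t', '{x: int, y: int}')
    source, scope = print_python([t.x, t.y], greatest(t.x, t.y))
    return source, scope  # roughly ('max(x, y)', {})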
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/pyfunc.py",
"copies": "5",
"size": "7748",
"license": "bsd-3-clause",
"hash": -5583132074411088000,
"line_mean": 28.572519084,
"line_max": 100,
"alpha_frac": 0.6143520909,
"autogenerated": false,
"ratio": 3.213604313562837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6327956404462837,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
from toolz import partial
from dask.base import compute
def _categorize_block(df, categories):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
df[col] = pd.Categorical(df[col], categories=vals, ordered=False)
return df
def categorize(df, columns=None, **kwargs):
"""
Convert columns of dataframe to category dtype
This aids performance, both in-memory and in spilling to disk
"""
if columns is None:
dtypes = df.dtypes
columns = [name for name, dt in zip(dtypes.index, dtypes.values)
if dt == 'O']
if not isinstance(columns, (list, tuple)):
columns = [columns]
distincts = [df[col].drop_duplicates() for col in columns]
values = compute(*distincts, **kwargs)
func = partial(_categorize_block, categories=dict(zip(columns, values)))
meta = func(df._meta)
return df.map_partitions(func, meta=meta)
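# Hedged usage sketch (illustrative addition, not part of the upstream module):
# convert the object-dtype column of a small dask dataframe to a pandas
# categorical dtype and materialize the result.
def _example_categorize():
    import dask.dataframe as dd
    pdf = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
    ddf = dd.from_pandas(pdf, npartitions=2)
    return categorize(ddf, columns=['y']).compute()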
def _categorize(categories, df):
""" Categorize columns in dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 2, 0]})
>>> categories = {'y': ['A', 'B', 'c']}
>>> _categorize(categories, df)
x y
0 1 A
1 2 c
2 3 A
>>> _categorize(categories, df.y)
0 A
1 c
2 A
dtype: category
Categories (3, object): [A, B, c]
"""
if '.index' in categories:
index = pd.CategoricalIndex(
pd.Categorical.from_codes(df.index.values, categories['.index']))
else:
index = df.index
if isinstance(df, pd.Series):
if df.name in categories:
cat = pd.Categorical.from_codes(df.values, categories[df.name])
return pd.Series(cat, index=index)
else:
return df
else:
return pd.DataFrame(
dict((col, pd.Categorical.from_codes(df[col].values, categories[col])
if col in categories
else df[col].values)
for col in df.columns),
columns=df.columns,
index=index)
def strip_categories(df):
""" Strip categories from dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> strip_categories(df)
x y
0 1 0
1 2 1
2 3 0
"""
return pd.DataFrame(dict((col, df[col].cat.codes.values
if iscategorical(df.dtypes[col])
else df[col].values)
for col in df.columns),
columns=df.columns,
index=df.index.codes
if iscategorical(df.index.dtype)
else df.index)
def iscategorical(dt):
return isinstance(dt, pd.core.common.CategoricalDtype)
def get_categories(df):
"""
Get Categories of dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> get_categories(df)
{'y': Index([u'A', u'B'], dtype='object')}
"""
result = dict((col, df[col].cat.categories) for col in df.columns
if iscategorical(df.dtypes[col]))
if iscategorical(df.index.dtype):
result['.index'] = df.index.categories
return result
| {
"repo_name": "cowlicks/dask",
"path": "dask/dataframe/categorical.py",
"copies": "2",
"size": "3559",
"license": "bsd-3-clause",
"hash": -985202191573583100,
"line_mean": 28.4132231405,
"line_max": 85,
"alpha_frac": 0.5465018264,
"autogenerated": false,
"ratio": 3.790202342917998,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5336704169317997,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import nfldb
from nfldb.update import log
from nfldbproj.names import name_to_id
from nfldbproj import update
def from_dataframe(db, df, metadata, single_week_only=False, season_totals=False,
fp_projection=True, stat_projection=True, fp_score=False, dfs_salary=False):
if 'opp' in df:
df = drop_byes(df)
if 'gsis_id' not in df and not season_totals:
# Not needed for season projections.
assign_gsis_ids(db, df, metadata)
fix_dst_names(df)
if 'fantasy_player_id' not in df:
assign_player_ids(db, df)
if fp_projection:
fp_df = pd.DataFrame(index=df.index)
for column in ('fantasy_player_id', 'gsis_id', 'team', 'fantasy_pos', 'projected_fp', 'fp_variance', 'week'):
if column in df:
fp_df[column] = df[column]
_from_dataframe_filtered(db, drop_null(fp_df, 'projected_fp'), metadata,
season_totals=season_totals,
single_week_only=single_week_only)
if stat_projection:
stat_df = df.copy()
stat_metadata = metadata.copy()
stat_metadata['fpsys_name'] = 'None'
if 'fpsys_url' in stat_metadata:
del stat_metadata['fpsys_url']
for column in ('projected_fp', 'fp_variance', 'actual_fp', 'salary'):
if column in stat_df:
del stat_df[column]
_from_dataframe_filtered(db, stat_df, stat_metadata,
season_totals=season_totals,
single_week_only=single_week_only)
if fp_score:
results_df = pd.DataFrame(index=df.index)
for column in ('fantasy_player_id', 'gsis_id', 'team', 'fantasy_pos', 'actual_fp', 'week'):
if column in df:
results_df[column] = df[column]
_from_dataframe_filtered(db, results_df, metadata,
season_totals=season_totals,
single_week_only=single_week_only)
if dfs_salary:
salary_df = pd.DataFrame(index=df.index)
for column in ('fantasy_player_id', 'gsis_id', 'team', 'fantasy_pos', 'salary', 'week'):
if column in df:
salary_df[column] = df[column]
_from_dataframe_filtered(db, drop_null(salary_df, 'salary'), metadata,
season_totals=season_totals,
single_week_only=single_week_only)
def _from_dataframe_filtered(db, df, metadata, season_totals=False, single_week_only=False):
if season_totals:
return _from_season_dataframe(db, df, metadata)
if single_week_only:
return _from_week_dataframe(db, df, metadata)
for week, week_df in df.groupby('week'):
week_metadata = metadata.copy()
week_metadata['week'] = week
_from_week_dataframe(db, week_df, week_metadata)
def _from_week_dataframe(db, df, metadata):
if len(df['week'].unique()) > 1:
raise ValueError('More than one week in data')
metadata['week'] = df['week'].iloc[0]
update.insert_data(db, metadata, _df_to_dicts(df))
def _from_season_dataframe(db, df, metadata):
pass
def _df_to_dicts(df):
for _, row in df.iterrows():
yield dict(row[~row.isnull()])
def drop_byes(df):
return df.drop(df.index[(df['opp'].isnull()) | (df['opp'] == '-')], axis=0)
def drop_null(df, column):
return df.drop(df.index[df[column].isnull()], axis=0)
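# Hedged usage sketch (illustrative addition, not part of the upstream module):
# the two helpers above are plain pandas filters; this shows them on a tiny
# frame containing a bye week ('-' opponent) and a missing salary.
def _example_filters():
    df = pd.DataFrame({'opp': ['NE', '-', None],
                       'salary': [5000.0, 6000.0, None]})
    return drop_byes(df), drop_null(df, 'salary')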
def assign_gsis_ids(db, df, metadata):
log('finding game ids...', end='')
for (week, team, home, opp), sub_df in df.groupby(['week', 'team', 'home', 'opp']):
gsis_id = get_gsis_id(
db,
season_year=metadata['season_year'],
season_type=metadata.get('season_type', 'Regular'),
week=week,
home_team=team if home else opp,
)
df.loc[(df['week'] == week) & (df['team'] == team), 'gsis_id'] = gsis_id
log('done')
def get_gsis_id(db, **data):
q = nfldb.Query(db)
games = q.game(**data).as_games()
if not games:
        raise ValueError('Could not find game matching {}'.format(data))
if len(games) > 1:
raise ValueError('Found {} games matching {}'.format(len(games), data))
return games[0].gsis_id
def assign_player_ids(db, df):
log('finding player ids...', end='')
df['fantasy_player_id'] = None
for name, sub_df in df.groupby('name'):
df.loc[df['name'] == name, 'fantasy_player_id'] = name_to_id(db, sub_df['name'].iloc[0])
log('done')
def fix_dst_names(df):
df.loc[df['fantasy_pos'] == 'DST', 'name'] = df.loc[df['fantasy_pos'] == 'DST', 'team']
| {
"repo_name": "hsharrison/nfldb-projections",
"path": "nfldbproj/import_.py",
"copies": "1",
"size": "4785",
"license": "bsd-2-clause",
"hash": -5380721281696894000,
"line_mean": 33.6739130435,
"line_max": 117,
"alpha_frac": 0.5740856844,
"autogenerated": false,
"ratio": 3.353188507358094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4427274191758094,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from ..core import tokenize, DataFrame
from .io import from_delayed
from ...delayed import delayed
from ...utils import random_state_data
__all__ = ['make_timeseries']
def make_float(n, rstate):
return rstate.rand(n) * 2 - 1
def make_int(n, rstate):
return rstate.poisson(1000, size=n)
names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank', 'George',
'Hannah', 'Ingrid', 'Jerry', 'Kevin', 'Laura', 'Michael', 'Norbert',
'Oliver', 'Patricia', 'Quinn', 'Ray', 'Sarah', 'Tim', 'Ursula',
'Victor', 'Wendy', 'Xavier', 'Yvonne', 'Zelda']
def make_string(n, rstate):
return rstate.choice(names, size=n)
def make_categorical(n, rstate):
return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n),
names)
make = {float: make_float,
int: make_int,
str: make_string,
object: make_string,
'category': make_categorical}
def make_timeseries_part(start, end, dtypes, freq, state_data):
index = pd.DatetimeIndex(start=start, end=end, freq=freq)
state = np.random.RandomState(state_data)
columns = dict((k, make[dt](len(index), state)) for k, dt in dtypes.items())
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def make_timeseries(start, end, dtypes, freq, partition_freq, seed=None):
""" Create timeseries dataframe with random data
Parameters
----------
start: datetime (or datetime-like string)
Start of time series
end: datetime (or datetime-like string)
End of time series
dtypes: dict
Mapping of column names to types.
Valid types include {float, int, str, 'category'}
freq: string
String like '2s' or '1H' or '12W' for the time series frequency
partition_freq: string
String like '1M' or '2Y' to divide the dataframe into partitions
seed: int (optional)
Randomstate seed
>>> import dask.dataframe as dd
>>> df = dd.demo.make_timeseries('2000', '2010',
... {'value': float, 'name': str, 'id': int},
... freq='2H', partition_freq='1D', seed=1)
>>> df.head() # doctest: +SKIP
id name value
2000-01-01 00:00:00 969 Jerry -0.309014
2000-01-01 02:00:00 1010 Ray -0.760675
2000-01-01 04:00:00 1016 Patricia -0.063261
2000-01-01 06:00:00 960 Charlie 0.788245
2000-01-01 08:00:00 1031 Kevin 0.466002
"""
divisions = list(pd.DatetimeIndex(start=start, end=end,
freq=partition_freq))
state_data = random_state_data(len(divisions) - 1, seed)
name = 'make-timeseries-' + tokenize(start, end, dtypes, freq,
partition_freq, state_data)
dsk = {(name, i): (make_timeseries_part, divisions[i], divisions[i + 1],
dtypes, freq, state_data[i])
for i in range(len(divisions) - 1)}
head = make_timeseries_part('2000', '2000', dtypes, '1H', state_data[0])
return DataFrame(dsk, name, head, divisions)
def generate_day(date, open, high, low, close, volume,
freq=pd.Timedelta(seconds=60), random_state=None):
""" Generate a day of financial data from open/close high/low values """
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
if not isinstance(date, pd.Timestamp):
date = pd.Timestamp(date)
if not isinstance(freq, pd.Timedelta):
freq = pd.Timedelta(freq)
time = pd.date_range(date + pd.Timedelta(hours=9),
date + pd.Timedelta(hours=12 + 4),
freq=freq / 5, name='timestamp')
n = len(time)
while True:
values = (random_state.random_sample(n) - 0.5).cumsum()
values *= (high - low) / (values.max() - values.min()) # scale
values += np.linspace(open - values[0], close - values[-1],
len(values)) # endpoints
assert np.allclose(open, values[0])
assert np.allclose(close, values[-1])
mx = max(close, open)
mn = min(close, open)
ind = values > mx
values[ind] = (values[ind] - mx) * (high - mx) / (values.max() - mx) + mx
ind = values < mn
values[ind] = (values[ind] - mn) * (low - mn) / (values.min() - mn) + mn
# The process fails if min/max are the same as open close. This is rare
if (np.allclose(values.max(), high) and np.allclose(values.min(), low)):
break
s = pd.Series(values.round(3), index=time)
rs = s.resample(freq)
# TODO: add in volume
return pd.DataFrame({'open': rs.first(),
'close': rs.last(),
'high': rs.max(),
'low': rs.min()})
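# Hedged usage sketch (illustrative addition, not part of the upstream module):
# synthesize a single trading day of one-minute bars from made-up daily
# open/high/low/close values.
def _example_generate_day():
    return generate_day('2000-01-03', open=100.0, high=104.0, low=98.0,
                        close=101.0, volume=1e6,
                        freq=pd.Timedelta(minutes=1), random_state=0)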
def daily_stock(symbol, start, stop, freq=pd.Timedelta(seconds=1),
data_source='yahoo', random_state=None):
""" Create artificial stock data
This data matches daily open/high/low/close values from Yahoo! Finance, but
interpolates values within each day with random values. This makes the
results look natural without requiring the downloading of large volumes of
data. This is useful for education and benchmarking.
Parameters
----------
symbol: string
A stock symbol like "GOOG" or "F"
    start: date, str, or pd.Timestamp
        The start date; input will be fed into pd.Timestamp for normalization
    stop: date, str, or pd.Timestamp
        The stop date; input will be fed into pd.Timestamp for normalization
freq: timedelta, str, or pd.Timedelta
The frequency of sampling
data_source: str, optional
defaults to 'yahoo'. See pandas_datareader.data.DataReader for options
random_state: int, np.random.RandomState object
random seed, defaults to randomly chosen
Examples
--------
>>> import dask.dataframe as dd
>>> df = dd.demo.daily_stock('GOOG', '2010', '2011', freq='1s')
>>> df # doctest: +NORMALIZE_WHITESPACE
Dask DataFrame Structure:
close high low open
npartitions=252
2010-01-04 09:00:00 float64 float64 float64 float64
2010-01-05 09:00:00 ... ... ... ...
... ... ... ... ...
2010-12-31 09:00:00 ... ... ... ...
2010-12-31 16:00:00 ... ... ... ...
Dask Name: from-delayed, 504 tasks
>>> df.head() # doctest: +SKIP
close high low open
timestamp
2010-01-04 09:00:00 626.944 626.964 626.944 626.951
2010-01-04 09:00:01 626.906 626.931 626.906 626.931
2010-01-04 09:00:02 626.901 626.911 626.901 626.905
2010-01-04 09:00:03 626.920 626.920 626.905 626.905
2010-01-04 09:00:04 626.894 626.917 626.894 626.906
"""
from pandas_datareader import data
df = data.DataReader(symbol, data_source, start, stop)
seeds = random_state_data(len(df), random_state=random_state)
parts = []
divisions = []
for i, seed in zip(range(len(df)), seeds):
s = df.iloc[i]
part = delayed(generate_day)(s.name, s.loc['Open'], s.loc['High'], s.loc['Low'],
s.loc['Close'], s.loc['Volume'],
freq=freq, random_state=seed)
parts.append(part)
divisions.append(s.name + pd.Timedelta(hours=9))
divisions.append(s.name + pd.Timedelta(hours=12 + 4))
meta = generate_day('2000-01-01', 1, 2, 0, 1, 100)
return from_delayed(parts, meta=meta, divisions=divisions)
| {
"repo_name": "cpcloud/dask",
"path": "dask/dataframe/io/demo.py",
"copies": "1",
"size": "7953",
"license": "bsd-3-clause",
"hash": -8599157415794791000,
"line_mean": 38.1773399015,
"line_max": 88,
"alpha_frac": 0.5743744499,
"autogenerated": false,
"ratio": 3.4683820322721326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4542756482172132,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from .core import tokenize, DataFrame
from ..utils import different_seeds
__all__ = ['make_timeseries']
def make_float(n, rstate):
return rstate.rand(n) * 2 - 1
def make_int(n, rstate):
return rstate.poisson(1000, size=n)
names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank', 'George',
'Hannah', 'Ingrid', 'Jerry', 'Kevin', 'Laura', 'Michael', 'Norbert', 'Oliver',
'Patricia', 'Quinn', 'Ray', 'Sarah', 'Tim', 'Ursula', 'Victor', 'Wendy',
'Xavier', 'Yvonne', 'Zelda']
def make_string(n, rstate):
return rstate.choice(names, size=n)
def make_categorical(n, rstate):
return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n),
names)
make = {float: make_float,
int: make_int,
str: make_string,
object: make_string,
'category': make_categorical}
def make_timeseries_part(start, end, dtypes, freq, seed):
index = pd.DatetimeIndex(start=start, end=end, freq=freq)
state = np.random.RandomState(seed)
columns = dict((k, make[dt](len(index), state)) for k, dt in dtypes.items())
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def make_timeseries(start, end, dtypes, freq, partition_freq, seed=None):
""" Create timeseries dataframe with random data
Parameters
----------
start: datetime (or datetime-like string)
Start of time series
end: datetime (or datetime-like string)
End of time series
dtypes: dict
Mapping of column names to types.
Valid types include {float, int, str, 'category'}
freq: string
String like '2s' or '1H' or '12W' for the time series frequency
partition_freq: string
String like '1M' or '2Y' to divide the dataframe into partitions
seed: int (optional)
Randomstate seed
>>> import dask.dataframe as dd
>>> df = dd.demo.make_timeseries('2000', '2010',
... {'value': float, 'name': str, 'id': int},
... freq='2H', partition_freq='1D', seed=1)
>>> df.head()
id name value
2000-01-01 00:00:00 960 Dan 0.824008
2000-01-01 02:00:00 1033 Xavier 0.575390
2000-01-01 04:00:00 986 George 0.693842
2000-01-01 06:00:00 1073 Sarah 0.900580
2000-01-01 08:00:00 976 Hannah -0.373847
"""
divisions = list(pd.DatetimeIndex(start=start, end=end,
freq=partition_freq))
state = np.random.RandomState(seed)
seeds = different_seeds(len(divisions), state)
name = 'make-timeseries-' + tokenize(start, end, dtypes, freq, partition_freq)
dsk = dict(((name, i), (make_timeseries_part, divisions[i], divisions[i + 1],
dtypes, freq, seeds[i]))
for i in range(len(divisions) - 1))
head = make_timeseries_part('2000','2000', dtypes, '1H', 1)
return DataFrame(dsk, name, head, divisions)
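# A minimal sketch of how a single partition above is built; it relies on the
# same ``pd.DatetimeIndex(start=..., end=...)`` constructor that
# make_timeseries_part itself uses, so it assumes a pandas version where that
# constructor is available.
def _example_make_timeseries_part():
    part = make_timeseries_part('2000-01-01', '2000-01-02',
                                {'value': float, 'id': int}, '6H', 0)
    assert list(part.columns) == ['id', 'value']
    return part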
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/demo.py",
"copies": "1",
"size": "3180",
"license": "bsd-3-clause",
"hash": -5252871372486875000,
"line_mean": 34.3333333333,
"line_max": 82,
"alpha_frac": 0.593081761,
"autogenerated": false,
"ratio": 3.3263598326359833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9407061875295886,
"avg_score": 0.002475943668019406,
"num_lines": 90
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from ..core import tokenize, DataFrame
from ...utils import random_state_data
__all__ = ['make_timeseries']
def make_float(n, rstate):
return rstate.rand(n) * 2 - 1
def make_int(n, rstate):
return rstate.poisson(1000, size=n)
names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank', 'George',
'Hannah', 'Ingrid', 'Jerry', 'Kevin', 'Laura', 'Michael', 'Norbert',
'Oliver', 'Patricia', 'Quinn', 'Ray', 'Sarah', 'Tim', 'Ursula',
'Victor', 'Wendy', 'Xavier', 'Yvonne', 'Zelda']
def make_string(n, rstate):
return rstate.choice(names, size=n)
def make_categorical(n, rstate):
return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n),
names)
make = {float: make_float,
int: make_int,
str: make_string,
object: make_string,
'category': make_categorical}
def make_timeseries_part(start, end, dtypes, freq, state_data):
index = pd.DatetimeIndex(start=start, end=end, freq=freq)
state = np.random.RandomState(state_data)
columns = dict((k, make[dt](len(index), state)) for k, dt in dtypes.items())
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def make_timeseries(start, end, dtypes, freq, partition_freq, seed=None):
""" Create timeseries dataframe with random data
Parameters
----------
start: datetime (or datetime-like string)
Start of time series
end: datetime (or datetime-like string)
End of time series
dtypes: dict
Mapping of column names to types.
Valid types include {float, int, str, 'category'}
freq: string
String like '2s' or '1H' or '12W' for the time series frequency
partition_freq: string
String like '1M' or '2Y' to divide the dataframe into partitions
seed: int (optional)
Randomstate seed
>>> import dask.dataframe as dd
>>> df = dd.demo.make_timeseries('2000', '2010',
... {'value': float, 'name': str, 'id': int},
... freq='2H', partition_freq='1D', seed=1)
>>> df.head()
id name value
2000-01-01 00:00:00 969 Jerry -0.309014
2000-01-01 02:00:00 1010 Ray -0.760675
2000-01-01 04:00:00 1016 Patricia -0.063261
2000-01-01 06:00:00 960 Charlie 0.788245
2000-01-01 08:00:00 1031 Kevin 0.466002
"""
divisions = list(pd.DatetimeIndex(start=start, end=end,
freq=partition_freq))
state_data = random_state_data(len(divisions) - 1, seed)
name = 'make-timeseries-' + tokenize(start, end, dtypes, freq, partition_freq)
dsk = {(name, i): (make_timeseries_part, divisions[i], divisions[i + 1],
dtypes, freq, state_data[i])
for i in range(len(divisions) - 1)}
head = make_timeseries_part('2000', '2000', dtypes, '1H', state_data[0])
return DataFrame(dsk, name, head, divisions)
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/io/demo.py",
"copies": "1",
"size": "3187",
"license": "mit",
"hash": -1467380368623516000,
"line_mean": 34.4111111111,
"line_max": 82,
"alpha_frac": 0.5917791026,
"autogenerated": false,
"ratio": 3.31633714880333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9404703423135526,
"avg_score": 0.000682565653560745,
"num_lines": 90
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import re
import ast
log_pattern = re.compile("^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d+ - \w+ - .*$")
def _is_legal_log_line(line):
return log_pattern.match(line) is not None
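# A minimal sketch of the filter above; the sample line is an assumption
# shaped only to satisfy the timestamp/level prefix that log_pattern expects.
def _example_is_legal_log_line():
    assert _is_legal_log_line("2018-03-01 12:00:00,123 - INFO - scanning")
    assert not _is_legal_log_line("malformed line")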
def _hublog_read_scan_line(line):
""" Parses a single scan line from a hub log
Parameters
----------
line : str
A single line of log file
Returns
-------
dictionary:
Scan data of a single badge. When an advertisement packet was available,
        it will include voltage, sync and recording status, etc. If the line is
not a scan line, it will return None
"""
# Removing ANSI from line (colors)
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
line = ansi_escape.sub('', line)
# remove end of line
line = line.rstrip("\n\r")
# Filter out rows with illegal structure
if not _is_legal_log_line(line):
return None
# parse
data = line.split(" - ")[2]
if not data.startswith("Found"):
return None
scan_data = {}
adv_payload_raw = data.split("adv_payload': ")[1][0:-1]
adv_payload = ast.literal_eval(adv_payload_raw)
if not adv_payload:
adv_payload = {'proximity_status': None, \
'sync_status': None, \
'audio_status': None, \
'mac': None, \
'badge_id': None, \
'voltage': None, \
'status_flags': None, \
'project_id': None}
scan_data.update(adv_payload)
scan_data['mac'] = data.split(" ")[1][0:-1]
scan_data['rssi'] = data.split(": ")[2].split(",")[0]
scan_data['datetime'] = line.split(" - ")[0]
    scan_data['adv_payload'] = re.sub('[ :\'\[]', '', adv_payload_raw) # shortening it
return scan_data
def hublog_scans(fileobject, log_tz, tz='US/Eastern'):
"""Creates a DataFrame of hub scans.
Parameters
----------
fileobject : file or iterable list of str
The raw log file from a hub.
log_tz : str
The time zone used in the logfile itself
tz : str
The time zone used for localization of dates. Defaults to 'US/Eastern'.
Returns
-------
pd.Series :
A scan record with mac, rssi, and device status (if available)
"""
def readfile(fileobject):
        line_num = 0
        for line in fileobject:
            line_num = line_num + 1
data = _hublog_read_scan_line(line)
if data:
yield (data['datetime'],
str(data['mac']),
float(data['rssi']),
data['voltage'],
data['badge_id'],
data['project_id'],
data['sync_status'],
data['audio_status'],
data['proximity_status'],
)
else:
continue # skip unneeded lines
df = pd.DataFrame(readfile(fileobject), columns=['datetime', 'mac', 'rssi', 'voltage', 'badge_id', \
'project_id', 'sync_status', 'audio_status', \
'proximity_status'])
# Localized record date
df['datetime'] = pd.to_datetime(df['datetime'], utc=True) \
.dt.tz_localize(log_tz).dt.tz_convert(tz)
# Sort
df = df.set_index('datetime')
df.sort_index(inplace=True)
return df
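# A hedged usage sketch of the parser above: the path and the time zones are
# placeholders; the raw hub log is simply opened in text mode and handed to
# hublog_scans.
def _example_hublog_scans(path='hub.log', log_tz='UTC'):
    with open(path) as f:
        return hublog_scans(f, log_tz=log_tz, tz='US/Eastern')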
def _hublog_read_reset_line(line):
""" Parses a single reset line from a hub log
Parameters
----------
line : str
A single line of log file
Returns
-------
dictionary:
        Parses a sync event - when a badge was previously not synced and was sent a new date
"""
# remove end of line
line = line.rstrip("\n\r")
# Filter out rows with illegal structure
if not _is_legal_log_line(line):
return None
# Parse data
data = line.split(" - ")[2]
if not data.endswith("Badge previously unsynced."):
return None
sync_data = {}
sync_data['datetime'] = line.split(" - ")[0]
sync_data['mac'] = data[1:18]
return sync_data
def hublog_resets(fileobject, log_tz, tz='US/Eastern'):
"""Creates a DataFrame of reset events - when badge were previously not synced and
the hub sent a new date
Parameters
----------
fileobject : file or iterable list of str
The raw log file from a hub.
log_tz : str
The time zone used in the logfile itself
tz : str
The time zone used for localization of dates. Defaults to 'US/Eastern'.
Returns
-------
pd.Series :
A record with mac and timestamp
"""
def readfile(fileobject):
for line in fileobject:
data = _hublog_read_reset_line(line)
if data:
yield (data['datetime'],
str(data['mac']),
)
else:
continue # skip unneeded lines
df = pd.DataFrame(readfile(fileobject), columns=['datetime', 'mac'])
# Localized record date
df['datetime'] = pd.to_datetime(df['datetime'], utc=True) \
.dt.tz_localize(log_tz).dt.tz_convert(tz)
# Sort
df = df.set_index('datetime')
df.sort_index(inplace=True)
return df
def _hublog_read_clock_sync_line(line):
""" Parses a single clock line from a hub log
Parameters
----------
line : str
A single line of log file
Returns
-------
dictionary:
Parses a sync event
"""
# remove end of line
line = line.rstrip("\n\r")
# look for clock syncs
if "Badge datetime was" not in line:
return None
# Parse data
data = re.match('(.*) - INFO - \[(.*)\] Badge datetime was: ([\d,]*)', line).group(1, 2, 3)
d = {}
d['datetime'] = data[0]
d['mac'] = data[1]
d['badge_timestamp'] = data[2].replace(",", ".")
return d
def hublog_clock_syncs(fileobject, log_tz, tz='US/Eastern'):
"""Creates a DataFrame of sync events - when badge were previously not synced and
the hub sent a new date
Parameters
----------
fileobject : file or iterable list of str
The raw log file from a hub.
log_tz : str
The time zone used in the logfile itself
tz : str
The time zone used for localization of dates. Defaults to 'US/Eastern'.
Returns
-------
pd.Series :
A record with mac and timestamps
"""
def readfile(fileobject):
for line in fileobject:
data = _hublog_read_clock_sync_line(line)
if data:
yield (data['datetime'],
str(data['mac']),
str(data['badge_timestamp']),
)
else:
continue # skip unneeded lines
df = pd.DataFrame(readfile(fileobject), columns=['datetime', 'mac', 'badge_timestamp'])
# Localized record date
df['datetime'] = pd.to_datetime(df['datetime'], utc=True) \
.dt.tz_localize(log_tz).dt.tz_convert(tz)
# Convert the badge timestamp to a datetime, localized in UTC
df['badge_datetime'] = pd.to_datetime(df['badge_timestamp'], unit='s', utc=True) \
.dt.tz_localize('UTC').dt.tz_convert(tz)
del df['badge_timestamp']
# Sort
df = df.set_index('datetime')
df.sort_index(inplace=True)
    return df
| {
"repo_name": "HumanDynamics/openbadge-analysis",
"path": "openbadge_analysis/preprocessing/hublog.py",
"copies": "1",
"size": "7539",
"license": "mit",
"hash": -2815039506591023000,
"line_mean": 26.5182481752,
"line_max": 104,
"alpha_frac": 0.5370738825,
"autogenerated": false,
"ratio": 3.890092879256966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9915530033390689,
"avg_score": 0.0023273456732551745,
"num_lines": 274
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
from .alignment import deep_align
from .pycompat import OrderedDict, basestring
from .utils import Frozen
from .variable import as_variable, assert_unique_multiindex_level_names
PANDAS_TYPES = (pd.Series, pd.DataFrame, pd.Panel)
_VALID_COMPAT = Frozen({'identical': 0,
'equals': 1,
'broadcast_equals': 2,
'minimal': 3,
'no_conflicts': 4})
def broadcast_dimension_size(variables):
# type: (List[Variable],) -> Variable
"""Extract dimension sizes from a dictionary of variables.
Raises ValueError if any dimensions have different sizes.
"""
dims = OrderedDict()
for var in variables:
for dim, size in zip(var.dims, var.shape):
if dim in dims and size != dims[dim]:
raise ValueError('index %r not aligned' % dim)
dims[dim] = size
return dims
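# A minimal sketch of the size-collection helper above, assuming xarray is
# importable: variables that share a dimension must agree on its length.
def _example_broadcast_dimension_size():
    import xarray as xr
    a = xr.Variable(('x', 'y'), [[1, 2, 3], [4, 5, 6]])
    b = xr.Variable(('y',), [7, 8, 9])
    assert broadcast_dimension_size([a, b]) == {'x': 2, 'y': 3}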
class MergeError(ValueError):
"""Error class for merge failures due to incompatible arguments.
"""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def unique_variable(name, variables, compat='broadcast_equals'):
# type: (Any, List[Variable], str) -> Variable
"""Return the unique variable from a list of variables or raise MergeError.
Parameters
----------
name : hashable
Name for this variable.
variables : list of xarray.Variable
List of Variable objects, all of which go by the same name in different
inputs.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts'}, optional
Type of equality check to use.
Returns
-------
Variable to use in the result.
Raises
------
MergeError: if any of the variables are not equal.
"""
out = variables[0]
if len(variables) > 1:
combine_method = None
if compat == 'minimal':
compat = 'broadcast_equals'
if compat == 'broadcast_equals':
dim_lengths = broadcast_dimension_size(variables)
out = out.set_dims(dim_lengths)
if compat == 'no_conflicts':
combine_method = 'fillna'
for var in variables[1:]:
if not getattr(out, compat)(var):
raise MergeError('conflicting values for variable %r on '
'objects to be combined:\n'
'first value: %r\nsecond value: %r'
% (name, out, var))
if combine_method:
# TODO: add preservation of attrs into fillna
out = getattr(out, combine_method)(var)
out.attrs = var.attrs
return out
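# A minimal sketch of the conflict check above, assuming xarray is importable:
# variables that compare equal collapse to a single Variable.
def _example_unique_variable():
    import xarray as xr
    v1 = xr.Variable(('x',), [1, 2, 3])
    v2 = xr.Variable(('x',), [1, 2, 3])
    assert unique_variable('v', [v1, v2], compat='equals').equals(v1)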
def _assert_compat_valid(compat):
if compat not in _VALID_COMPAT:
raise ValueError("compat=%r invalid: must be %s"
% (compat, set(_VALID_COMPAT)))
class OrderedDefaultDict(OrderedDict):
# minimal version of an ordered defaultdict
# beware: does not pickle or copy properly
def __init__(self, default_factory):
self.default_factory = default_factory
super(OrderedDefaultDict, self).__init__()
def __missing__(self, key):
self[key] = default = self.default_factory()
return default
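# A minimal sketch of the helper above: missing keys are materialized with the
# factory while insertion order is preserved.
def _example_ordered_default_dict():
    lookup = OrderedDefaultDict(list)
    lookup['a'].append(1)
    lookup['b'].append(2)
    assert list(lookup) == ['a', 'b'] and lookup['a'] == [1]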
def merge_variables(
list_of_variables_dicts, # type: List[Mapping[Any, Variable]]
priority_vars=None, # type: Optional[Mapping[Any, Variable]]
compat='minimal', # type: str
):
# type: (...) -> OrderedDict[Any, Variable]
"""Merge dicts of variables, while resolving conflicts appropriately.
Parameters
----------
    list_of_variables_dicts : list of mappings with Variable values
List of mappings for which each value is a xarray.Variable object.
priority_vars : mapping with Variable or None values, optional
If provided, variables are always taken from this dict in preference to
the input variable dictionaries, without checking for conflicts.
compat : {'identical', 'equals', 'broadcast_equals',
'minimal', 'no_conflicts'}, optional
Type of equality check to use when checking for conflicts.
Returns
-------
OrderedDict with keys taken by the union of keys on list_of_variable_dicts,
and Variable values corresponding to those that should be found on the
merged result.
"""
if priority_vars is None:
priority_vars = {}
_assert_compat_valid(compat)
dim_compat = min(compat, 'equals', key=_VALID_COMPAT.get)
lookup = OrderedDefaultDict(list)
for variables in list_of_variables_dicts:
for name, var in variables.items():
lookup[name].append(var)
# n.b. it's important to fill up merged in the original order in which
# variables appear
merged = OrderedDict()
for name, variables in lookup.items():
if name in priority_vars:
# one of these arguments (e.g., the first for in-place arithmetic
# or the second for Dataset.update) takes priority
merged[name] = priority_vars[name]
else:
dim_variables = [var for var in variables if (name,) == var.dims]
if dim_variables:
# if there are dimension coordinates, these must be equal (or
# identical), and they take priority over non-dimension
# coordinates
merged[name] = unique_variable(name, dim_variables, dim_compat)
else:
try:
merged[name] = unique_variable(name, variables, compat)
except MergeError:
if compat != 'minimal':
# we need more than "minimal" compatibility (for which
# we drop conflicting coordinates)
raise
return merged
def expand_variable_dicts(list_of_variable_dicts):
# type: (List[Union[Dataset, Dict]]) -> List[Dict[Any, Variable]]
"""Given a list of dicts with xarray object values, expand the values.
Parameters
----------
list_of_variable_dicts : list of dict or Dataset objects
Each value for the mappings must be of the following types:
- an xarray.Variable
- a tuple `(dims, data[, attrs[, encoding]])` that can be converted in
an xarray.Variable
- or an xarray.DataArray
Returns
-------
A list of ordered dictionaries corresponding to inputs, or coordinates from
an input's values. The values of each ordered dictionary are all
xarray.Variable objects.
"""
var_dicts = []
for variables in list_of_variable_dicts:
if hasattr(variables, 'variables'): # duck-type Dataset
sanitized_vars = variables.variables
else:
# append coords to var_dicts before appending sanitized_vars,
# because we want coords to appear first
sanitized_vars = OrderedDict()
for name, var in variables.items():
if hasattr(var, '_coords'): # duck-type DataArray
# use private API for speed
coords = var._coords.copy()
# explicitly overwritten variables should take precedence
coords.pop(name, None)
var_dicts.append(coords)
var = as_variable(var, name=name)
sanitized_vars[name] = var
var_dicts.append(sanitized_vars)
return var_dicts
def determine_coords(list_of_variable_dicts):
# type: (List[Dict]) -> Tuple[Set, Set]
"""Given a list of dicts with xarray object values, identify coordinates.
Parameters
----------
list_of_variable_dicts : list of dict or Dataset objects
Of the same form as the arguments to expand_variable_dicts.
Returns
-------
coord_names : set of variable names
noncoord_names : set of variable names
        All variables found in the input should appear in either the set of
coordinate or non-coordinate names.
"""
coord_names = set()
noncoord_names = set()
for variables in list_of_variable_dicts:
if hasattr(variables, 'coords') and hasattr(variables, 'data_vars'):
# duck-type Dataset
coord_names.update(variables.coords)
noncoord_names.update(variables.data_vars)
else:
for name, var in variables.items():
if hasattr(var, '_coords'): # duck-type DataArray
coords = set(var._coords) # use private API for speed
# explicitly overwritten variables should take precedence
coords.discard(name)
coord_names.update(coords)
return coord_names, noncoord_names
def coerce_pandas_values(objects):
"""Convert pandas values found in a list of labeled objects.
Parameters
----------
objects : list of Dataset or mappings
The mappings may contain any sort of objects coercible to
xarray.Variables as keys, including pandas objects.
Returns
-------
List of Dataset or OrderedDict objects. Any inputs or values in the inputs
that were pandas objects have been converted into native xarray objects.
"""
from .dataset import Dataset
from .dataarray import DataArray
out = []
for obj in objects:
if isinstance(obj, Dataset):
variables = obj
else:
variables = OrderedDict()
if isinstance(obj, PANDAS_TYPES):
obj = OrderedDict(obj.iteritems())
for k, v in obj.items():
if isinstance(v, PANDAS_TYPES):
v = DataArray(v)
variables[k] = v
out.append(variables)
return out
def merge_coords_for_inplace_math(objs, priority_vars=None):
"""Merge coordinate variables without worrying about alignment.
This function is used for merging variables in coordinates.py.
"""
expanded = expand_variable_dicts(objs)
variables = merge_variables(expanded, priority_vars)
assert_unique_multiindex_level_names(variables)
return variables
def _get_priority_vars(objects, priority_arg, compat='equals'):
"""Extract the priority variable from a list of mappings.
We need this method because in some cases the priority argument itself
might have conflicting values (e.g., if it is a dict with two DataArray
values with conflicting coordinate values).
Parameters
----------
objects : list of dictionaries of variables
Dictionaries in which to find the priority variables.
priority_arg : int or None
Integer object whose variable should take priority.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts'}, optional
Compatibility checks to use when merging variables.
Returns
-------
None, if priority_arg is None, or an OrderedDict with Variable objects as
values indicating priority variables.
"""
if priority_arg is None:
priority_vars = {}
else:
expanded = expand_variable_dicts([objects[priority_arg]])
priority_vars = merge_variables(expanded, compat=compat)
return priority_vars
def expand_and_merge_variables(objs, priority_arg=None):
"""Merge coordinate variables without worrying about alignment.
This function is used for merging variables in computation.py.
"""
expanded = expand_variable_dicts(objs)
priority_vars = _get_priority_vars(objs, priority_arg)
variables = merge_variables(expanded, priority_vars)
return variables
def merge_coords(objs, compat='minimal', join='outer', priority_arg=None,
indexes=None):
"""Merge coordinate variables.
See merge_core below for argument descriptions. This works similarly to
    merge_core, except we don't worry about whether variables are
coordinates or not.
"""
_assert_compat_valid(compat)
coerced = coerce_pandas_values(objs)
aligned = deep_align(coerced, join=join, copy=False, indexes=indexes)
expanded = expand_variable_dicts(aligned)
priority_vars = _get_priority_vars(aligned, priority_arg, compat=compat)
variables = merge_variables(expanded, priority_vars, compat=compat)
assert_unique_multiindex_level_names(variables)
return variables
def merge_data_and_coords(data, coords, compat='broadcast_equals',
join='outer'):
"""Used in Dataset.__init__."""
objs = [data, coords]
explicit_coords = coords.keys()
return merge_core(objs, compat, join, explicit_coords=explicit_coords)
def assert_valid_explicit_coords(variables, dims, explicit_coords):
"""Validate explicit coordinate names/dims.
Raise a MergeError if an explicit coord shares a name with a dimension
but is comprised of arbitrary dimensions.
"""
for coord_name in explicit_coords:
if coord_name in dims and variables[coord_name].dims != (coord_name,):
raise MergeError(
'coordinate %s shares a name with a dataset dimension, but is '
'not a 1D variable along that dimension. This is disallowed '
'by the xarray data model.' % coord_name)
def merge_core(objs,
compat='broadcast_equals',
join='outer',
priority_arg=None,
explicit_coords=None,
indexes=None):
"""Core logic for merging labeled objects.
This is not public API.
Parameters
----------
objs : list of mappings
        All values must be convertible to labeled arrays.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts'}, optional
Compatibility checks to use when merging variables.
join : {'outer', 'inner', 'left', 'right'}, optional
How to combine objects with different indexes.
priority_arg : integer, optional
Optional argument in `objs` that takes precedence over the others.
explicit_coords : set, optional
An explicit list of variables from `objs` that are coordinates.
indexes : dict, optional
Dictionary with values given by pandas.Index objects.
Returns
-------
variables : OrderedDict
Ordered dictionary of Variable objects.
coord_names : set
Set of coordinate names.
dims : dict
Dictionary mapping from dimension names to sizes.
Raises
------
MergeError if the merge cannot be done successfully.
"""
from .dataset import calculate_dimensions
_assert_compat_valid(compat)
coerced = coerce_pandas_values(objs)
aligned = deep_align(coerced, join=join, copy=False, indexes=indexes)
expanded = expand_variable_dicts(aligned)
coord_names, noncoord_names = determine_coords(coerced)
priority_vars = _get_priority_vars(aligned, priority_arg, compat=compat)
variables = merge_variables(expanded, priority_vars, compat=compat)
assert_unique_multiindex_level_names(variables)
dims = calculate_dimensions(variables)
if explicit_coords is not None:
assert_valid_explicit_coords(variables, dims, explicit_coords)
coord_names.update(explicit_coords)
for dim, size in dims.items():
if dim in variables:
coord_names.add(dim)
ambiguous_coords = coord_names.intersection(noncoord_names)
if ambiguous_coords:
raise MergeError('unable to determine if these variables should be '
'coordinates or not in the merged result: %s'
% ambiguous_coords)
return variables, coord_names, dict(dims)
def merge(objects, compat='no_conflicts', join='outer'):
"""Merge any number of xarray objects into a single Dataset as variables.
Parameters
----------
objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]]
Merge together all variables from these objects. If any of them are
DataArray objects, they must have a name.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
How to combine objects with different indexes.
Returns
-------
Dataset
Dataset with combined variables from each object.
Examples
--------
>>> arrays = [xr.DataArray(n, name='var%d' % n) for n in range(5)]
>>> xr.merge(arrays)
<xarray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
var0 int64 0
var1 int64 1
var2 int64 2
var3 int64 3
var4 int64 4
Raises
------
xarray.MergeError
If any variables with the same name have conflicting values.
See also
--------
concat
"""
from .dataarray import DataArray
from .dataset import Dataset
dict_like_objects = [
obj.to_dataset() if isinstance(obj, DataArray) else obj
for obj in objects]
variables, coord_names, dims = merge_core(dict_like_objects, compat, join)
merged = Dataset._construct_direct(variables, coord_names, dims)
return merged
def dataset_merge_method(dataset, other, overwrite_vars, compat, join):
"""Guts of the Dataset.merge method."""
# we are locked into supporting overwrite_vars for the Dataset.merge
    # method due to backwards compatibility
# TODO: consider deprecating it?
if isinstance(overwrite_vars, basestring):
overwrite_vars = set([overwrite_vars])
overwrite_vars = set(overwrite_vars)
if not overwrite_vars:
objs = [dataset, other]
priority_arg = None
elif overwrite_vars == set(other):
objs = [dataset, other]
priority_arg = 1
else:
other_overwrite = OrderedDict()
other_no_overwrite = OrderedDict()
for k, v in other.items():
if k in overwrite_vars:
other_overwrite[k] = v
else:
other_no_overwrite[k] = v
objs = [dataset, other_no_overwrite, other_overwrite]
priority_arg = 2
return merge_core(objs, compat, join, priority_arg=priority_arg)
def dataset_update_method(dataset, other):
"""Guts of the Dataset.update method
    This drops duplicated coordinates from `other` (GH:2068)
"""
from .dataset import Dataset
from .dataarray import DataArray
other = other.copy()
for k, obj in other.items():
if isinstance(obj, (Dataset, DataArray)):
# drop duplicated coordinates
coord_names = [c for c in obj.coords
if c not in obj.dims and c in dataset.coords]
if coord_names:
other[k] = obj.drop(coord_names)
return merge_core([dataset, other], priority_arg=1,
indexes=dataset.indexes)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/merge.py",
"copies": "1",
"size": "19661",
"license": "apache-2.0",
"hash": -4577991834176582000,
"line_mean": 33.6754850088,
"line_max": 79,
"alpha_frac": 0.6252988149,
"autogenerated": false,
"ratio": 4.524971231300345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5650270046200345,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
from .core import Series, map_partitions, partial
class Accessor(object):
"""
Base class for pandas Accessor objects cat, dt, and str.
Properties
----------
_meta_attributes : set
        set of strings indicating attributes that can be computed
on just the ``_meta_nonempty`` attribute
Notes
-----
Subclasses should implement
* getattr
* call
"""
_meta_attributes = set()
def __init__(self, series):
if not isinstance(series, Series):
raise ValueError('Accessor cannot be initialized')
self._series = series
def _get_property_out(self, key):
# For CategoricalAccessor to override with _meta
return self.getattr(self._series._meta_nonempty, key)
def _property_map(self, key):
out = self._get_property_out(key)
if key in self._meta_attributes:
return out
meta = self._series._partition_type([], dtype=out.dtype,
name=getattr(out, 'name', None))
return map_partitions(self.getattr, self._series, key, meta=meta)
def _function_map(self, key, *args, **kwargs):
out = self.call(self._series._meta_nonempty, key, *args, **kwargs)
meta = self._series._partition_type([], dtype=out.dtype,
name=getattr(out, 'name', None))
return map_partitions(self.call, self._series, key, *args, meta=meta,
**kwargs)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
dir(self.ns)))
def __getattr__(self, key):
if key in dir(self.ns):
if isinstance(getattr(self.ns, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise AttributeError(key)
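# A minimal usage sketch of the dispatch above, assuming dask.dataframe is
# importable: ``s.dt.year`` goes through _property_map, while a method such as
# ``s.str.upper()`` would go through _function_map.
def _example_accessor_dispatch():
    import pandas as pd
    import dask.dataframe as dd
    s = dd.from_pandas(pd.Series(pd.date_range('2000-01-01', periods=4)),
                       npartitions=2)
    assert set(s.dt.year.compute()) == {2000}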
class DatetimeAccessor(Accessor):
""" Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.microsecond # doctest: +SKIP
"""
ns = pd.Series.dt
@staticmethod
def getattr(obj, attr):
return getattr(obj.dt, attr)
@staticmethod
def call(obj, attr, *args):
return getattr(obj.dt, attr)(*args)
class StringAccessor(Accessor):
""" Accessor object for string properties of the Series values.
Examples
--------
>>> s.str.lower() # doctest: +SKIP
"""
ns = pd.Series.str
@staticmethod
def getattr(obj, attr):
return getattr(obj.str, attr)
@staticmethod
def call(obj, attr, *args, **kwargs):
return getattr(obj.str, attr)(*args, **kwargs)
class CategoricalAccessor(Accessor):
"""
Accessor object for categorical properties of the Series values.
Examples
--------
>>> s.cat.categories # doctest: +SKIP
Notes
-----
Attributes that depend only on metadata are eager
* categories
* ordered
Attributes depending on the entire dataset are lazy
* codes
* ...
So `df.a.cat.categories` <=> `df.a._meta.cat.categories`
So `df.a.cat.codes` <=> `df.a.map_partitions(lambda x: x.cat.codes)`
"""
ns = pd.Series.cat
_meta_attributes = {'categories', 'ordered'}
def _function_map(self, key, *args, **kwargs):
out = self.call(self._series._meta, key, *args, **kwargs)
meta = self._series._partition_type(
pd.Categorical([], categories=out.cat.categories,
ordered=out.cat.ordered),
name=getattr(out, 'name', None)
)
return map_partitions(self.call, self._series, key, *args, meta=meta,
**kwargs)
def _get_property_out(self, key):
# _meta should have all type-info, and _meta_nonempty may fail
# See https://github.com/dask/dask/issues/1705
return self.getattr(self._series._meta, key)
@staticmethod
def getattr(obj, attr):
return getattr(obj.cat, attr)
@staticmethod
def call(obj, attr, *args, **kwargs):
return getattr(obj.cat, attr)(*args, **kwargs)
def remove_unused_categories(self):
"""
Removes categories which are not used
Notes
-----
This method requires a full scan of the data to compute the
unique values, which can be expensive.
"""
# get the set of used categories
present = self._series.dropna().unique()
present = pd.Index(present.compute())
# Reorder to keep cat:code relationship, filtering unused (-1)
ordered, mask = present.reindex(self._series._meta.cat.categories)
new_categories = ordered[mask != -1]
meta = self._series._meta.cat.set_categories(
new_categories,
ordered=self._series._meta.cat.ordered
)
result = map_partitions(self.call, self._series, 'set_categories',
meta=meta, new_categories=new_categories)
return result
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/accessor.py",
"copies": "3",
"size": "5164",
"license": "mit",
"hash": 4052590670606854000,
"line_mean": 28.3409090909,
"line_max": 77,
"alpha_frac": 0.5807513555,
"autogenerated": false,
"ratio": 4.141138732959102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6221890088459102,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import datashape
from datashape import discover
from ..append import append
from ..convert import convert, ooc_types
from ..chunks import chunks, Chunks
from ..resource import resource
HDFDataset = (pd.io.pytables.AppendableFrameTable, pd.io.pytables.FrameFixed)
@discover.register(pd.HDFStore)
def discover_hdfstore(f):
d = dict()
for key in f.keys():
d2 = d
key2 = key.lstrip('/')
while '/' in key2:
group, key2 = key2.split('/', 1)
if group not in d2:
d2[group] = dict()
d2 = d2[group]
d2[key2] = f.get_storer(key)
return discover(d)
@discover.register(pd.io.pytables.Fixed)
def discover_hdfstore_storer(storer):
f = storer.parent
n = storer.shape
if isinstance(n, list):
n = n[0]
measure = discover(f.select(storer.pathname, start=0, stop=10)).measure
return n * measure
@convert.register(chunks(pd.DataFrame), pd.io.pytables.AppendableFrameTable)
def hdfstore_to_chunks_dataframes(data, chunksize=1000000, **kwargs):
return chunks(pd.DataFrame)(data.parent.select(data.pathname, chunksize=chunksize))
@convert.register(pd.DataFrame, (pd.io.pytables.AppendableFrameTable,
pd.io.pytables.FrameFixed))
def hdfstore_to_dataframe(data, **kwargs):
return data.read()
from collections import namedtuple
EmptyHDFStoreDataset = namedtuple('EmptyHDFStoreDataset', 'parent,pathname,dshape')
@resource.register('hdfstore://.+', priority=11)
def resource_hdfstore(uri, datapath=None, dshape=None, **kwargs):
# TODO:
# 1. Support nested datashapes (e.g. groups)
# 2. Try translating unicode to ascii? (PyTables fails here)
fn = uri.split('://')[1]
f = pd.HDFStore(fn)
if dshape is None:
if datapath:
return f.get_storer(datapath)
else:
return f
dshape = datashape.dshape(dshape)
# Already exists, return it
if datapath in f:
return f.get_storer(datapath)
    # Need to create a new dataset.
# HDFStore doesn't support empty datasets, so we use a proxy object.
return EmptyHDFStoreDataset(f, datapath, dshape)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), pd.DataFrame)
def append_dataframe_to_hdfstore(store, df, **kwargs):
store.parent.append(store.pathname, df, append=True)
return store.parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset),
chunks(pd.DataFrame))
def append_chunks_dataframe_to_hdfstore(store, c, **kwargs):
parent = store.parent
for chunk in c:
parent.append(store.pathname, chunk)
return parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), object)
def append_object_to_hdfstore(store, o, **kwargs):
return append(store, convert(chunks(pd.DataFrame), o, **kwargs), **kwargs)
ooc_types |= set(HDFDataset)
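# A hedged usage sketch, assuming PyTables is installed; the file name,
# datapath and datashape are placeholders. resource() returns a (possibly
# empty) HDFStore dataset and append() writes the frame into it.
def _example_hdfstore_append(path='example.h5'):
    import pandas as pd
    df = pd.DataFrame({'x': [1, 2, 3]})
    target = resource('hdfstore://%s' % path, datapath='/data',
                      dshape='var * {x: int64}')
    return append(target, df)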
| {
"repo_name": "mrocklin/into",
"path": "into/backends/hdfstore.py",
"copies": "1",
"size": "3041",
"license": "bsd-3-clause",
"hash": -3648227019648136700,
"line_mean": 30.3505154639,
"line_max": 87,
"alpha_frac": 0.6797106215,
"autogenerated": false,
"ratio": 3.420697412823397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46004080343233966,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import xarray as xr
from . import randn, requires_dask
try:
import dask # noqa
except ImportError:
pass
def make_bench_data(shape, frac_nan, chunks):
vals = randn(shape, frac_nan)
coords = {'time': pd.date_range('2000-01-01', freq='D',
periods=shape[0])}
da = xr.DataArray(vals, dims=('time', 'x', 'y'), coords=coords)
if chunks is not None:
da = da.chunk(chunks)
return da
def time_interpolate_na(shape, chunks, method, limit):
if chunks is not None:
requires_dask()
da = make_bench_data(shape, 0.1, chunks=chunks)
    actual = da.interpolate_na(dim='time', method=method, limit=limit)
if chunks is not None:
actual = actual.compute()
time_interpolate_na.param_names = ['shape', 'chunks', 'method', 'limit']
time_interpolate_na.params = ([(3650, 200, 400), (100, 25, 25)],
[None, {'x': 25, 'y': 25}],
['linear', 'spline', 'quadratic', 'cubic'],
[None, 3])
def time_ffill(shape, chunks, limit):
da = make_bench_data(shape, 0.1, chunks=chunks)
actual = da.ffill(dim='time', limit=limit)
if chunks is not None:
actual = actual.compute()
time_ffill.param_names = ['shape', 'chunks', 'limit']
time_ffill.params = ([(3650, 200, 400), (100, 25, 25)],
[None, {'x': 25, 'y': 25}],
[None, 3])
def time_bfill(shape, chunks, limit):
da = make_bench_data(shape, 0.1, chunks=chunks)
actual = da.bfill(dim='time', limit=limit)
if chunks is not None:
actual = actual.compute()
time_bfill.param_names = ['shape', 'chunks', 'limit']
time_bfill.params = ([(3650, 200, 400), (100, 25, 25)],
[None, {'x': 25, 'y': 25}],
[None, 3])
| {
"repo_name": "shoyer/xray",
"path": "asv_bench/benchmarks/dataarray_missing.py",
"copies": "3",
"size": "1938",
"license": "apache-2.0",
"hash": -6081267601055488000,
"line_mean": 26.2957746479,
"line_max": 73,
"alpha_frac": 0.5521155831,
"autogenerated": false,
"ratio": 3.3241852487135506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5376300831813551,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas
import os
from toolz import curry, concat, map
import pandas as pd
import numpy as np
from collections import Iterator, Iterable
from odo import into
from odo.chunks import chunks, Chunks
from odo.backends.csv import CSV, csv_to_DataFrame
from multipledispatch import MDNotImplementedError
from ..dispatch import dispatch
from ..expr import Expr, Head, ElemWise, Distinct, Symbol, Projection, Field
from ..expr.core import path
from ..utils import available_memory
from ..expr.split import split
from .core import compute
from ..expr.optimize import lean_projection
from .pmap import get_default_pmap
@dispatch(Expr, CSV)
def optimize(expr, _):
return lean_projection(expr) # This is handled in pre_compute
@dispatch(Expr, CSV)
def pre_compute(expr, data, comfortable_memory=None, chunksize=2**18, **kwargs):
comfortable_memory = comfortable_memory or min(1e9, available_memory() / 4)
kwargs = dict()
# Chunk if the file is large
if os.path.getsize(data.path) > comfortable_memory:
kwargs['chunksize'] = chunksize
else:
chunksize = None
# Insert projection into read_csv
oexpr = optimize(expr, data)
leaf = oexpr._leaves()[0]
pth = list(path(oexpr, leaf))
if len(pth) >= 2 and isinstance(pth[-2], (Projection, Field)):
kwargs['usecols'] = pth[-2].fields
if chunksize:
return into(chunks(pd.DataFrame), data, dshape=leaf.dshape, **kwargs)
else:
return into(pd.DataFrame, data, dshape=leaf.dshape, **kwargs)
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, CSV)
def pre_compute(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return into(Iterator, data, chunksize=10000, dshape=leaf.dshape)
else:
raise MDNotImplementedError()
def compute_chunk(chunk, chunk_expr, part):
return compute(chunk_expr, {chunk: part})
@dispatch(Expr, pandas.io.parsers.TextFileReader)
def compute_down(expr, data, map=None, **kwargs):
if map is None:
map = get_default_pmap()
leaf = expr._leaves()[0]
(chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr)
parts = list(map(curry(compute_chunk, chunk, chunk_expr), data))
if isinstance(parts[0], np.ndarray):
intermediate = np.concatenate(parts)
elif isinstance(parts[0], pd.DataFrame):
intermediate = pd.concat(parts)
elif isinstance(parts[0], (Iterable, Iterator)):
intermediate = concat(parts)
return compute(agg_expr, {agg: intermediate})
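# A minimal sketch, in plain pandas, of the chunk-then-aggregate strategy that
# pre_compute/compute_down implement above: the projection is pushed into
# read_csv via ``usecols`` and a reduction is combined across chunks. The path
# and column name are placeholders.
def _example_chunked_sum(path, chunksize=10000):
    import pandas as pd
    parts = [chunk['x'].sum()
             for chunk in pd.read_csv(path, usecols=['x'], chunksize=chunksize)]
    return sum(parts)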
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/csv.py",
"copies": "1",
"size": "2625",
"license": "bsd-3-clause",
"hash": -498708794102937200,
"line_mean": 29.523255814,
"line_max": 80,
"alpha_frac": 0.6933333333,
"autogenerated": false,
"ratio": 3.5377358490566038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47310691823566037,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas
import os
from toolz import curry, concat
import pandas as pd
import numpy as np
from collections import Iterator, Iterable
from odo import into
from odo.chunks import chunks
from odo.backends.csv import CSV
from multipledispatch import MDNotImplementedError
from ..dispatch import dispatch
from ..expr import Expr, Head, ElemWise, Distinct, Symbol, Projection, Field
from ..expr.core import path
from ..utils import available_memory
from ..expr.split import split
from .core import compute
from ..expr.optimize import lean_projection
from .pmap import get_default_pmap
__all__ = ['optimize', 'pre_compute', 'compute_chunk', 'compute_down']
@dispatch(Expr, CSV)
def optimize(expr, _):
return lean_projection(expr) # This is handled in pre_compute
@dispatch(Expr, CSV)
def pre_compute(expr, data, comfortable_memory=None, chunksize=2**18, **kwargs):
comfortable_memory = comfortable_memory or min(1e9, available_memory() / 4)
kwargs = dict()
# Chunk if the file is large
if os.path.getsize(data.path) > comfortable_memory:
kwargs['chunksize'] = chunksize
else:
chunksize = None
# Insert projection into read_csv
oexpr = optimize(expr, data)
leaf = oexpr._leaves()[0]
pth = list(path(oexpr, leaf))
if len(pth) >= 2 and isinstance(pth[-2], (Projection, Field)):
kwargs['usecols'] = pth[-2].fields
if chunksize:
return into(chunks(pd.DataFrame), data, dshape=leaf.dshape, **kwargs)
else:
return into(pd.DataFrame, data, dshape=leaf.dshape, **kwargs)
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, CSV)
def pre_compute(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return into(Iterator, data, chunksize=10000, dshape=leaf.dshape)
else:
raise MDNotImplementedError()
def compute_chunk(chunk, chunk_expr, part):
return compute(chunk_expr, {chunk: part})
@dispatch(Expr, pandas.io.parsers.TextFileReader)
def compute_down(expr, data, map=None, **kwargs):
if map is None:
map = get_default_pmap()
leaf = expr._leaves()[0]
(chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr)
parts = list(map(curry(compute_chunk, chunk, chunk_expr), data))
if isinstance(parts[0], np.ndarray):
intermediate = np.concatenate(parts)
elif isinstance(parts[0], pd.DataFrame):
intermediate = pd.concat(parts)
elif isinstance(parts[0], (Iterable, Iterator)):
intermediate = concat(parts)
return compute(agg_expr, {agg: intermediate})
| {
"repo_name": "LiaoPan/blaze",
"path": "blaze/compute/csv.py",
"copies": "11",
"size": "2667",
"license": "bsd-3-clause",
"hash": -4761388397392479000,
"line_mean": 28.9662921348,
"line_max": 80,
"alpha_frac": 0.6902887139,
"autogenerated": false,
"ratio": 3.532450331125828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9722739045025829,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import paramiko
from contextlib import contextmanager
from toolz import keyfilter, memoize, take, curry
from datashape import discover
import re
import uuid
from ..directory import Directory
from ..utils import keywords, tmpfile, sample, ignoring, copydoc
from ..resource import resource
from ..append import append
from ..convert import convert
from ..temp import Temp, _Temp
from ..drop import drop
from .csv import CSV
from .json import JSON, JSONLines
from .text import TextFile
connection_pool = dict()
def connect(**auth):
key = tuple(sorted(auth.items()))
if key in connection_pool:
ssh = connection_pool[key]
if not ssh.get_transport() or not ssh.get_transport().is_active():
ssh.connect(**auth)
else:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(**auth)
connection_pool[key] = ssh
return ssh
sftp_pool = dict()
def sftp(**auth):
ssh = connect(**auth) # Need to call this explicitly (can't memoize)
key = tuple(sorted(auth.items()))
if key in sftp_pool:
conn = sftp_pool[key]
else:
conn = ssh.open_sftp()
sftp_pool[key] = conn
conn.sock.setblocking(True)
return conn
class _SSH(object):
""" Parent class for data accessed through ``ssh``
See ``paramiko.SSHClient.connect`` for authentication keyword arguments
Examples
--------
>>> from odo import SSH, CSV
>>> s = SSH(CSV)('/path/to/file.csv', hostname='hostname', username='alice')
Normally create through resource uris
>>> data = resource('ssh://alice@host:/path/to/file.csv', password='pass')
>>> data.path
'/path/to/file.csv'
>>> data.auth['hostname']
'host'
"""
def __init__(self, *args, **kwargs):
self.auth = keyfilter(keywords(paramiko.SSHClient.connect).__contains__,
kwargs)
self.subtype.__init__(self, *args, **kwargs)
def lines(self):
conn = sftp(**self.auth)
return conn.file(self.path, 'r')
@memoize
@copydoc(_SSH)
def SSH(cls):
return type('SSH(%s)' % cls.__name__, (_SSH, cls), {'subtype': cls})
types_by_extension = {'csv': CSV, 'json': JSONLines}
ssh_pattern = '((?P<username>[a-zA-Z]\w*)@)?(?P<hostname>[\w.-]*)(:(?P<port>\d+))?:(?P<path>[/\w.*-]+)'
@resource.register('ssh://.+', priority=16)
def resource_ssh(uri, **kwargs):
if 'ssh://' in uri:
uri = uri[len('ssh://'):]
d = re.match(ssh_pattern, uri).groupdict()
d = dict((k, v) for k, v in d.items() if v is not None)
path = d.pop('path')
kwargs.update(d)
try:
subtype = types_by_extension[path.split('.')[-1]]
if '*' in path:
subtype = Directory(subtype)
path = path.rsplit('/', 1)[0] + '/'
except KeyError:
subtype = type(resource(path))
return SSH(subtype)(path, **kwargs)
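# A minimal sketch of the URI parsing above; the user, host and path are
# made-up placeholders, only the named groups of ssh_pattern are demonstrated.
def _example_ssh_pattern():
    d = re.match(ssh_pattern, 'alice@host.example.com:22:/data/file.csv').groupdict()
    assert d['username'] == 'alice'
    assert d['hostname'] == 'host.example.com'
    assert d['port'] == '22'
    assert d['path'] == '/data/file.csv'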
@sample.register((SSH(CSV),
SSH(JSON),
SSH(JSONLines)))
@contextmanager
def sample_ssh(data, lines=500):
""" Grab a few lines from the remote file """
with tmpfile() as fn:
with open(fn, 'w') as f:
for line in take(lines, data.lines()):
f.write(line)
yield fn
@sample.register((SSH(Directory(CSV)),
SSH(Directory(JSON)),
SSH(Directory(JSONLines))))
@contextmanager
def sample_ssh(data, **kwargs):
""" Grab a few lines from a file in a remote directory """
conn = sftp(**data.auth)
fn = data.path + '/' + conn.listdir(data.path)[0]
one_file = SSH(data.container)(fn, **data.auth)
with sample(one_file, **kwargs) as result:
yield result
@discover.register(_SSH)
def discover_ssh(data, **kwargs):
with sample(data) as fn:
o = data.subtype(fn)
result = discover(o)
return result
@discover.register(SSH(CSV))
def discover_ssh_csv(data, **kwargs):
with sample(data) as fn:
o = CSV(fn, encoding=data.encoding, has_header=data.has_header, **data.dialect)
result = discover(o)
return result
@discover.register((SSH(JSON), SSH(JSONLines)))
def discover_ssh_json(data, **kwargs):
with sample(data) as fn:
result = discover(data.subtype(fn))
return result
@discover.register((SSH(Directory(CSV)),
SSH(Directory(JSON)),
SSH(Directory(JSONLines))))
def discover_ssh_directory(data, **kwargs):
conn = sftp(**data.auth)
fn = data.path + '/' + conn.listdir(data.path)[0]
one_file = SSH(data.container)(fn, **data.auth)
result = discover(one_file)
return result
@drop.register((_SSH, SSH(CSV), SSH(JSON), SSH(JSONLines)))
def drop_ssh(data, **kwargs):
conn = sftp(**data.auth)
with ignoring(IOError):
conn.remove(data.path)
@append.register(_SSH, object)
def append_anything_to_ssh(target, source, **kwargs):
if not isinstance(source, target.subtype):
source = convert(Temp(target.subtype), source, **kwargs)
# TODO: handle overwrite case
conn = sftp(**target.auth)
conn.put(source.path, target.path)
return target
@append.register(TextFile, SSH(TextFile))
@append.register(JSONLines, SSH(JSONLines))
@append.register(JSON, SSH(JSON))
@append.register(CSV, SSH(CSV))
def append_sshX_to_X(target, source, **kwargs):
# TODO: handle overwrite case
conn = sftp(**source.auth)
conn.get(source.path, target.path)
return target
@curry
def file_to_temp_ssh_file(typ, data, **kwargs):
""" Generic convert function sending data to ssh(data)
Needs to be partially evaluated with a type"""
# don't use . prefix to hide because Hive doesn't like it
fn = '%s.%s' % (uuid.uuid1(), typ.canonical_extension)
target = Temp(SSH(typ))(fn, **kwargs)
return append(target, data, **kwargs)
for typ in [CSV, JSON, JSONLines, TextFile]:
convert.register(Temp(SSH(typ)), (Temp(typ), typ))(
file_to_temp_ssh_file(typ))
@convert.register(Temp(TextFile), (Temp(SSH(TextFile)), SSH(TextFile)))
@convert.register(Temp(JSONLines), (Temp(SSH(JSONLines)), SSH(JSONLines)))
@convert.register(Temp(JSON), (Temp(SSH(JSON)), SSH(JSON)))
@convert.register(Temp(CSV), (Temp(SSH(CSV)), SSH(CSV)))
def ssh_file_to_temp_file(data, **kwargs):
fn = '.%s' % uuid.uuid1()
target = Temp(data.subtype)(fn, **kwargs)
return append(target, data, **kwargs)
@convert.register(Temp(SSH(TextFile)), (TextFile, Temp(TextFile)))
@convert.register(Temp(SSH(JSONLines)), (JSONLines, Temp(JSONLines)))
@convert.register(Temp(SSH(JSON)), (JSON, Temp(JSON)))
@convert.register(Temp(SSH(CSV)), (CSV, Temp(CSV)))
def file_to_temp_ssh_file(data, **kwargs):
fn = '%s' % uuid.uuid1()
if isinstance(data, _Temp):
target = Temp(SSH(data.persistent_type))(fn, **kwargs)
else:
target = Temp(SSH(type(data)))(fn, **kwargs)
return append(target, data, **kwargs)
| {
"repo_name": "Dannnno/odo",
"path": "odo/backends/ssh.py",
"copies": "9",
"size": "7008",
"license": "bsd-3-clause",
"hash": 1505857868421652700,
"line_mean": 28.6949152542,
"line_max": 103,
"alpha_frac": 0.6245719178,
"autogenerated": false,
"ratio": 3.421875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.85464469178,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import paramiko
from contextlib import contextmanager
from toolz import keyfilter, memoize, take
from datashape import discover
import re
from ..directory import _Directory, Directory
from ..utils import keywords, tmpfile, sample
from ..resource import resource
from ..append import append
from ..drop import drop
@contextmanager
def connect(**auth):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(**auth)
try:
yield ssh
finally:
ssh.close()
@contextmanager
def sftp(**auth):
with connect(**auth) as ssh:
sftp = ssh.open_sftp()
yield sftp
class _SSH(object):
""" Parent class for data accessed through ``ssh``
See ``paramiko.SSHClient.connect`` for authentication keyword arguments
Examples
--------
>>> from into import SSH, CSV
>>> s = SSH(CSV)('/path/to/file.csv', hostname='hostname', username='alice')
Normally create through resource uris
>>> data = resource('ssh://alice@host:/path/to/file.csv', password='pass')
>>> data.path
'/path/to/file.csv'
>>> data.auth['hostname']
'host'
"""
def __init__(self, *args, **kwargs):
self.auth = keyfilter(keywords(paramiko.SSHClient.connect).__contains__,
kwargs)
self.subtype.__init__(self, *args, **kwargs)
def lines(self):
with sftp(**self.auth) as conn:
for line in conn.file(self.path, 'r'):
yield line
def SSH(cls):
return type('SSH(%s)' % cls.__name__, (_SSH, cls), {'subtype': cls})
SSH.__doc__ = _SSH.__doc__
SSH = memoize(SSH)
from .csv import CSV
from .json import JSON, JSONLines
types_by_extension = {'csv': CSV, 'json': JSONLines}
ssh_pattern = '((?P<username>[a-zA-Z]\w*)@)?(?P<hostname>[\w.-]*)(:(?P<port>\d+))?:(?P<path>[/\w.*-]+)'
@resource.register('ssh://.+', priority=16)
def resource_ssh(uri, **kwargs):
if 'ssh://' in uri:
uri = uri[len('ssh://'):]
d = re.match(ssh_pattern, uri).groupdict()
d = dict((k, v) for k, v in d.items() if v is not None)
path = d.pop('path')
kwargs.update(d)
try:
subtype = types_by_extension[path.split('.')[-1]]
if '*' in path:
subtype = Directory(subtype)
path = path.rsplit('/', 1)[0] + '/'
except KeyError:
subtype = type(resource(path))
return SSH(subtype)(path, **kwargs)
@sample.register(SSH(CSV))
@contextmanager
def sample_ssh(data, lines=500):
""" Grab a few lines from the remote file """
with tmpfile('csv') as fn:
with open(fn, 'w') as f:
for line in take(lines, data.lines()):
f.write(line)
f.write('\n')
yield fn
@sample.register(SSH(Directory(CSV)))
@contextmanager
def sample_ssh(data, **kwargs):
""" Grab a few lines from a file in a remote directory """
with sftp(**data.auth) as conn:
fn = data.path + '/' + conn.listdir(data.path)[0]
one_file = SSH(data.container)(fn, **data.auth)
with sample(one_file, **kwargs) as result:
yield result
@discover.register(_SSH)
def discover_ssh(data, **kwargs):
with sample(data) as fn:
o = data.subtype(fn)
result = discover(o)
return result
@discover.register(SSH(CSV))
def discover_ssh_csv(data, **kwargs):
with sample(data) as fn:
o = CSV(fn, encoding=data.encoding, has_header=data.has_header, **data.dialect)
result = discover(o)
return result
@discover.register(SSH(Directory(CSV)))
def discover_ssh_directory(data, **kwargs):
with sftp(**data.auth) as conn:
fn = data.path + '/' + conn.listdir(data.path)[0]
one_file = SSH(data.container)(fn, **data.auth)
result = discover(one_file)
return result
@drop.register(_SSH)
def drop_ssh(data, **kwargs):
with sftp(**data.auth) as conn:
conn.remove(data.path)
@append.register(_SSH, object)
def append_anything_to_ssh(target, source, **kwargs):
if not isinstance(source, target.subtype):
raise NotImplementedError() # TODO: create local temp
# TODO: handle overwrite case
with sftp(**target.auth) as conn:
conn.put(source.path, target.path)
return target
@append.register(JSONLines, SSH(JSONLines))
@append.register(JSON, SSH(JSON))
@append.register(CSV, SSH(CSV))
def append_sshX_to_X(target, source, **kwargs):
# TODO: handle overwrite case
with sftp(**source.auth) as conn:
conn.get(source.path, target.path)
return target
| {
"repo_name": "mrocklin/into",
"path": "into/backends/ssh.py",
"copies": "1",
"size": "4636",
"license": "bsd-3-clause",
"hash": 8174014660481400000,
"line_mean": 25.7976878613,
"line_max": 103,
"alpha_frac": 0.6147540984,
"autogenerated": false,
"ratio": 3.520121488230828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9628024255075551,
"avg_score": 0.0013702663110552315,
"num_lines": 173
} |
from __future__ import absolute_import, division, print_function
import pickle
from collections import OrderedDict
import numpy as np
import theano.tensor as T
from functools import reduce
from .. import optimizer
from ..layers import layers
class ModelBasis(object):
"""
Arguments
model_config: model configuration dictionary
Attributes of model_config
input_size: input image size, (height, width).
num_ch: number of input channels
mae: mean absolute error (MAE) / mean square error (MSE) ratio.
0: only MSE / 1: only MAE
lr: initial learning rate
"""
def __init__(self, model_config={}, rng=None):
# Check input_size
input_size = model_config.get('input_size', None)
if isinstance(input_size, int):
self.input_size = (input_size, input_size)
elif isinstance(input_size, (list, tuple)):
assert len(input_size) == 2
self.input_size = tuple(input_size)
else:
raise ValueError('Wrong input_size:', input_size)
self.num_ch = model_config.get('num_ch', None)
assert self.num_ch is not None
self.input_shape = (None, self.num_ch) + self.input_size
self.mae = float(model_config.get('mae', 0.0))
self.opt = optimizer.Optimizer()
self.set_opt_configs(model_config)
if rng is None:
rng = np.random.RandomState(1234)
self.rng = rng
self.layers = OrderedDict()
self.params = OrderedDict()
def set_opt_configs(self, model_config=None, opt_scheme=None, lr=None):
if model_config is None:
assert lr is not None and opt_scheme is not None
else:
lr = float(model_config.get('lr', 1e-3))
opt_scheme = model_config.get('opt_scheme', 'adam')
self.lr = lr
self.opt_scheme = opt_scheme
self.opt.set_learning_rate(self.lr)
###########################################################################
# Functions for cost calculation
def get_l2_regularization(self, layer_keys=None, mode='sum'):
if layer_keys is None:
layer_keys = list(self.layers.keys())
l2 = []
if mode == 'sum':
for key in layer_keys:
for layer in self.layers[key]:
if hasattr(layer, 'W'):
l2.append(T.sum(layer.W ** 2))
return T.sum(l2)
elif mode == 'mean':
for key in layer_keys:
for layer in self.layers[key]:
if hasattr(layer, 'W'):
l2.append(T.mean(layer.W ** 2))
return T.mean(l2)
else:
raise NotImplementedError
def get_cost_mse_mae(self, x, y):
diff = x - y
if self.mae == 0:
return T.mean(diff ** 2)
elif self.mae > 0 and self.mae < 1.0:
return ((1.0 - self.mae) * T.mean(diff ** 2) +
self.mae * T.mean(T.abs_(diff)))
else:
return T.mean(T.abs_(diff))
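    # Worked example (illustrative): with self.mae = 0.2 the blended cost of
    # get_cost_mse_mae is
    #     cost = 0.8 * T.mean((x - y) ** 2) + 0.2 * T.mean(abs(x - y)),
    # so mae = 0 reduces to pure MSE and mae = 1 to pure MAE.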
def get_mse(self, x, y, return_map=False):
if return_map:
return (x - y) ** 2
else:
return T.mean(((x - y) ** 2).flatten(2), axis=1)
def add_all_losses_with_weight(self, losses, weights):
"""Add the losses with the weights multiplied.
If the weight is 0, the corresponding loss is ignored.
"""
assert len(losses) == len(weights)
loss_list = []
for loss, weight in zip(losses, weights):
if weight != 0:
loss_list.append(weight * loss)
return reduce(lambda x, y: x + y, loss_list)
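    # Example (illustrative): add_all_losses_with_weight([l_a, l_b, l_c],
    # [1.0, 0.0, 0.5]) returns l_a + 0.5 * l_c; the zero-weighted loss l_b is
    # skipped rather than multiplied by zero.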
###########################################################################
# Functions to help build layers
def get_input_shape(self, batch_size=None):
"""Get the input shape of the Mode.
Returns
-------
(batch_size, self.num_ch) + self.input_size
"""
return (batch_size, self.num_ch) + self.input_size
def get_out_shape(self, key, nth=-1):
"""Get the `nth` output shape in the `key` layers
"""
if nth < 0:
idx = len(self.layers[key]) + nth
else:
idx = nth
out_sh = None
while out_sh is None:
if idx < 0:
raise ValueError('Cannot obtain the output size from %s' % key)
out_sh = self.layers[key][idx].get_out_shape()
idx = idx - 1
return out_sh
def get_conc_shape(self, key0, key1):
"""Get the concatenated shape of the ouputs of
`key0` and `key1` layers
"""
prev_sh0 = self.get_out_shape(key0)
prev_sh1 = self.get_out_shape(key1)
if isinstance(prev_sh0, (list, tuple)):
assert prev_sh0[0] == prev_sh1[0]
assert prev_sh0[2:] == prev_sh1[2:]
return (prev_sh0[0], prev_sh0[1] + prev_sh1[1]) + prev_sh0[2:]
else:
return prev_sh0 + prev_sh1
###########################################################################
# Functions to help make computation graph
def image_vec_to_tensor(self, input):
"""Reshape input into 4D tensor.
"""
# im_sh = (-1, self.input_size[0],
# self.input_size[1], self.num_ch)
# return input.reshape(im_sh).dimshuffle(0, 3, 1, 2)
return input.dimshuffle(0, 3, 1, 2)
# def tensor_to_image_vec(self, input):
# """Reshape 4D tensor into input."""
# return input.dimshuffle(0, 2, 3, 1).flatten(2)
def get_key_layers_output(self, input, key, var_shape=False):
"""Put input to the `key` layers and get output.
"""
prev_out = input
for layer in self.layers[key]:
prev_out = layer.get_output(prev_out, var_shape=var_shape)
return prev_out
def get_updates(self, cost, wrt_params):
return self.opt.get_updates_cost(cost, wrt_params, self.opt_scheme)
def get_updates_keys(self, cost, keys=[], params=[]):
wrt_params = []
for key in keys:
wrt_params += self.params[key]
if params:
wrt_params += params
print(' - Update w.r.t.: %s' % ', '.join(keys))
return self.opt.get_updates_cost(cost, wrt_params, self.opt_scheme)
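    # Example (illustrative, hypothetical key names): calling
    #     updates = model.get_updates_keys(cost, keys=['feat', 'reg'])
    # gathers only the parameters registered under those layer keys and builds
    # optimizer updates for them, leaving the other layers frozen.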
###########################################################################
    # Functions to control batch normalization and dropout layers
    def get_batch_norm_layers(self, keys=[]):
        # Collect batch-norm layers for the requested layer keys; fall back to
        # all layer keys when none are given.
        if not keys:
            keys = list(self.layers.keys())
        bn_layers = []
        for key in keys:
            bn_layers += self.bn_layers[key]
        return bn_layers
def set_batch_norm_update_averages(self, update_averages, keys=[]):
# if update_averages:
# print(' - Batch norm: update the stored averages')
# else:
# print(' - Batch norm: not update the stored averages')
layers = self.get_batch_norm_layers(keys)
for layer in layers:
layer.update_averages = update_averages
def set_batch_norm_training(self, training, keys=[]):
# if training:
# print(' - Batch norm: use mini-batch statistics')
# else:
# print(' - Batch norm: use the stored statistics')
layers = self.get_batch_norm_layers(keys)
for layer in layers:
layer.deterministic = not training
def set_dropout_on(self, training):
layers.DropoutLayer.set_dropout_training(training)
def set_training_mode(self, training):
"""Decide the behavior of batch normalization and dropout.
Parameters
----------
training: boolean
            True: training mode / False: testing mode.
"""
# Decide behaviors of the model during training
# Batch normalization
        l_keys = list(self.layers.keys())
self.set_batch_norm_update_averages(training, l_keys)
self.set_batch_norm_training(training, l_keys)
# Dropout
self.set_dropout_on(training)
###########################################################################
# Functions to help deal with parameters of the model
def make_param_list(self):
"""collect all the parameters from `self.layers` and
store into `self.params['layer_key']`
"""
self.params, self.bn_layers = {}, {}
for key in list(self.layers.keys()):
self.params[key] = []
self.bn_layers[key] = []
for layer in self.layers[key]:
if layer.get_params():
self.params[key] += layer.get_params()
if layer.has_batch_norm():
self.bn_layers[key].append(layer.bn_layer)
def show_num_params(self):
"""Dislay the number of paraemeters for each layer_key.
"""
paramscnt = {}
for key in list(self.layers.keys()):
paramscnt[key] = 0
for p in self.params[key]:
paramscnt[key] += np.prod(p.get_value(borrow=True).shape)
if paramscnt[key] > 0:
print(' - Num params %s:' % key, '{:,}'.format(paramscnt[key]))
def get_params(self, layer_keys=None):
"""Get concatenated parameter list
from layers belong to layer_keys"""
if layer_keys is None:
layer_keys = list(self.layers.keys())
params = []
bn_mean_std = []
for key in layer_keys:
params += self.params[key]
for key in layer_keys:
for layer in self.bn_layers[key]:
bn_mean_std += layer.statistics
params += bn_mean_std
return params
def save(self, filename):
"""Save parameters to file.
"""
params = self.get_params()
with open(filename, 'wb') as f:
pickle.dump(params, f, protocol=2)
# pickle.dump(params, f, protocol=pickle.HIGHEST_PROTOCOL)
print(' = Save params: %s' % (filename))
def load(self, filename):
"""Load parameters from file.
"""
params = self.get_params()
with open(filename, 'rb') as f:
newparams = pickle.load(f)
assert len(newparams) == len(params)
for p, new_p in zip(params, newparams):
if p.name != new_p.name:
print((' @ WARNING: Different name - (loaded) %s != %s'
% (new_p.name, p.name)))
new_p_sh = new_p.get_value(borrow=True).shape
p_sh = p.get_value(borrow=True).shape
if p_sh != new_p_sh:
# print(new_p.name, p_sh, new_p_sh)
print(' @ WARNING: Different shape %s - (loaded)' % new_p.name,
new_p_sh, end='')
print(' !=', p_sh)
continue
p.set_value(new_p.get_value())
print(' = Load all params: %s ' % (filename))
def load_params_keys(self, layer_keys, filename):
"""Load the selecte parameters from file.
Parameters from layers belong to layer_keys.
"""
print(' = Load params: %s (keys = %s)' % (
filename, ', '.join(layer_keys)))
to_params = self.get_params(layer_keys)
with open(filename, 'rb') as f:
from_params = pickle.load(f)
# Copy the params having same shape and name
copied_idx = []
for fidx, f_param in enumerate(from_params):
f_val = f_param.get_value(borrow=True)
for tidx, t_param in enumerate(to_params):
t_val = t_param.get_value(borrow=True)
if f_val.shape == t_val.shape and f_param.name == t_param.name:
t_param.set_value(f_val)
del to_params[tidx]
copied_idx.append(fidx)
break
# print(' = Copied from_param: ', [
# from_params[idx] for idx in copied_idx])
if to_params:
print(' = Not existing to_param: ', to_params)
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/models/model_basis.py",
"copies": "1",
"size": "12088",
"license": "mit",
"hash": -2358152926982187000,
"line_mean": 34.8694362018,
"line_max": 79,
"alpha_frac": 0.524652548,
"autogenerated": false,
"ratio": 3.841118525579917,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48657710735799176,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pickle
from distutils.version import LooseVersion
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.ufuncs as xu
from xarray import DataArray, Dataset, Variable
from xarray.core.pycompat import OrderedDict, suppress
from xarray.tests import mock
from . import (
TestCase, assert_allclose, assert_array_equal, assert_equal,
assert_frame_equal, assert_identical, raises_regex)
dask = pytest.importorskip('dask')
da = pytest.importorskip('dask.array')
dd = pytest.importorskip('dask.dataframe')
class DaskTestCase(TestCase):
def assertLazyAnd(self, expected, actual, test):
with dask.set_options(get=dask.get):
test(actual, expected)
if isinstance(actual, Dataset):
for k, v in actual.variables.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, DataArray):
assert isinstance(actual.data, da.Array)
for k, v in actual.coords.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, Variable):
assert isinstance(actual.data, da.Array)
else:
assert False
class TestVariable(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
def setUp(self):
self.values = np.random.RandomState(0).randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_var = Variable(('x', 'y'), self.values)
self.lazy_var = Variable(('x', 'y'), self.data)
def test_basics(self):
v = self.lazy_var
assert self.data is v.data
assert self.data.chunks == v.chunks
assert_array_equal(self.values, v)
def test_copy(self):
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
self.assertLazyAndIdentical(self.eager_var,
self.lazy_var.copy(deep=True))
def test_chunk(self):
for chunks, expected in [(None, ((2, 2), (2, 2, 2))),
(3, ((3, 1), (3, 3))),
({'x': 3, 'y': 3}, ((3, 1), (3, 3))),
({'x': 3}, ((3, 1), (2, 2, 2))),
({'x': (3, 1)}, ((3, 1), (2, 2, 2)))]:
rechunked = self.lazy_var.chunk(chunks)
assert rechunked.chunks == expected
self.assertLazyAndIdentical(self.eager_var, rechunked)
def test_indexing(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0], v[0])
self.assertLazyAndIdentical(u[:1], v[:1])
self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
with raises_regex(TypeError, 'stored in a dask array'):
v[:1] = 0
def test_squeeze(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())
def test_equals(self):
v = self.lazy_var
assert v.equals(v)
assert isinstance(v.data, da.Array)
assert v.identical(v)
assert isinstance(v.data, da.Array)
def test_transpose(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.T, v.T)
def test_shift(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
assert v.data.chunks == v.shift(x=1).data.chunks
def test_roll(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
assert v.data.chunks == v.roll(x=1).data.chunks
def test_unary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(-u, -v)
self.assertLazyAndIdentical(abs(u), abs(v))
self.assertLazyAndIdentical(u.round(), v.round())
def test_binary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(2 * u, 2 * v)
self.assertLazyAndIdentical(u + u, v + v)
self.assertLazyAndIdentical(u[0] + u, v[0] + v)
def test_repr(self):
expected = dedent("""\
<xarray.Variable (x: 4, y: 6)>
dask.array<shape=(4, 6), dtype=float64, chunksize=(2, 2)>""")
assert expected == repr(self.lazy_var)
def test_pickle(self):
# Test that pickling/unpickling does not convert the dask
# backend to numpy
a1 = Variable(['x'], build_dask_array('x'))
a1.compute()
assert not a1._in_memory
assert kernel_call_count == 1
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 1
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
def test_reduce(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(u.std(), v.std())
self.assertLazyAndAllClose(u.argmax(dim='x'), v.argmax(dim='x'))
self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
self.assertLazyAndAllClose((u < 1).all('x'), (v < 1).all('x'))
with raises_regex(NotImplementedError, 'dask'):
v.median()
def test_missing_values(self):
values = np.array([0, 1, np.nan, 3])
data = da.from_array(values, chunks=(2,))
eager_var = Variable('x', values)
lazy_var = Variable('x', data)
self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
self.assertLazyAndIdentical(Variable('x', range(4)),
lazy_var.fillna(2))
self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())
def test_concat(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], 'x'))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], 'x'))
self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], 'x'))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], 'x'))
self.assertLazyAndIdentical(
u[:3],
Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))
def test_missing_methods(self):
v = self.lazy_var
try:
v.argsort()
except NotImplementedError as err:
assert 'dask' in str(err)
try:
v[0].item()
except NotImplementedError as err:
assert 'dask' in str(err)
def test_univariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_bivariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))
@pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
reason='Need dask 0.16 for new interface')
def test_compute(self):
u = self.eager_var
v = self.lazy_var
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
@pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
reason='Need dask 0.16 for new interface')
def test_persist(self):
u = self.eager_var
v = self.lazy_var + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
class TestDataArrayAndDataset(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
def assertLazyAndEqual(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_equal)
def setUp(self):
self.values = np.random.randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_array = DataArray(self.values, coords={'x': range(4)},
dims=('x', 'y'), name='foo')
self.lazy_array = DataArray(self.data, coords={'x': range(4)},
dims=('x', 'y'), name='foo')
def test_rechunk(self):
chunked = self.eager_array.chunk({'x': 2}).chunk({'y': 2})
assert chunked.chunks == ((2,) * 2, (2,) * 3)
self.assertLazyAndIdentical(self.lazy_array, chunked)
def test_new_chunk(self):
chunked = self.eager_array.chunk()
assert chunked.data.name.startswith('xarray-<this-array>')
def test_lazy_dataset(self):
lazy_ds = Dataset({'foo': (('x', 'y'), self.data)})
assert isinstance(lazy_ds.foo.variable.data, da.Array)
def test_lazy_array(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(u, v)
self.assertLazyAndAllClose(-u, -v)
self.assertLazyAndAllClose(u.T, v.T)
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(1 + u, 1 + v)
actual = xr.concat([v[:2], v[2:]], 'x')
self.assertLazyAndAllClose(u, actual)
@pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
reason='Need dask 0.16 for new interface')
def test_compute(self):
u = self.eager_array
v = self.lazy_array
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
@pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
reason='Need dask 0.16 for new interface')
def test_persist(self):
u = self.eager_array
v = self.lazy_array + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
def test_concat_loads_variables(self):
# Test that concat() computes not-in-memory variables at most once
# and loads them in the output, while leaving the input unaltered.
d1 = build_dask_array('d1')
c1 = build_dask_array('c1')
d2 = build_dask_array('d2')
c2 = build_dask_array('c2')
d3 = build_dask_array('d3')
c3 = build_dask_array('c3')
# Note: c is a non-index coord.
# Index coords are loaded by IndexVariable.__init__.
ds1 = Dataset(data_vars={'d': ('x', d1)}, coords={'c': ('x', c1)})
ds2 = Dataset(data_vars={'d': ('x', d2)}, coords={'c': ('x', c2)})
ds3 = Dataset(data_vars={'d': ('x', d3)}, coords={'c': ('x', c3)})
assert kernel_call_count == 0
out = xr.concat([ds1, ds2, ds3], dim='n', data_vars='different',
coords='different')
# each kernel is computed exactly once
assert kernel_call_count == 6
# variables are loaded in the output
assert isinstance(out['d'].data, np.ndarray)
assert isinstance(out['c'].data, np.ndarray)
out = xr.concat(
[ds1, ds2, ds3], dim='n', data_vars='all', coords='all')
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out['d'].data, dask.array.Array)
assert isinstance(out['c'].data, dask.array.Array)
out = xr.concat(
[ds1, ds2, ds3], dim='n', data_vars=['d'], coords=['c'])
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out['d'].data, dask.array.Array)
assert isinstance(out['c'].data, dask.array.Array)
out = xr.concat([ds1, ds2, ds3], dim='n', data_vars=[], coords=[])
        # variables are loaded once as we are validating that they're identical
assert kernel_call_count == 12
assert isinstance(out['d'].data, np.ndarray)
assert isinstance(out['c'].data, np.ndarray)
out = xr.concat([ds1, ds2, ds3], dim='n', data_vars='different',
coords='different', compat='identical')
# compat=identical doesn't do any more kernel calls than compat=equals
assert kernel_call_count == 18
assert isinstance(out['d'].data, np.ndarray)
assert isinstance(out['c'].data, np.ndarray)
# When the test for different turns true halfway through,
# stop computing variables as it would not have any benefit
ds4 = Dataset(data_vars={'d': ('x', [2.0])},
coords={'c': ('x', [2.0])})
out = xr.concat([ds1, ds2, ds4, ds3], dim='n', data_vars='different',
coords='different')
        # the variables of ds1 and ds2 were computed, but those of ds3 were not
assert kernel_call_count == 22
assert isinstance(out['d'].data, dask.array.Array)
assert isinstance(out['c'].data, dask.array.Array)
# the data of ds1 and ds2 was loaded into numpy and then
# concatenated to the data of ds3. Thus, only ds3 is computed now.
out.compute()
assert kernel_call_count == 24
        # Finally, test that the originals are unaltered
assert ds1['d'].data is d1
assert ds1['c'].data is c1
assert ds2['d'].data is d2
assert ds2['c'].data is c2
assert ds3['d'].data is d3
assert ds3['c'].data is c3
def test_groupby(self):
if LooseVersion(dask.__version__) == LooseVersion('0.15.3'):
pytest.xfail('upstream bug in dask: '
'https://github.com/dask/dask/issues/2718')
u = self.eager_array
v = self.lazy_array
expected = u.groupby('x').mean()
actual = v.groupby('x').mean()
self.assertLazyAndAllClose(expected, actual)
def test_groupby_first(self):
u = self.eager_array
v = self.lazy_array
for coords in [u.coords, v.coords]:
coords['ab'] = ('x', ['a', 'a', 'b', 'b'])
with raises_regex(NotImplementedError, 'dask'):
v.groupby('ab').first()
expected = u.groupby('ab').first()
actual = v.groupby('ab').first(skipna=False)
self.assertLazyAndAllClose(expected, actual)
def test_reindex(self):
u = self.eager_array.assign_coords(y=range(6))
v = self.lazy_array.assign_coords(y=range(6))
for kwargs in [{'x': [2, 3, 4]},
{'x': [1, 100, 2, 101, 3]},
{'x': [2.5, 3, 3.5], 'y': [2, 2.5, 3]}]:
expected = u.reindex(**kwargs)
actual = v.reindex(**kwargs)
self.assertLazyAndAllClose(expected, actual)
def test_to_dataset_roundtrip(self):
u = self.eager_array
v = self.lazy_array
expected = u.assign_coords(x=u['x'])
self.assertLazyAndEqual(expected, v.to_dataset('x').to_array('x'))
def test_merge(self):
def duplicate_and_merge(array):
return xr.merge([array, array.rename('bar')]).to_array()
expected = duplicate_and_merge(self.eager_array)
actual = duplicate_and_merge(self.lazy_array)
self.assertLazyAndEqual(expected, actual)
def test_ufuncs(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_where_dispatching(self):
a = np.arange(10)
b = a > 3
x = da.from_array(a, 5)
y = da.from_array(b, 5)
expected = DataArray(a).where(b)
self.assertLazyAndEqual(expected, DataArray(a).where(y))
self.assertLazyAndEqual(expected, DataArray(x).where(b))
self.assertLazyAndEqual(expected, DataArray(x).where(y))
def test_simultaneous_compute(self):
ds = Dataset({'foo': ('x', range(5)),
'bar': ('x', range(5))}).chunk()
count = [0]
def counting_get(*args, **kwargs):
count[0] += 1
return dask.get(*args, **kwargs)
with dask.set_options(get=counting_get):
ds.load()
assert count[0] == 1
def test_stack(self):
data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4))
arr = DataArray(data, dims=('w', 'x', 'y'))
stacked = arr.stack(z=('x', 'y'))
z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)],
names=['x', 'y'])
expected = DataArray(data.reshape(2, -1), {'z': z}, dims=['w', 'z'])
assert stacked.data.chunks == expected.data.chunks
self.assertLazyAndEqual(expected, stacked)
def test_dot(self):
eager = self.eager_array.dot(self.eager_array[0])
lazy = self.lazy_array.dot(self.lazy_array[0])
self.assertLazyAndAllClose(eager, lazy)
def test_dataarray_repr(self):
# Test that __repr__ converts the dask backend to numpy
# in neither the data variable nor the non-index coords
data = build_dask_array('data')
nonindex_coord = build_dask_array('coord')
a = DataArray(data, dims=['x'], coords={'y': ('x', nonindex_coord)})
expected = dedent("""\
<xarray.DataArray 'data' (x: 1)>
dask.array<shape=(1,), dtype=int64, chunksize=(1,)>
Coordinates:
y (x) int64 dask.array<shape=(1,), chunksize=(1,)>
Dimensions without coordinates: x""")
assert expected == repr(a)
assert kernel_call_count == 0
def test_dataset_repr(self):
# Test that pickling/unpickling converts the dask backend
# to numpy in neither the data variables nor the non-index coords
data = build_dask_array('data')
nonindex_coord = build_dask_array('coord')
ds = Dataset(data_vars={'a': ('x', data)},
coords={'y': ('x', nonindex_coord)})
expected = dedent("""\
<xarray.Dataset>
Dimensions: (x: 1)
Coordinates:
y (x) int64 dask.array<shape=(1,), chunksize=(1,)>
Dimensions without coordinates: x
Data variables:
a (x) int64 dask.array<shape=(1,), chunksize=(1,)>""")
assert expected == repr(ds)
assert kernel_call_count == 0
def test_dataarray_pickle(self):
# Test that pickling/unpickling converts the dask backend
# to numpy in neither the data variable nor the non-index coords
data = build_dask_array('data')
nonindex_coord = build_dask_array('coord')
a1 = DataArray(data, dims=['x'], coords={'y': ('x', nonindex_coord)})
a1.compute()
assert not a1._in_memory
assert not a1.coords['y']._in_memory
assert kernel_call_count == 2
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 2
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
assert not a1.coords['y']._in_memory
assert not a2.coords['y']._in_memory
def test_dataset_pickle(self):
# Test that pickling/unpickling converts the dask backend
# to numpy in neither the data variables nor the non-index coords
data = build_dask_array('data')
nonindex_coord = build_dask_array('coord')
ds1 = Dataset(data_vars={'a': ('x', data)},
coords={'y': ('x', nonindex_coord)})
ds1.compute()
assert not ds1['a']._in_memory
assert not ds1['y']._in_memory
assert kernel_call_count == 2
ds2 = pickle.loads(pickle.dumps(ds1))
assert kernel_call_count == 2
assert_identical(ds1, ds2)
assert not ds1['a']._in_memory
assert not ds2['a']._in_memory
assert not ds1['y']._in_memory
assert not ds2['y']._in_memory
def test_dataarray_getattr(self):
        # ipython/jupyter does a long list of getattr() calls when trying to
# represent an object.
# Make sure we're not accidentally computing dask variables.
data = build_dask_array('data')
nonindex_coord = build_dask_array('coord')
a = DataArray(data, dims=['x'],
coords={'y': ('x', nonindex_coord)})
with suppress(AttributeError):
getattr(a, 'NOTEXIST')
assert kernel_call_count == 0
def test_dataset_getattr(self):
# Test that pickling/unpickling converts the dask backend
# to numpy in neither the data variables nor the non-index coords
data = build_dask_array('data')
nonindex_coord = build_dask_array('coord')
ds = Dataset(data_vars={'a': ('x', data)},
coords={'y': ('x', nonindex_coord)})
with suppress(AttributeError):
getattr(ds, 'NOTEXIST')
assert kernel_call_count == 0
def test_values(self):
# Test that invoking the values property does not convert the dask
# backend to numpy
a = DataArray([1, 2]).chunk()
assert not a._in_memory
assert a.values.tolist() == [1, 2]
assert not a._in_memory
def test_from_dask_variable(self):
# Test array creation from Variable with dask backend.
# This is used e.g. in broadcast()
a = DataArray(self.lazy_array.variable,
coords={'x': range(4)}, name='foo')
self.assertLazyAndIdentical(self.lazy_array, a)
class TestToDaskDataFrame(TestCase):
def test_to_dask_dataframe(self):
# Test conversion of Datasets to dask DataFrames
x = da.from_array(np.random.randn(10), chunks=4)
y = np.arange(10, dtype='uint8')
t = list('abcdefghij')
ds = Dataset(OrderedDict([('a', ('t', x)),
('b', ('t', y)),
('t', ('t', t))]))
expected_pd = pd.DataFrame({'a': x,
'b': y},
index=pd.Index(t, name='t'))
# test if 1-D index is correctly set up
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
# test if we have dask dataframes
assert isinstance(actual, dd.DataFrame)
# use the .equals from pandas to check dataframes are equivalent
assert_frame_equal(expected.compute(), actual.compute())
# test if no index is given
expected = dd.from_pandas(expected_pd.reset_index(drop=False),
chunksize=4)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_2D(self):
# Test if 2-D dataset is supplied
w = da.from_array(np.random.randn(2, 3), chunks=(1, 2))
ds = Dataset({'w': (('x', 'y'), w)})
ds['x'] = ('x', np.array([0, 1], np.int64))
ds['y'] = ('y', list('abc'))
# dask dataframes do not (yet) support multiindex,
# but when it does, this would be the expected index:
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ['a', 'b', 'c', 'a', 'b', 'c']],
names=['x', 'y'])
expected = pd.DataFrame({'w': w.reshape(-1)},
index=exp_index)
# so for now, reset the index
expected = expected.reset_index(drop=False)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
@pytest.mark.xfail(raises=NotImplementedError)
def test_to_dask_dataframe_2D_set_index(self):
# This will fail until dask implements MultiIndex support
w = da.from_array(np.random.randn(2, 3), chunks=(1, 2))
ds = Dataset({'w': (('x', 'y'), w)})
ds['x'] = ('x', np.array([0, 1], np.int64))
ds['y'] = ('y', list('abc'))
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_coordinates(self):
# Test if coordinate is also a dask array
x = da.from_array(np.random.randn(10), chunks=4)
t = da.from_array(np.arange(10) * 2, chunks=4)
ds = Dataset(OrderedDict([('a', ('t', x)),
('t', ('t', t))]))
expected_pd = pd.DataFrame({'a': x},
index=pd.Index(t, name='t'))
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_not_daskarray(self):
# Test if DataArray is not a dask array
x = np.random.randn(10)
y = np.arange(10, dtype='uint8')
t = list('abcdefghij')
ds = Dataset(OrderedDict([('a', ('t', x)),
('b', ('t', y)),
('t', ('t', t))]))
expected = pd.DataFrame({'a': x, 'b': y},
index=pd.Index(t, name='t'))
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_no_coordinate(self):
x = da.from_array(np.random.randn(10), chunks=4)
ds = Dataset({'x': ('dim_0', x)})
expected = ds.compute().to_dataframe().reset_index()
actual = ds.to_dask_dataframe()
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_dim_order(self):
values = np.array([[1, 2], [3, 4]], dtype=np.int64)
ds = Dataset({'w': (('x', 'y'), values)}).chunk(1)
expected = ds['w'].to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=['x', 'y'])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds['w'].T.to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=['y', 'x'])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
with raises_regex(ValueError, 'does not match the set of dimensions'):
ds.to_dask_dataframe(dim_order=['x'])
@pytest.mark.parametrize("method", ['load', 'compute'])
def test_dask_kwargs_variable(method):
x = Variable('y', da.from_array(np.arange(3), chunks=(2,)))
# args should be passed on to da.Array.compute()
with mock.patch.object(da.Array, 'compute',
return_value=np.arange(3)) as mock_compute:
getattr(x, method)(foo='bar')
mock_compute.assert_called_with(foo='bar')
@pytest.mark.parametrize("method", ['load', 'compute', 'persist'])
def test_dask_kwargs_dataarray(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = DataArray(data)
if method in ['load', 'compute']:
dask_func = 'dask.array.compute'
else:
dask_func = 'dask.persist'
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo='bar')
mock_func.assert_called_with(data, foo='bar')
@pytest.mark.parametrize("method", ['load', 'compute', 'persist'])
def test_dask_kwargs_dataset(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = Dataset({'x': (('y'), data)})
if method in ['load', 'compute']:
dask_func = 'dask.array.compute'
else:
dask_func = 'dask.persist'
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo='bar')
mock_func.assert_called_with(data, foo='bar')
kernel_call_count = 0
def kernel(name):
"""Dask kernel to test pickling/unpickling and __repr__.
Must be global to make it pickleable.
"""
print("kernel(%s)" % name)
global kernel_call_count
kernel_call_count += 1
return np.ones(1, dtype=np.int64)
def build_dask_array(name):
global kernel_call_count
kernel_call_count = 0
return dask.array.Array(
dask={(name, 0): (kernel, name)}, name=name,
chunks=((1,),), dtype=np.int64)
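# How the counting helpers are meant to be read (illustrative, not an extra
# test):
#
#     a = build_dask_array('a')   # resets kernel_call_count to 0
#     a.compute()                 # kernel('a') runs once -> kernel_call_count == 1
#     a.compute()                 # dask does not cache, so the count becomes 2
#
# The assertions on kernel_call_count above count how often data is
# materialized.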
# test both the persist method and the dask.persist function
# the dask.persist function requires a new version of dask
@pytest.mark.parametrize('persist', [
lambda x: x.persist(),
pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
lambda x: dask.persist(x)[0],
reason='Need Dask 0.16')
])
def test_persist_Dataset(persist):
ds = Dataset({'foo': ('x', range(5)),
'bar': ('x', range(5))}).chunk()
ds = ds + 1
n = len(ds.foo.data.dask)
ds2 = persist(ds)
assert len(ds2.foo.data.dask) == 1
assert len(ds.foo.data.dask) == n # doesn't mutate in place
@pytest.mark.parametrize('persist', [
lambda x: x.persist(),
pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
lambda x: dask.persist(x)[0],
reason='Need Dask 0.16')
])
def test_persist_DataArray(persist):
x = da.arange(10, chunks=(5,))
y = DataArray(x)
z = y + 1
n = len(z.data.dask)
zz = persist(z)
assert len(z.data.dask) == n
assert len(zz.data.dask) == zz.data.npartitions
@pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
reason='Need dask 0.16 for new interface')
def test_dataarray_with_dask_coords():
import toolz
x = xr.Variable('x', da.arange(8, chunks=(4,)))
y = xr.Variable('y', da.arange(8, chunks=(4,)) * 2)
data = da.random.random((8, 8), chunks=(4, 4)) + 1
array = xr.DataArray(data, dims=['x', 'y'])
array.coords['xx'] = x
array.coords['yy'] = y
assert dict(array.__dask_graph__()) == toolz.merge(data.__dask_graph__(),
x.__dask_graph__(),
y.__dask_graph__())
(array2,) = dask.compute(array)
assert not dask.is_dask_collection(array2)
assert all(isinstance(v._variable.data, np.ndarray)
for v in array2.coords.values())
def test_basic_compute():
ds = Dataset({'foo': ('x', range(5)),
'bar': ('x', range(5))}).chunk({'x': 2})
for get in [dask.threaded.get,
dask.multiprocessing.get,
dask.local.get_sync,
None]:
with dask.set_options(get=get):
ds.compute()
ds.foo.compute()
ds.foo.variable.compute()
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/tests/test_dask.py",
"copies": "1",
"size": "32253",
"license": "apache-2.0",
"hash": -8719932196461235000,
"line_mean": 36.9001175088,
"line_max": 79,
"alpha_frac": 0.5667689827,
"autogenerated": false,
"ratio": 3.5153133514986377,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4582082334198638,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pickle
import random
from collections import namedtuple
import pandas as pd
from caar.cleanthermostat import _sort_meta_in_col_order, dict_from_file
from future import standard_library
standard_library.install_aliases()
Cycle = namedtuple('Cycle', ['device_id', 'cycle_mode', 'start_time'])
Sensor = namedtuple('Sensor', ['sensor_id', 'timestamp'])
Geospatial = namedtuple('Geospatial', ['location_id', 'timestamp'])
def create_sensors_df(dict_or_pickle_file, sensor_ids=None):
"""Returns pandas DataFrame containing sensor ID, timestamps and
sensor observations.
Args:
dict_or_pickle_file (dict or str): The object must have been created with dict_from_file() or pickle_from_file() function.
sensor_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame.
Returns:
sensors_df (pandas DataFrame): DataFrame has MultiIndex based on the
ID(s) and timestamps.
"""
fields = list(Sensor._fields)
multi_ids, vals, meta = _records_as_lists_of_tuples(dict_or_pickle_file,
fields, ids=sensor_ids)
id_labels = [meta[col]['heading'] for col in ['id', 'time']]
data_labels = _data_labels_from_meta(meta, id_labels)
sensors_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals)
return sensors_df
def sensors_df_from_text(raw_file, states=None, sensors_file=None,
postal_file=None, auto='sensors', id_col_heading=None,
encoding='UTF-8', delimiter=None, quote=None,
cols_to_ignore=None, meta=False, sensor_ids=None):
sensors = dict_from_file(raw_file, states=states,
sensors_file=sensors_file,
postal_file=postal_file, auto=auto,
id_col_heading=id_col_heading,
encoding=encoding, delimiter=delimiter,
quote=quote, cols_to_ignore=cols_to_ignore,
meta=meta)
return create_sensors_df(sensors, sensor_ids=sensor_ids)
def sensors_df_from_bin(pickle_file, sensor_ids=None):
"""Returns pandas DataFrame containing sensor ID, timestamps and
sensor observations.
Args:
pickle_file (str): The pickle file must have been created with pickle_from_file() function.
sensor_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame.
Returns:
sensors_df (pandas DataFrame): DataFrame has MultiIndex based on the
ID(s) and timestamps.
"""
fields = list(Sensor._fields)
multi_ids, vals, meta = _records_as_lists_of_tuples(pickle_file,
fields, ids=sensor_ids)
id_labels = [meta[col]['heading'] for col in ['id', 'time']]
data_labels = _data_labels_from_meta(meta, id_labels)
sensors_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals)
return sensors_df
def create_cycles_df(dict_or_pickle_file, device_ids=None):
"""Returns pandas DataFrame containing sensor ids and cycle beginning
timestamps as multi-part indexes, and cycle ending times as values.
Args:
dict_or_pickle_file (dict or str): Must have been created with dict_from_file() or pickle_from_file() function.
device_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame.
Returns:
cycles_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps.
"""
multi_ids, vals, meta = _records_as_lists_of_tuples(dict_or_pickle_file,
list(Cycle._fields),
ids=device_ids)
id_labels = [meta[col]['heading'] for col in ['id', 'cycle', 'start_time']]
data_labels = _data_labels_from_meta(meta, id_labels)
cycles_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals)
return cycles_df
def cycles_df_from_text(raw_file, cycle=None, states=None, postal_file=None,
auto='cycles', id_col_heading=None, cycle_col_heading=None,
encoding='UTF-8', delimiter=None, quote=None,
cols_to_ignore=None, meta=False, device_ids=None):
cycles = dict_from_file(raw_file, cycle=cycle, states=states,
postal_file=postal_file, auto=auto,
id_col_heading=id_col_heading,
cycle_col_heading=cycle_col_heading,
encoding=encoding, delimiter=delimiter,
quote=quote, cols_to_ignore=cols_to_ignore,
meta=meta)
return create_cycles_df(cycles, device_ids=device_ids)
def cycles_df_from_bin(pickle_file, device_ids=None):
"""Returns pandas DataFrame containing sensor ids and cycle beginning
timestamps as multi-part indexes, and cycle ending times as values.
Args:
        pickle_file (str): Must have been created with pickle_from_file() function.
device_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame.
Returns:
cycles_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps.
"""
multi_ids, vals, meta = _records_as_lists_of_tuples(pickle_file,
list(Cycle._fields),
ids=device_ids)
id_labels = [meta[col]['heading'] for col in ['id', 'cycle', 'start_time']]
data_labels = _data_labels_from_meta(meta, id_labels)
cycles_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals)
return cycles_df
def create_geospatial_df(dict_or_pickle_file, location_ids=None):
"""Returns pandas DataFrame containing records with location IDs and time
stamps as multi-part indexes and outdoor temperatures as values.
Args:
dict_or_pickle_file (dict or str): Must have been created with dict_from_file() or pickle_from_file() function.
location_ids (Optional[list or other iterable of ints or strings]): Location IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame.
Returns:
geospatial_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps.
"""
fields = list(Geospatial._fields)
multi_ids, vals, meta = _records_as_lists_of_tuples(dict_or_pickle_file,
fields,
ids=location_ids)
id_labels = [meta[col]['heading'] for col in ['id', 'time']]
data_labels = _data_labels_from_meta(meta, id_labels)
geospatial_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals)
return geospatial_df
def geospatial_df_from_text(raw_file, states=None, sensors_file=None,
postal_file=None, auto='geospatial',
id_col_heading=None, encoding='UTF-8',
delimiter=None, quote=None, cols_to_ignore=None,
meta=False, location_ids=None):
geos = dict_from_file(raw_file, states=states, sensors_file=sensors_file,
postal_file=postal_file, auto=auto,
id_col_heading=id_col_heading,
encoding=encoding, delimiter=delimiter, quote=quote,
cols_to_ignore=cols_to_ignore, meta=meta)
    return create_geospatial_df(geos, location_ids=location_ids)
def geospatial_df_from_bin(pickle_file, location_ids=None):
"""Returns pandas DataFrame containing records with location IDs and time
stamps as multi-part indexes and outdoor temperatures as values.
Args:
pickle_file (str): Must have been created with pickle_from_file() function.
location_ids (Optional[list or other iterable of ints or strings]): Location IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame.
Returns:
geospatial_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps.
"""
fields = list(Geospatial._fields)
multi_ids, vals, meta = _records_as_lists_of_tuples(pickle_file, fields,
ids=location_ids)
id_labels = [meta[col]['heading'] for col in ['id', 'time']]
data_labels = _data_labels_from_meta(meta, id_labels)
geospatial_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals)
return geospatial_df
def _records_as_lists_of_tuples(dict_or_pickle_file, fields,
ids=None):
"""Returns tuple containing
1) a list of named tuples containing sensor (or outdoor location) ids
and timestamps and
2) a list of either indoor (or outdoor) temperatures, or the ending time
of a cycle, based on input of a pickle file containing a dict.
"""
records = {}
if isinstance(dict_or_pickle_file, dict):
records = dict_or_pickle_file['records']
meta = dict_or_pickle_file['cols_meta']
else:
try:
with open(dict_or_pickle_file, 'rb') as cp:
container = pickle.load(cp)
records = container['records']
meta = container['cols_meta']
except ValueError:
print('The first argument must be a pickle file or dict.')
if ids is not None:
for record_key in list(records.keys()):
# Discard record if it is not among the desired ids.
if getattr(record_key, fields[0]) not in ids:
records.pop(record_key, None)
multi_ids, vals = _multi_ids_and_data_vals(records, fields)
return multi_ids, vals, meta
def _data_labels_from_meta(meta, id_labels):
sorted_meta = _sort_meta_in_col_order(meta)
data_labels = [meta[col]['heading'] for col in
list(sorted_meta)[len(id_labels):]]
return data_labels
def random_record(dict_or_pickle_file, value_only=False):
"""Returns a randomly chosen key-value pair from a dict or pickle file."""
records = {}
if isinstance(dict_or_pickle_file, dict):
records = dict_or_pickle_file['records']
else:
try:
with open(dict_or_pickle_file, 'rb') as cp:
container = pickle.load(cp)
records = container['records']
except ValueError:
print('The first argument must be a pickle file or dict.')
copied_keys = list(records.keys())
random_record_key = _random_record_key(copied_keys)
if value_only:
return records[random_record_key]
else:
return random_record_key, records[random_record_key]
def _random_record_key(keys):
try:
random_record_key = random.choice(keys)
except IndexError:
print('No records in the dict or pickle file.')
else:
return random_record_key
def _multi_ids_and_data_vals(records, fields):
"""Returns tuple containing
    1) a list of tuples containing ids and timestamps (and cycle modes
if applicable) and
2) a list of either temperatures or cycle ending times, based on items
(records) in a dict.
"""
multi_ids = []
vals = []
for k, v in records.items():
ids = tuple(getattr(k, f) for f in fields)
multi_ids.append(ids)
vals.append(v)
return multi_ids, vals
def _create_multi_index_df(multiindex_names, multi_ids, column_names, values):
"""Returns MultiIndex pandas dataframe in which the index columns are for
an id and timestamp and the value is for a temperature or a timestamp
indicating the end of a cycle.
"""
multiindex_columns = tuple(multiindex_names)
multicols = pd.MultiIndex.from_tuples(multi_ids, names=multiindex_columns)
df = pd.DataFrame(values, index=multicols, columns=column_names)
df.sort_index(inplace=True, sort_remaining=True)
return df
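# A minimal usage sketch (hypothetical file name; assumes raw input accepted by
# caar.cleanthermostat.dict_from_file):
#
#     sensors = dict_from_file('sensor_obs.csv', auto='sensors')
#     df = create_sensors_df(sensors)      # MultiIndex of (sensor id, timestamp)
#     key, val = random_record(sensors)    # spot-check a single record
#
# sensors_df_from_text() wraps the same two steps into a single call.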
| {
"repo_name": "nickpowersys/CaaR",
"path": "caar/history.py",
"copies": "1",
"size": "12592",
"license": "bsd-3-clause",
"hash": -2693026809062823000,
"line_mean": 42.8745644599,
"line_max": 172,
"alpha_frac": 0.6194409149,
"autogenerated": false,
"ratio": 3.9923906150919466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5111831529991946,
"avg_score": null,
"num_lines": null
} |