text stringlengths 0–1.05M | meta dict |
---|---|
from __future__ import absolute_import, division, print_function
from datetime import datetime
from functools import partial
from datashape import discover
from datashape import float32, float64, string, Option, object_, datetime_
import datashape
import pandas as pd
import numpy as np
from ..convert import convert
possibly_missing = set((string, datetime_, float32, float64))
@discover.register(pd.DataFrame)
def discover_dataframe(df):
obj = object_
names = list(df.columns)
dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))
dtypes = [string if dt == obj else dt for dt in dtypes]
odtypes = [Option(dt) if dt in possibly_missing else dt
for dt in dtypes]
schema = datashape.Record(list(zip(names, odtypes)))
return len(df) * schema
@discover.register(pd.Series)
def discover_series(s):
return len(s) * datashape.CType.from_numpy_dtype(s.dtype)
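# Illustrative sketch (not in the original module): with the registrations
# above, datashape.discover() maps a one-row frame to a Record of optional
# types, e.g.
#   >>> discover(pd.DataFrame({'amount': [100.0]}))
#   dshape("1 * {amount: ?float64}")
# (exact repr depends on the datashape version installed).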
def coerce_datetimes(df):
""" Make object columns into datetimes if possible
Warning: this operates in place.
Example
-------
>>> df = pd.DataFrame({'dt': ['2014-01-01'], 'name': ['Alice']})
>>> df.dtypes # note that these are strings/object
dt object
name object
dtype: object
>>> df2 = coerce_datetimes(df)
>>> df2
dt name
0 2014-01-01 Alice
>>> df2.dtypes # note that only the datetime-looking one was transformed
dt datetime64[ns]
name object
dtype: object
"""
objects = df.select_dtypes(include=['object'])
# NOTE: In pandas < 0.17, pd.to_datetime(' ') == datetime(...), which is
# not what we want. So we have to remove columns with empty or
# whitespace-only strings to prevent erroneous datetime coercion.
columns = [c for c in objects.columns if not np.any(objects[c].str.isspace() | objects[c].str.isalpha())]
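# e.g. a column of names like ['Alice'] is skipped here because
# str.isalpha() is True for it, so only columns that plausibly hold date
# strings reach pd.to_datetime below.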
df2 = objects[columns].apply(partial(pd.to_datetime, errors='ignore'))
for c in df2.columns:
df[c] = df2[c]
return df
@convert.register(pd.Timestamp, datetime)
def convert_datetime_to_timestamp(dt, **kwargs):
return pd.Timestamp(dt)
@convert.register(pd.Timestamp, float)
def nan_to_nat(fl, **kwargs):
try:
if np.isnan(fl):
# Only nan->nat edge
return pd.NaT
except TypeError:
pass
raise NotImplementedError()
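# Illustrative sketch (not in the original module): odo's convert() would use
# the registrations in this file, e.g. convert(pd.Timestamp, float('nan')) is
# expected to yield pd.NaT via the edge above, assuming the odo conversion
# graph resolves that edge directly.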
@convert.register(pd.Timestamp, (pd.tslib.NaTType, type(None)))
def convert_null_or_nat_to_nat(n, **kwargs):
return pd.NaT
| {
"repo_name": "cowlicks/odo",
"path": "odo/backends/pandas.py",
"copies": "1",
"size": "2498",
"license": "bsd-3-clause",
"hash": 141218348342755790,
"line_mean": 26.4505494505,
"line_max": 109,
"alpha_frac": 0.6561248999,
"autogenerated": false,
"ratio": 3.5839311334289814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9739556532829481,
"avg_score": 0.0000999000999000999,
"num_lines": 91
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from functools import partial
from datashape import discover
from datashape import string, object_, datetime_, Option
import datashape
import pandas as pd
import numpy as np
from ..convert import convert
possibly_missing = frozenset({string, datetime_})
def dshape_from_pandas(dtype):
dshape = datashape.CType.from_numpy_dtype(dtype)
dshape = string if dshape == object_ else dshape
return Option(dshape) if dshape in possibly_missing else dshape
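# Rough mapping implemented above (illustration, not in the original source):
#   np.dtype('float64')        -> float64
#   np.dtype('O') (object)     -> ?string    (Option(string))
#   np.dtype('datetime64[ns]') -> ?datetime  (Option(datetime_))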
@discover.register(pd.DataFrame)
def discover_dataframe(df):
return len(df) * datashape.Record(
zip(df.columns, map(dshape_from_pandas, df.dtypes)),
)
@discover.register(pd.Series)
def discover_series(s):
return len(s) * dshape_from_pandas(s.dtype)
def coerce_datetimes(df):
""" Make object columns into datetimes if possible
Warning: this operates in place.
Example
-------
>>> df = pd.DataFrame({'dt': ['2014-01-01'], 'name': ['Alice']})
>>> df.dtypes # note that these are strings/object
dt object
name object
dtype: object
>>> df2 = coerce_datetimes(df)
>>> df2
dt name
0 2014-01-01 Alice
>>> df2.dtypes # note that only the datetime-looking one was transformed
dt datetime64[ns]
name object
dtype: object
"""
objects = df.select_dtypes(include=['object'])
# NOTE: In pandas < 0.17, pd.to_datetime(' ') == datetime(...), which is
# not what we want. So we have to remove columns with empty or
# whitespace-only strings to prevent erroneous datetime coercion.
columns = [
c for c in objects.columns
if not np.any(objects[c].str.isspace() | objects[c].str.isalpha())
]
df2 = objects[columns].apply(partial(pd.to_datetime, errors='ignore'))
for c in df2.columns:
df[c] = df2[c]
return df
@convert.register(pd.Timestamp, datetime)
def convert_datetime_to_timestamp(dt, **kwargs):
return pd.Timestamp(dt)
@convert.register(pd.Timestamp, float)
def nan_to_nat(fl, **kwargs):
try:
if np.isnan(fl):
# Only nan->nat edge
return pd.NaT
except TypeError:
pass
raise NotImplementedError()
@convert.register(pd.Timestamp, (pd.tslib.NaTType, type(None)))
def convert_null_or_nat_to_nat(n, **kwargs):
return pd.NaT
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/pandas.py",
"copies": "1",
"size": "2435",
"license": "bsd-3-clause",
"hash": -2816490791385178000,
"line_mean": 24.9042553191,
"line_max": 77,
"alpha_frac": 0.6554414784,
"autogenerated": false,
"ratio": 3.5967503692762186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47521918476762187,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from rfc822 import parsedate_tz, mktime_tz
from urlparse import urlparse
from time import time
from typing import Any, Optional # NOQA
from changes.utils.http import build_patch_uri
from .base import Vcs, RevisionResult, BufferParser, CommandError, UnknownRevision
import logging
LOG_FORMAT = '{node}\x01{author}\x01{date|rfc822date}\x01{p1node} {p2node}\x01{branches}\x01{desc}\x02'
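# Descriptive note (not in the original source): LOG_FORMAT emits one record
# per changeset as
#   node \x01 author \x01 rfc822 date \x01 "p1node p2node" \x01 branches \x01 desc \x02
# so \x01 separates fields and \x02 terminates a record, which is what
# BufferParser and chunk.split('\x01') in log() rely on.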
BASH_CLONE_STEP = """
#!/bin/bash -eux
REMOTE_URL=%(remote_url)s
LOCAL_PATH=%(local_path)s
REVISION=%(revision)s
if [ ! -d $LOCAL_PATH/.hg ]; then
hg clone $REMOTE_URL $LOCAL_PATH
pushd $LOCAL_PATH
else
pushd $LOCAL_PATH
hg recover || true
hg pull $REMOTE_URL
fi
if ! hg up %(clean_arg)s $REVISION ; then
echo "Failed to update to $REVISION"
exit 1
fi
# similar to hg purge, but without requiring the extension
hg status -un0 | xargs -0 rm -rf
""".strip()
BASH_PATCH_STEP = """
#!/bin/bash -eux
LOCAL_PATH=%(local_path)s
PATCH_URL=%(patch_url)s
pushd $LOCAL_PATH
PATCH_PATH=/tmp/$(mktemp patch.XXXXXXXXXX)
curl -o $PATCH_PATH $PATCH_URL
hg import --no-commit $PATCH_PATH
""".strip()
class MercurialVcs(Vcs):
binary_path = 'hg'
def get_default_env(self):
return {
'HGPLAIN': '1',
}
# This is static so that the repository serializer can easily use it
@staticmethod
def get_default_revision():
return 'default'
@property
def remote_url(self):
if self.url.startswith(('ssh:', 'http:', 'https:')):
parsed = urlparse(self.url)
url = '%s://%s@%s/%s' % (
parsed.scheme,
parsed.username or self.username or 'hg',
parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
parsed.path.lstrip('/'),
)
else:
url = self.url
return url
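# Illustration (not in the original source): for a hypothetical URL such as
# 'https://hg.example.com/repo' with no embedded user and self.username unset,
# remote_url evaluates to 'https://hg@hg.example.com/repo', since the property
# injects a default 'hg' user for ssh/http(s) URLs.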
def run(self, cmd, **kwargs):
cmd = [
self.binary_path,
'--config',
'ui.ssh={0}'.format(self.ssh_connect_path)
] + cmd
try:
return super(MercurialVcs, self).run(cmd, **kwargs)
except CommandError as e:
if "abort: unknown revision '" in e.stderr:
raise UnknownRevision(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr,
)
raise
def clone(self):
self.run(['clone', self.remote_url, self.path], cwd='/')
def update(self):
self.run(['pull'])
def log(self, parent=None, branch=None, author=None, offset=0, limit=100, paths=None):
""" Gets the commit log for the repository.
Each revision returned has exactly one branch name associated with it.
This is the branch name encoded into the revision changeset description.
See documentation for the base for general information on this function.
"""
start_time = time()
# TODO(dcramer): we should make this streaming
cmd = ['log', '--template=%s' % (LOG_FORMAT,)]
if parent and branch:
raise ValueError('Both parent and branch cannot be set')
# Build the -r parameter value into r_str with branch, parent and author
r_str = None
if branch:
cmd.append('-b{0}'.format(branch))
if parent:
r_str = ('ancestors(%s)' % parent)
if author:
r_str = ('({r}) and author("{0}")' if r_str else 'author("{0}")')\
.format(author, r=r_str)
if r_str:
cmd.append('-r reverse({0})'.format(r_str))
if limit:
cmd.append('--limit=%d' % (offset + limit,))
if paths:
cmd.extend(["glob:" + p.strip() for p in paths])
result = self.run(cmd)
self.log_timing('log', start_time)
for idx, chunk in enumerate(BufferParser(result, '\x02')):
if idx < offset:
continue
(sha, author, author_date, parents, branches, message) = chunk.split('\x01')
branches = filter(bool, branches.split(' ')) or ['default']
parents = filter(lambda x: x and x != '0' * 40, parents.split(' '))
author_date = datetime.utcfromtimestamp(
mktime_tz(parsedate_tz(author_date)))
yield RevisionResult(
id=sha,
author=author,
author_date=author_date,
message=message,
parents=parents,
branches=branches,
)
def export(self, id):
"""Get the textual diff for a revision.
Args:
id (str): The id of the revision.
Returns:
A string with the text of the diff for the revision.
Raises:
UnknownRevision: If the revision wasn't found.
"""
cmd = ['diff', '-g', '-c %s' % (id,)]
result = self.run(cmd)
return result
def get_changed_files(self, id):
"""Returns the list of files changed in a revision.
Args:
id (str): The id of the revision.
Returns:
A set of filenames
Raises:
UnknownRevision: If the revision wasn't found.
"""
cmd = ['status', '--rev', '%s^..%s' % (id, id), '-n']
output = self.run(cmd)
return set([x.strip() for x in output.splitlines()])
def is_child_parent(self, child_in_question, parent_in_question):
cmd = ['debugancestor', parent_in_question, child_in_question]
result = self.run(cmd)
return parent_in_question in result
def get_known_branches(self):
""" Gets all the named branches.
:return: A list of unique names for the branches.
"""
start_time = time()
cmd = ['branches']
results = self.run(cmd)
branch_names = set()
for line in results.splitlines():
if line:
name = line.split(None, 1)
if name[0]:
branch_names.add(name[0])
self.log_timing('get_known_branches', start_time)
return list(branch_names)
@staticmethod
def get_clone_command(remote_url, path, revision, clean=True, cache_dir=None):
# type: (str, str, str, bool, Optional[str]) -> str
if cache_dir is not None:
logging.warning("unexpected cache_dir provided for hg repository")
return BASH_CLONE_STEP % dict(
remote_url=remote_url,
local_path=path,
revision=revision,
clean_arg='--clean' if clean else '',
)
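# Illustration (not in the original source): with clean=True the rendered
# BASH_CLONE_STEP runs `hg up --clean $REVISION`; with clean=False the
# clean_arg substitution is empty and the update becomes a plain `hg up $REVISION`.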
def get_buildstep_clone(self, source, workspace, clean=True, cache_dir=None):
# type: (Any, str, bool, Optional[str]) -> str
return MercurialVcs.get_clone_command(self.remote_url, workspace, source.revision_sha, clean, cache_dir)
def get_buildstep_patch(self, source, workspace):
# type: (Any, str) -> str
return BASH_PATCH_STEP % dict(
local_path=workspace,
patch_url=build_patch_uri(source.patch_id),
)
def read_file(self, sha, file_path, diff=None):
"""Read the content of a file at a given revision.
Args:
sha (str): the sha identifying the revision
file_path (str): the path to the file from the root of the repo
diff (str): the optional patch to apply before reading the config
Returns:
str - the content of the file
Raises:
CommandError - if the file or the revision cannot be found
"""
content = self.run(['cat', '-r', sha, file_path])
if diff is None:
return content
return self._selectively_apply_diff(file_path, content, diff)
def get_patch_hash(self, rev_sha):
"""Not possible for mercurial repositories"""
return None
| {
"repo_name": "dropbox/changes",
"path": "changes/vcs/hg.py",
"copies": "1",
"size": "8035",
"license": "apache-2.0",
"hash": -3294238981464419300,
"line_mean": 30.0231660232,
"line_max": 112,
"alpha_frac": 0.5632856254,
"autogenerated": false,
"ratio": 3.932941752325012,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4996227377725012,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from rfc822 import parsedate_tz, mktime_tz
from urlparse import urlparse
from time import time
from changes.utils.http import build_uri
from .base import Vcs, RevisionResult, BufferParser, CommandError, UnknownRevision
LOG_FORMAT = '{node}\x01{author}\x01{date|rfc822date}\x01{p1node} {p2node}\x01{branches}\x01{desc}\x02'
BASH_CLONE_STEP = """
#!/bin/bash -eux
REMOTE_URL=%(remote_url)s
LOCAL_PATH=%(local_path)s
REVISION=%(revision)s
if [ ! -d $LOCAL_PATH/.hg ]; then
hg clone --uncompressed $REMOTE_URL $LOCAL_PATH
pushd $LOCAL_PATH
else
pushd $LOCAL_PATH
hg recover || true
hg pull $REMOTE_URL
fi
if ! hg up --clean $REVISION ; then
echo "Failed to update to $REVISION"
exit 1
fi
# similar to hg purge, but without requiring the extension
hg status -un0 | xargs -0 rm -rf
""".strip()
BASH_PATCH_STEP = """
#!/bin/bash -eux
LOCAL_PATH=%(local_path)s
PATCH_URL=%(patch_url)s
pushd $LOCAL_PATH
PATCH_PATH=/tmp/$(mktemp patch.XXXXXXXXXX)
curl -o $PATCH_PATH $PATCH_URL
hg import --no-commit $PATCH_PATH
""".strip()
class MercurialVcs(Vcs):
binary_path = 'hg'
def get_default_env(self):
return {
'HGPLAIN': '1',
}
# This is static so that the repository serializer can easily use it
@staticmethod
def get_default_revision():
return 'default'
@property
def remote_url(self):
if self.url.startswith(('ssh:', 'http:', 'https:')):
parsed = urlparse(self.url)
url = '%s://%s@%s/%s' % (
parsed.scheme,
parsed.username or self.username or 'hg',
parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
parsed.path.lstrip('/'),
)
else:
url = self.url
return url
def run(self, cmd, **kwargs):
cmd = [
self.binary_path,
'--config',
'ui.ssh={0}'.format(self.ssh_connect_path)
] + cmd
try:
return super(MercurialVcs, self).run(cmd, **kwargs)
except CommandError as e:
if "abort: unknown revision '" in e.stderr:
raise UnknownRevision(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr,
)
raise
def clone(self):
self.run(['clone', '--uncompressed', self.remote_url, self.path])
def update(self):
self.run(['pull'])
def log(self, parent=None, branch=None, author=None, offset=0, limit=100, paths=None):
""" Gets the commit log for the repository.
Each revision returned has exactly one branch name associated with it.
This is the branch name encoded into the revision changeset description.
See documentation for the base for general information on this function.
"""
start_time = time()
# TODO(dcramer): we should make this streaming
cmd = ['log', '--template=%s' % (LOG_FORMAT,)]
if parent and branch:
raise ValueError('Both parent and branch cannot be set')
# Build the -r parameter value into r_str with branch, parent and author
r_str = None
if branch:
cmd.append('-b{0}'.format(branch))
if parent:
r_str = ('ancestors(%s)' % parent)
if author:
r_str = ('({r}) and author("{0}")' if r_str else 'author("{0}")')\
.format(author, r=r_str)
if r_str:
cmd.append('-r reverse({0})'.format(r_str))
if limit:
cmd.append('--limit=%d' % (offset + limit,))
if paths:
cmd.extend(["glob:" + p.strip() for p in paths])
result = self.run(cmd)
self.log_timing('log', start_time)
for idx, chunk in enumerate(BufferParser(result, '\x02')):
if idx < offset:
continue
(sha, author, author_date, parents, branches, message) = chunk.split('\x01')
branches = filter(bool, branches.split(' ')) or ['default']
parents = filter(lambda x: x and x != '0' * 40, parents.split(' '))
author_date = datetime.utcfromtimestamp(
mktime_tz(parsedate_tz(author_date)))
yield RevisionResult(
id=sha,
author=author,
author_date=author_date,
message=message,
parents=parents,
branches=branches,
)
def export(self, id):
"""Get the textual diff for a revision.
Args:
id (str): The id of the revision.
Returns:
A string with the text of the diff for the revision.
Raises:
UnknownRevision: If the revision wasn't found.
"""
cmd = ['diff', '-g', '-c %s' % (id,)]
result = self.run(cmd)
return result
def is_child_parent(self, child_in_question, parent_in_question):
cmd = ['debugancestor', parent_in_question, child_in_question]
result = self.run(cmd)
return parent_in_question in result
def get_known_branches(self):
""" Gets all the named branches.
:return: A list of unique names for the branches.
"""
start_time = time()
cmd = ['branches']
results = self.run(cmd)
branch_names = set()
for line in results.splitlines():
if line:
name = line.split(None, 1)
if name[0]:
branch_names.add(name[0])
self.log_timing('get_known_branches', start_time)
return list(branch_names)
def get_buildstep_clone(self, source, workspace):
return BASH_CLONE_STEP % dict(
remote_url=self.remote_url,
local_path=workspace,
revision=source.revision_sha,
)
def get_buildstep_patch(self, source, workspace):
return BASH_PATCH_STEP % dict(
local_path=workspace,
patch_url=build_uri('/api/0/patches/{0}/?raw=1'.format(
source.patch_id.hex)),
)
def read_file(self, sha, file_path, diff=None):
"""Read the content of a file at a given revision.
Args:
sha (str): the sha identifying the revision
file_path (str): the path to the file from the root of the repo
diff (str): the optional patch to apply before reading the config
Returns:
str - the content of the file
Raises:
CommandError - if the file or the revision cannot be found
"""
content = self.run(['cat', '-r', sha, file_path])
if diff is None:
return content
return self._selectively_apply_diff(file_path, content, diff)
| {
"repo_name": "bowlofstew/changes",
"path": "changes/vcs/hg.py",
"copies": "2",
"size": "6962",
"license": "apache-2.0",
"hash": 51399475761799250,
"line_mean": 29.4017467249,
"line_max": 103,
"alpha_frac": 0.5583165757,
"autogenerated": false,
"ratio": 3.9579306424104606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00040599900978070347,
"num_lines": 229
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from rfc822 import parsedate_tz, mktime_tz
from urlparse import urlparse
from .base import Vcs, RevisionResult, BufferParser
LOG_FORMAT = '{node}\x01{author}\x01{date|rfc822date}\x01{p1node} {p2node}\x01{branches}\x01{desc}\x02'
class MercurialVcs(Vcs):
binary_path = 'hg'
def get_default_env(self):
return {
'HGPLAIN': '1',
}
def get_default_revision(self):
return 'default'
@property
def remote_url(self):
if self.url.startswith(('ssh:', 'http:', 'https:')):
parsed = urlparse(self.url)
url = '%s://%s@%s/%s' % (
parsed.scheme,
parsed.username or self.username or 'hg',
parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
parsed.path.lstrip('/'),
)
else:
url = self.url
return url
def run(self, cmd, **kwargs):
cmd = [
self.binary_path,
'--config',
'ui.ssh={0}'.format(self.ssh_connect_path)
] + cmd
return super(MercurialVcs, self).run(cmd, **kwargs)
def clone(self):
self.run(['clone', '--uncompressed', self.remote_url, self.path])
def update(self):
self.run(['pull'])
def log(self, parent=None, offset=0, limit=100):
# TODO(dcramer): we should make this streaming
cmd = ['log', '--template=%s' % (LOG_FORMAT,)]
if parent:
cmd.append('-r reverse(ancestors(%s))' % (parent,))
if limit:
cmd.append('--limit=%d' % (offset + limit,))
result = self.run(cmd)
for idx, chunk in enumerate(BufferParser(result, '\x02')):
if idx < offset:
continue
(sha, author, author_date, parents, branches, message) = chunk.split('\x01')
branches = filter(bool, branches.split(' ')) or ['default']
parents = filter(lambda x: x and x != '0' * 40, parents.split(' '))
author_date = datetime.utcfromtimestamp(
mktime_tz(parsedate_tz(author_date)))
yield RevisionResult(
id=sha,
author=author,
author_date=author_date,
message=message,
parents=parents,
branches=branches,
)
def export(self, id):
cmd = ['diff', '-g', '-c %s' % (id,)]
result = self.run(cmd)
return result
def is_child_parent(self, child_in_question, parent_in_question):
cmd = ['debugancestor', parent_in_question, child_in_question]
result = self.run(cmd)
return parent_in_question in result
| {
"repo_name": "alex/changes",
"path": "changes/vcs/hg.py",
"copies": "1",
"size": "2790",
"license": "apache-2.0",
"hash": -4492430468260170000,
"line_mean": 30.3483146067,
"line_max": 103,
"alpha_frac": 0.5394265233,
"autogenerated": false,
"ratio": 3.84297520661157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.488240172991157,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from urlparse import urlparse
from typing import Any, Optional # NOQA
from changes.utils.cache import memoize
from changes.utils.http import build_patch_uri
from .base import (
Vcs, RevisionResult, BufferParser, ConcurrentUpdateError, CommandError,
ContentReadError, MissingFileError, UnknownChildRevision, UnknownParentRevision, UnknownRevision,
)
import re
from time import time
LOG_FORMAT = '%H\x01%an <%ae>\x01%at\x01%cn <%ce>\x01%ct\x01%P\x01%B\x02'
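# Descriptive note (not in the original source): LOG_FORMAT fields are
#   %H  commit sha              %an <%ae>  author name/email
#   %at author timestamp        %cn <%ce>  committer name/email
#   %ct committer timestamp     %P         parent shas
#   %B  raw body
# joined with \x01, with \x02 terminating each record, matching the
# chunk.split('\x01') parsing in log() below.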
ORIGIN_PREFIX = 'remotes/origin/'
BASH_CLONE_STEP = """
#!/bin/bash -eux
REMOTE_URL=%(remote_url)s
LOCAL_PATH=%(local_path)s
REVISION=%(revision)s
CACHE_PATH="%(cache_dir)s/%(cache_base_name)s"
if [ -d "$CACHE_PATH" ]; then
echo "Using local repository cache."
REMOTE_URL="$CACHE_PATH"
fi
if [ ! -d $LOCAL_PATH/.git ]; then
GIT_SSH_COMMAND="ssh -v" \
git clone $REMOTE_URL $LOCAL_PATH || \
git clone $REMOTE_URL $LOCAL_PATH
pushd $LOCAL_PATH
else
pushd $LOCAL_PATH
git remote set-url origin $REMOTE_URL
GIT_SSH_COMMAND="ssh -v" \
git fetch --all -p || \
GIT_SSH_COMMAND="ssh -v" \
git fetch --all -p
fi
GIT_SSH_COMMAND="ssh -v" \
git fetch origin +refs/*:refs/remotes-all-refs/origin/* || \
GIT_SSH_COMMAND="ssh -v" \
git fetch origin +refs/*:refs/remotes-all-refs/origin/*
%(pre_reset_command)s
%(clean_command)s
if ! git reset --hard $REVISION -- ; then
echo "Failed to update to $REVISION"
exit 1
fi
""".strip()
BASH_PATCH_STEP = """
#!/bin/bash -eux
LOCAL_PATH=%(local_path)s
PATCH_URL=%(patch_url)s
pushd $LOCAL_PATH
PATCH_PATH=/tmp/$(mktemp patch.XXXXXXXXXX)
curl -o $PATCH_PATH $PATCH_URL
git apply --index $PATCH_PATH
export GIT_COMMITTER_NAME="Patch applier" GIT_COMMITTER_EMAIL="dev-tools+git-patch@dropbox.com"
export GIT_AUTHOR_NAME=$GIT_COMMITTER_NAME GIT_AUTHOR_EMAIL=$GIT_COMMITTER_EMAIL
git commit -m 'Diff build'
""".strip()
class LazyGitRevisionResult(RevisionResult):
def __init__(self, vcs, *args, **kwargs):
self.vcs = vcs
super(LazyGitRevisionResult, self).__init__(*args, **kwargs)
@memoize
def branches(self):
return self.vcs.branches_for_commit(self.id)
class GitVcs(Vcs):
binary_path = 'git'
def get_default_env(self):
return {
'GIT_SSH': self.ssh_connect_path,
}
# This is static so that the repository serializer can easily use it
@staticmethod
def get_default_revision():
return 'master'
@property
def remote_url(self):
if self.url.startswith(('ssh:', 'http:', 'https:')):
parsed = urlparse(self.url)
url = '%s://%s@%s/%s' % (
parsed.scheme,
parsed.username or self.username or 'git',
parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
parsed.path.lstrip('/'),
)
else:
url = self.url
return url
def branches_for_commit(self, _id):
return self.get_known_branches(commit_id=_id)
def get_known_branches(self, commit_id=None):
""" List all branches or those related to the commit for this repo.
Either gets all the branches (if the commit_id is not specified) or then
the branches related to the given commit reference.
:param commit_id: A commit ID for fetching all related branches. If not
specified, returns all branch names for this repository.
:return: List of branches for the commit, or all branches for the repo.
"""
start_time = time()
results = []
command_parameters = ['branch', '-a']
if commit_id:
command_parameters.extend(['--contains', commit_id])
output = self.run(command_parameters)
for result in output.splitlines():
# HACK(dcramer): is there a better way around removing the prefix?
result = result[2:].strip()
if result.startswith(ORIGIN_PREFIX):
result = result[len(ORIGIN_PREFIX):]
if result == 'HEAD':
continue
results.append(result)
self.log_timing('get_known_branches', start_time)
return list(set(results))
def run(self, cmd, **kwargs):
cmd = [self.binary_path] + cmd
try:
return super(GitVcs, self).run(cmd, **kwargs)
except CommandError as e:
if 'unknown revision or path' in e.stderr:
raise UnknownRevision(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr,
)
raise
def clone(self):
self.run(['clone', '--mirror', self.remote_url, self.path], cwd='/')
def update(self):
self.run(['remote', 'set-url', 'origin', self.remote_url])
try:
self.run(['fetch', '--all', '-p'])
except CommandError as e:
if 'error: cannot lock ref' in e.stderr.lower():
raise ConcurrentUpdateError(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr
)
raise e
def log(self, parent=None, branch=None, author=None, offset=0, limit=100,
paths=None, first_parent=True):
""" Gets the commit log for the repository.
Each revision returned includes all the branches with which this commit
is associated. There will always be at least one associated branch.
See documentation for the base for general information on this function.
"""
start_time = time()
# TODO(dcramer): we should make this streaming
cmd = ['log', '--pretty=format:%s' % (LOG_FORMAT,)]
if not first_parent:
# --date-order can be vastly slower. When --first-parent is
# provided, the history returned is inherently linear, so
# there's no freedom to reorder commits at all, meaning
# --date-order would be effectively a no-op.
cmd.append('--date-order')
if first_parent:
cmd.append('--first-parent')
if author:
cmd.append('--author=%s' % (author,))
if offset:
cmd.append('--skip=%d' % (offset,))
if limit:
cmd.append('--max-count=%d' % (limit,))
if parent and branch:
raise ValueError('Both parent and branch cannot be set')
if branch:
cmd.append(branch)
# TODO(dcramer): determine correct way to paginate results in git as
# combining --all with --parent causes issues
elif not parent:
cmd.append('--all')
if parent:
cmd.append(parent)
if paths:
cmd.append("--")
cmd.extend([p.strip() for p in paths])
try:
result = self.run(cmd)
except CommandError as cmd_error:
err_msg = cmd_error.stderr
if branch and branch in err_msg:
import traceback
import logging
msg = traceback.format_exception(CommandError, cmd_error, None)
logging.warning(msg)
raise ValueError('Unable to fetch commit log for branch "{0}".'
.format(branch))
raise
self.log_timing('log', start_time)
for chunk in BufferParser(result, '\x02'):
(sha, author, author_date, committer, committer_date,
parents, message) = chunk.split('\x01')
# sha may have a trailing newline due to git log adding it
sha = sha.lstrip('\n')
parents = filter(bool, parents.split(' '))
author_date = datetime.utcfromtimestamp(float(author_date))
committer_date = datetime.utcfromtimestamp(float(committer_date))
yield LazyGitRevisionResult(
vcs=self,
id=sha,
author=author,
committer=committer,
author_date=author_date,
committer_date=committer_date,
parents=parents,
message=message,
)
def export(self, id):
"""Get the textual diff for a revision.
Args:
id (str): The id of the revision.
Returns:
A string with the text of the diff for the revision.
Raises:
UnknownRevision: If the revision wasn't found.
"""
cmd = ['diff', '%s^..%s' % (id, id)]
result = self.run(cmd)
return result
def get_changed_files(self, id):
"""Returns the list of files changed in a revision.
Args:
id (str): The id of the revision.
Returns:
A set of filenames
Raises:
UnknownRevision: If the revision wasn't found.
"""
cmd = ['diff', '--name-only', '%s^..%s' % (id, id)]
output = self.run(cmd)
return set([x.strip() for x in output.splitlines()])
def is_child_parent(self, child_in_question, parent_in_question):
cmd = ['merge-base', '--is-ancestor', parent_in_question, child_in_question]
try:
self.run(cmd)
return True
except CommandError as e:
expected_err = "fatal: Not a valid commit name "
m = re.match(r'^\s*fatal: Not a valid commit name (?P<sha>\S+)\s*$', e.stderr)
if e.retcode == 1:
return False
elif e.retcode == 128 and m:
sha = m.group('sha')
if sha == parent_in_question:
raise UnknownParentRevision(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr)
elif sha == child_in_question:
raise UnknownChildRevision(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr)
else:
raise
@classmethod
def get_repository_name(cls, repository_url):
# type: (str) -> str
name = super(GitVcs, cls).get_repository_name(repository_url)
if not name.endswith('.git'):
name += '.git'
return name
@staticmethod
def get_clone_command(remote_url, path, revision, clean=True, cache_dir=None):
# type: (str, str, str, bool, Optional[str]) -> str
return BASH_CLONE_STEP % dict(
remote_url=remote_url,
local_path=path,
revision=revision,
cache_dir=cache_dir or "/dev/null",
clean_command='git clean -fdx' if clean else '',
pre_reset_command='git checkout -q master',
cache_base_name=GitVcs.get_repository_name(remote_url),
)
def get_buildstep_clone(self, source, workspace, clean=True, cache_dir=None):
# type: (Any, str, bool, Optional[str]) -> str
return GitVcs.get_clone_command(self.remote_url, workspace, source.revision_sha, clean, cache_dir)
def get_buildstep_patch(self, source, workspace):
# type: (Any, str) -> str
return BASH_PATCH_STEP % dict(
local_path=workspace,
patch_url=build_patch_uri(source.patch_id),
)
def get_buildstep_checkout_revision(self, revision_sha):
# type: (str) -> str
return "#!/bin/bash\n/usr/bin/git checkout {sha}".format(sha=revision_sha)
def get_buildstep_checkout_parent_revision(self, revision_sha):
# type: (str) -> str
return "#!/bin/bash\n/usr/bin/git checkout {sha}^".format(sha=revision_sha)
def get_buildstep_changed_files(self, revision_sha):
# type: (str) -> str
return "#!/bin/bash\n/usr/bin/git diff --name-only {sha}^..{sha}".format(sha=revision_sha)
def read_file(self, sha, file_path, diff=None):
"""Read the content of a file at a given revision.
Args:
sha (str): the sha identifying the revision
file_path (str): the path to the file from the root of the repo
diff (str): the optional patch to apply before reading the config
Returns:
str - the content of the file
Raises:
CommandError - if the git invocation fails.
ContentReadError - if the content can't be read because the named file is missing,
links to something outside the tree, links to an absent file, or links to itself.
UnknownRevision - if revision doesn't seem to exist
"""
cmd = ['cat-file', '--batch', '--follow-symlinks']
obj_key = '{revision}:{file_path}'.format(revision=sha, file_path=file_path)
content = self.run(cmd, input=obj_key)
info, content = content.split('\n', 1)
if info.endswith('missing'):
# either revision is missing or file is missing
try:
# will raise CommandError if revision is missing
self.run(['cat-file', 'commit', sha])
except CommandError as e:
raise UnknownRevision(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr)
# file could have been added in the patch
if diff is not None:
content = self._selectively_apply_diff(file_path, '', diff)
if content:
return content
# TODO(adai): return empty string instead of raising MissingFileError when
# empty file is added in the patch
raise MissingFileError('No such file at revision: {}'.format(obj_key))
if any(info.startswith(s) for s in ('symlink', 'dangling', 'loop')):
raise ContentReadError('Unable to read file contents: {}'.format(info.split()[0]))
if not re.match(r'^[a-f0-9]+ blob \d+$', info):
raise ContentReadError('Unrecognized metadata for {}: {!r}'.format(obj_key, info))
assert content.endswith('\n')
content = content[:-1]
if diff is None:
return content
return self._selectively_apply_diff(file_path, content, diff)
def get_patch_hash(self, rev_sha):
# type: (str) -> str
"""Get the patch id for the revision"""
# This is a workaround to be able to pipe the output of diff to patch-id
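# Roughly equivalent shell pipeline (descriptive note, not in the original
# source): `git diff <sha>^..<sha> | git patch-id --stable`, keeping only the
# patch-id column of the output.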
p_diff = self._construct_subprocess([self.binary_path, 'diff', '{rev_sha}^..{rev_sha}'.format(rev_sha=rev_sha)])
patch_cmd = [self.binary_path, 'patch-id', '--stable']
p_patch = self._construct_subprocess(patch_cmd, stdin=p_diff.stdout)
p_diff.stdout.close()
# There is also a commit id string that follows the patch id, which we want to throw away.
return self._execute_subproccess(p_patch, patch_cmd).split(' ')[0]
| {
"repo_name": "dropbox/changes",
"path": "changes/vcs/git.py",
"copies": "1",
"size": "15157",
"license": "apache-2.0",
"hash": 5873495937274967000,
"line_mean": 35.0023752969,
"line_max": 120,
"alpha_frac": 0.5679883882,
"autogenerated": false,
"ratio": 4.00449141347424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.507247980167424,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from urlparse import urlparse
from changes.utils.cache import memoize
from changes.utils.http import build_uri
from .base import Vcs, RevisionResult, BufferParser, CommandError, UnknownRevision
from time import time
LOG_FORMAT = '%H\x01%an <%ae>\x01%at\x01%cn <%ce>\x01%ct\x01%P\x01%B\x02'
ORIGIN_PREFIX = 'remotes/origin/'
BASH_CLONE_STEP = """
#!/bin/bash -eux
REMOTE_URL=%(remote_url)s
LOCAL_PATH=%(local_path)s
REVISION=%(revision)s
if [ ! -d $LOCAL_PATH/.git ]; then
GIT_SSH_COMMAND="ssh -v" \
git clone $REMOTE_URL $LOCAL_PATH || \
git clone $REMOTE_URL $LOCAL_PATH
pushd $LOCAL_PATH
else
pushd $LOCAL_PATH
git remote set-url origin $REMOTE_URL
GIT_SSH_COMMAND="ssh -v" \
git fetch --all -p || \
GIT_SSH_COMMAND="ssh -v" \
git fetch --all -p
fi
GIT_SSH_COMMAND="ssh -v" \
git fetch origin +refs/*:refs/remotes-all-refs/origin/* || \
GIT_SSH_COMMAND="ssh -v" \
git fetch origin +refs/*:refs/remotes-all-refs/origin/*
git clean -fdx
if ! git reset --hard $REVISION ; then
echo "Failed to update to $REVISION"
exit 1
fi
""".strip()
BASH_PATCH_STEP = """
#!/bin/bash -eux
LOCAL_PATH=%(local_path)s
PATCH_URL=%(patch_url)s
pushd $LOCAL_PATH
PATCH_PATH=/tmp/$(mktemp patch.XXXXXXXXXX)
curl -o $PATCH_PATH $PATCH_URL
git apply $PATCH_PATH
""".strip()
class LazyGitRevisionResult(RevisionResult):
def __init__(self, vcs, *args, **kwargs):
self.vcs = vcs
super(LazyGitRevisionResult, self).__init__(*args, **kwargs)
@memoize
def branches(self):
return self.vcs.branches_for_commit(self.id)
class GitVcs(Vcs):
binary_path = 'git'
def get_default_env(self):
return {
'GIT_SSH': self.ssh_connect_path,
}
# This is static so that the repository serializer can easily use it
@staticmethod
def get_default_revision():
return 'master'
@property
def remote_url(self):
if self.url.startswith(('ssh:', 'http:', 'https:')):
parsed = urlparse(self.url)
url = '%s://%s@%s/%s' % (
parsed.scheme,
parsed.username or self.username or 'git',
parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
parsed.path.lstrip('/'),
)
else:
url = self.url
return url
def branches_for_commit(self, _id):
return self.get_known_branches(commit_id=_id)
def get_known_branches(self, commit_id=None):
""" List all branches or those related to the commit for this repo.
Either gets all the branches (if the commit_id is not specified) or then
the branches related to the given commit reference.
:param commit_id: A commit ID for fetching all related branches. If not
specified, returns all branch names for this repository.
:return: List of branches for the commit, or all branches for the repo.
"""
start_time = time()
results = []
command_parameters = ['branch', '-a']
if commit_id:
command_parameters.extend(['--contains', commit_id])
output = self.run(command_parameters)
for result in output.splitlines():
# HACK(dcramer): is there a better way around removing the prefix?
result = result[2:].strip()
if result.startswith(ORIGIN_PREFIX):
result = result[len(ORIGIN_PREFIX):]
if result == 'HEAD':
continue
results.append(result)
self.log_timing('get_known_branches', start_time)
return list(set(results))
def run(self, cmd, **kwargs):
cmd = [self.binary_path] + cmd
try:
return super(GitVcs, self).run(cmd, **kwargs)
except CommandError as e:
if 'unknown revision or path' in e.stderr:
raise UnknownRevision(
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr,
)
raise
def clone(self):
self.run(['clone', '--mirror', self.remote_url, self.path])
def update(self):
self.run(['remote', 'set-url', 'origin', self.remote_url])
self.run(['fetch', '--all', '-p'])
def log(self, parent=None, branch=None, author=None, offset=0, limit=100, paths=None):
""" Gets the commit log for the repository.
Each revision returned includes all the branches with which this commit
is associated. There will always be at least one associated branch.
See documentation for the base for general information on this function.
"""
start_time = time()
# TODO(dcramer): we should make this streaming
cmd = ['log', '--date-order', '--pretty=format:%s' % (LOG_FORMAT,), '--first-parent']
if author:
cmd.append('--author=%s' % (author,))
if offset:
cmd.append('--skip=%d' % (offset,))
if limit:
cmd.append('--max-count=%d' % (limit,))
if parent and branch:
raise ValueError('Both parent and branch cannot be set')
if branch:
cmd.append(branch)
# TODO(dcramer): determine correct way to paginate results in git as
# combining --all with --parent causes issues
elif not parent:
cmd.append('--all')
if parent:
cmd.append(parent)
if paths:
cmd.append("--")
cmd.extend([p.strip() for p in paths])
try:
result = self.run(cmd)
except CommandError as cmd_error:
err_msg = cmd_error.stderr
if branch and branch in err_msg:
import traceback
import logging
msg = traceback.format_exception(CommandError, cmd_error, None)
logging.warning(msg)
raise ValueError('Unable to fetch commit log for branch "{0}".'
.format(branch))
raise
self.log_timing('log', start_time)
for chunk in BufferParser(result, '\x02'):
(sha, author, author_date, committer, committer_date,
parents, message) = chunk.split('\x01')
# sha may have a trailing newline due to git log adding it
sha = sha.lstrip('\n')
parents = filter(bool, parents.split(' '))
author_date = datetime.utcfromtimestamp(float(author_date))
committer_date = datetime.utcfromtimestamp(float(committer_date))
yield LazyGitRevisionResult(
vcs=self,
id=sha,
author=author,
committer=committer,
author_date=author_date,
committer_date=committer_date,
parents=parents,
message=message,
)
def export(self, id):
"""Get the textual diff for a revision.
Args:
id (str): The id of the revision.
Returns:
A string with the text of the diff for the revision.
Raises:
UnknownRevision: If the revision wasn't found.
"""
cmd = ['diff', '%s^..%s' % (id, id)]
result = self.run(cmd)
return result
def is_child_parent(self, child_in_question, parent_in_question):
cmd = ['merge-base', '--is-ancestor', parent_in_question, child_in_question]
try:
self.run(cmd)
return True
except CommandError:
return False
def get_buildstep_clone(self, source, workspace):
return BASH_CLONE_STEP % dict(
remote_url=self.remote_url,
local_path=workspace,
revision=source.revision_sha,
)
def get_buildstep_patch(self, source, workspace):
return BASH_PATCH_STEP % dict(
local_path=workspace,
patch_url=build_uri('/api/0/patches/{0}/?raw=1'.format(
source.patch_id.hex)),
)
def read_file(self, sha, file_path, diff=None):
"""Read the content of a file at a given revision.
Args:
sha (str): the sha identifying the revision
file_path (str): the path to the file from the root of the repo
diff (str): the optional patch to apply before reading the config
Returns:
str - the content of the file
Raises:
CommandError - if the file or the revision cannot be found
"""
cmd = ['show', '{revision}:{file_path}'.format(
revision=sha, file_path=file_path
)]
content = self.run(cmd)
if diff is None:
return content
return self._selectively_apply_diff(file_path, content, diff)
| {
"repo_name": "bowlofstew/changes",
"path": "changes/vcs/git.py",
"copies": "2",
"size": "8959",
"license": "apache-2.0",
"hash": 468944811673513300,
"line_mean": 30.7695035461,
"line_max": 93,
"alpha_frac": 0.5702645385,
"autogenerated": false,
"ratio": 4.015688032272523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00029141758266157587,
"num_lines": 282
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from urlparse import urlparse
from changes.utils.cache import memoize
from .base import Vcs, RevisionResult, BufferParser, CommandError
LOG_FORMAT = '%H\x01%an <%ae>\x01%at\x01%cn <%ce>\x01%ct\x01%P\x01%B\x02'
ORIGIN_PREFIX = 'remotes/origin/'
class LazyGitRevisionResult(RevisionResult):
def __init__(self, vcs, *args, **kwargs):
self.vcs = vcs
super(LazyGitRevisionResult, self).__init__(*args, **kwargs)
@memoize
def branches(self):
return self.vcs.branches_for_commit(self.id)
class GitVcs(Vcs):
binary_path = 'git'
def get_default_env(self):
return {
'GIT_SSH': self.ssh_connect_path,
}
def get_default_revision(self):
return 'master'
@property
def remote_url(self):
if self.url.startswith(('ssh:', 'http:', 'https:')):
parsed = urlparse(self.url)
url = '%s://%s@%s/%s' % (
parsed.scheme,
parsed.username or self.username or 'git',
parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
parsed.path.lstrip('/'),
)
else:
url = self.url
return url
def branches_for_commit(self, id):
results = []
output = self.run(['branch', '-a', '--contains', id])
for result in output.splitlines():
# HACK(dcramer): is there a better way around removing the prefix?
result = result[2:].strip()
if result.startswith(ORIGIN_PREFIX):
result = result[len(ORIGIN_PREFIX):]
if result == 'HEAD':
continue
results.append(result)
return list(set(results))
def run(self, cmd, **kwargs):
cmd = [self.binary_path] + cmd
return super(GitVcs, self).run(cmd, **kwargs)
def clone(self):
self.run(['clone', '--mirror', self.remote_url, self.path])
def update(self):
self.run(['fetch', '--all'])
def log(self, parent=None, offset=0, limit=100):
# TODO(dcramer): we should make this streaming
cmd = ['log', '--all', '--pretty=format:%s' % (LOG_FORMAT,)]
if parent:
cmd.append(parent)
if offset:
cmd.append('--skip=%d' % (offset,))
if limit:
cmd.append('--max-count=%d' % (limit,))
result = self.run(cmd)
for chunk in BufferParser(result, '\x02'):
(sha, author, author_date, committer, committer_date,
parents, message) = chunk.split('\x01')
# sha may have a trailing newline due to git log adding it
sha = sha.lstrip('\n')
parents = filter(bool, parents.split(' '))
author_date = datetime.utcfromtimestamp(float(author_date))
committer_date = datetime.utcfromtimestamp(float(committer_date))
yield LazyGitRevisionResult(
vcs=self,
id=sha,
author=author,
committer=committer,
author_date=author_date,
committer_date=committer_date,
parents=parents,
message=message,
)
def export(self, id):
cmd = ['log', '-n 1', '-p', '--pretty="%b"', id]
result = self.run(cmd)[4:]
return result
def is_child_parent(self, child_in_question, parent_in_question):
cmd = ['merge-base', '--is-ancestor', parent_in_question, child_in_question]
try:
self.run(cmd)
return True
except CommandError:
return False
| {
"repo_name": "alex/changes",
"path": "changes/vcs/git.py",
"copies": "1",
"size": "3714",
"license": "apache-2.0",
"hash": -2885829676541598700,
"line_mean": 30.4745762712,
"line_max": 84,
"alpha_frac": 0.5479267636,
"autogenerated": false,
"ratio": 3.9177215189873418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9964639261457959,
"avg_score": 0.0002018042258765871,
"num_lines": 118
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from toolz import merge
import bisect
import numpy as np
import pandas as pd
from .core import new_dd_object, Series
from . import methods
from ..base import tokenize
class _LocIndexer(object):
""" Helper class for the .loc accessor """
def __init__(self, obj):
self.obj = obj
@property
def _name(self):
return self.obj._name
def _make_meta(self, iindexer, cindexer):
"""
get metadata
"""
if cindexer is None:
return self.obj
else:
return self.obj._meta.loc[:, cindexer]
def __getitem__(self, key):
if isinstance(key, tuple):
# multi-dimensional selection
if len(key) > self.obj.ndim:
# raise from pandas
msg = 'Too many indexers'
raise pd.core.indexing.IndexingError(msg)
iindexer = key[0]
cindexer = key[1]
else:
# if self.obj is Series, cindexer is always None
iindexer = key
cindexer = None
return self._loc(iindexer, cindexer)
def _loc(self, iindexer, cindexer):
""" Helper function for the .loc accessor """
if isinstance(iindexer, Series):
return self._loc_series(iindexer, cindexer)
if self.obj.known_divisions:
iindexer = self._maybe_partial_time_string(iindexer)
if isinstance(iindexer, slice):
return self._loc_slice(iindexer, cindexer)
else:
# element should raise KeyError
return self._loc_element(iindexer, cindexer)
else:
if not isinstance(iindexer, slice):
iindexer = slice(iindexer, iindexer)
meta = self._make_meta(iindexer, cindexer)
return self.obj.map_partitions(methods.try_loc, iindexer, cindexer,
meta=meta)
def _maybe_partial_time_string(self, iindexer):
"""
Convert index-indexer for partial time string slicing
if obj.index is DatetimeIndex / PeriodIndex
"""
iindexer = _maybe_partial_time_string(self.obj._meta_nonempty.index,
iindexer, kind='loc')
return iindexer
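# Illustration (not in the original source): on a DatetimeIndex-backed object,
# a partial string indexer such as '2011-01' is widened here to a slice
# covering the whole month before the per-partition .loc calls run; the exact
# bounds come from pandas' _maybe_cast_slice_bound and may vary across pandas
# versions.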
def _loc_series(self, iindexer, cindexer):
meta = self._make_meta(iindexer, cindexer)
return self.obj.map_partitions(methods.loc, iindexer, cindexer,
token='loc-series', meta=meta)
def _loc_element(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, self.obj)
part = self._partition_of_index_value(iindexer)
if iindexer < self.obj.divisions[0] or iindexer > self.obj.divisions[-1]:
raise KeyError('the label [%s] is not in the index' % str(iindexer))
dsk = {(name, 0): (methods.loc, (self._name, part),
slice(iindexer, iindexer), cindexer)}
meta = self._make_meta(iindexer, cindexer)
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=[iindexer, iindexer])
def _partition_of_index_value(self, key):
return _partition_of_index_value(self.obj.divisions, key)
def _coerce_loc_index(self, key):
return _coerce_loc_index(self.obj.divisions, key)
def _loc_slice(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, cindexer, self)
assert isinstance(iindexer, slice)
assert iindexer.step in (None, 1)
if iindexer.start is not None:
start = self._partition_of_index_value(iindexer.start)
else:
start = 0
if iindexer.stop is not None:
stop = self._partition_of_index_value(iindexer.stop)
else:
stop = self.obj.npartitions - 1
istart = self._coerce_loc_index(iindexer.start)
istop = self._coerce_loc_index(iindexer.stop)
if stop == start:
dsk = {(name, 0): (methods.loc, (self._name, start),
slice(iindexer.start, iindexer.stop), cindexer)}
divisions = [istart, istop]
else:
dsk = {(name, 0): (methods.loc, (self._name, start),
slice(iindexer.start, None), cindexer)}
for i in range(1, stop - start):
if cindexer is None:
dsk[name, i] = (self._name, start + i)
else:
dsk[name, i] = (methods.loc, (self._name, start + i),
slice(None, None), cindexer)
dsk[name, stop - start] = (methods.loc, (self._name, stop),
slice(None, iindexer.stop), cindexer)
if iindexer.start is None:
div_start = self.obj.divisions[0]
else:
div_start = max(istart, self.obj.divisions[start])
if iindexer.stop is None:
div_stop = self.obj.divisions[-1]
else:
div_stop = min(istop, self.obj.divisions[stop + 1])
divisions = ((div_start, ) +
self.obj.divisions[start + 1:stop + 1] +
(div_stop, ))
assert len(divisions) == len(dsk) + 1
meta = self._make_meta(iindexer, cindexer)
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=divisions)
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
msg = "Can not use loc on DataFrame without known divisions"
raise ValueError(msg)
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o).astype(divisions[0].dtype)
return o
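# Illustration (not in the original source): string labels are promoted so
# bisect can compare them against datetime divisions, e.g.
#   >>> _coerce_loc_index([datetime(2000, 1, 1), datetime(2000, 2, 1)], '2000-01-15')
#   Timestamp('2000-01-15 00:00:00')
# (exact repr depends on the pandas version).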
def _maybe_partial_time_string(index, indexer, kind):
"""
Convert indexer for partial string selection
if data has DatetimeIndex/PeriodIndex
"""
# do not pass dd.Index
assert isinstance(index, pd.Index)
if not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):
return indexer
if isinstance(indexer, slice):
if isinstance(indexer.start, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer.start, 'left', kind)
else:
start = indexer.start
if isinstance(indexer.stop, pd.compat.string_types):
stop = index._maybe_cast_slice_bound(indexer.stop, 'right', kind)
else:
stop = indexer.stop
return slice(start, stop)
elif isinstance(indexer, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer, 'left', 'loc')
stop = index._maybe_cast_slice_bound(indexer, 'right', 'loc')
return slice(start, stop)
return indexer
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/indexing.py",
"copies": "1",
"size": "7647",
"license": "mit",
"hash": -4306386220820054500,
"line_mean": 33.2914798206,
"line_max": 81,
"alpha_frac": 0.5678043677,
"autogenerated": false,
"ratio": 3.8836973082783137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49515016759783137,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
import bisect
import numpy as np
import pandas as pd
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
raise ValueError(
"Can not use loc on DataFrame without known divisions")
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _loc(df, start, stop, include_right_boundary=True):
"""
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> _loc(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> _loc(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> _loc(df, 1, 3, include_right_boundary=False)
x
1 10
2 20
2 30
"""
result = df.loc[start:stop]
if not include_right_boundary:
right_index = result.index.get_slice_bound(stop, 'left', 'loc')
result = result.iloc[:right_index]
return result
def _try_loc(df, ind):
try:
return df.loc[ind]
except KeyError:
return df.head(0)
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o).astype(divisions[0].dtype)
return o
def _maybe_partial_time_string(index, indexer, kind):
"""
Convert indexer for partial string selection
if data has DatetimeIndex/PeriodIndex
"""
# do not pass dd.Index
assert isinstance(index, pd.Index)
if not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):
return indexer
if isinstance(indexer, slice):
if isinstance(indexer.start, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer.start, 'left', kind)
else:
start = indexer.start
if isinstance(indexer.stop, pd.compat.string_types):
stop = index._maybe_cast_slice_bound(indexer.stop, 'right', kind)
else:
stop = indexer.stop
return slice(start, stop)
elif isinstance(indexer, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer, 'left', 'loc')
stop = index._maybe_cast_slice_bound(indexer, 'right', 'loc')
return slice(start, stop)
return indexer
| {
"repo_name": "cowlicks/dask",
"path": "dask/dataframe/indexing.py",
"copies": "2",
"size": "2876",
"license": "bsd-3-clause",
"hash": -4400707311823775000,
"line_mean": 26.1320754717,
"line_max": 78,
"alpha_frac": 0.6077885953,
"autogenerated": false,
"ratio": 3.4776299879081014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 106
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
import numpy as np
import numpy.ma as ma
from scipy.spatial import cKDTree as KDTree
import cf_units
import iris
from iris import Constraint
from iris.cube import CubeList
from iris.pandas import as_cube
from iris.exceptions import CoordinateNotFoundError, CoordinateMultiDimError
iris.FUTURE.netcdf_promote = True
iris.FUTURE.cell_datetime_objects = True
__all__ = ['is_model',
'z_coord',
'get_surface',
'time_coord',
'time_near',
'time_slice',
'bbox_extract_2Dcoords',
'bbox_extract_1Dcoords',
'subset',
'quick_load_cubes',
'proc_cube',
'add_mesh',
'ensure_timeseries',
'add_station',
'remove_ssh',
'save_timeseries',
'make_tree',
'is_water',
'get_nearest_series',
'get_nearest_water']
def is_model(cube):
"""
Heuristic way to find whether a cube's data is `modelResult` or not.
WARNING: This function may return False positives and False
negatives!!!
Examples
--------
>>> import iris
>>> iris.FUTURE.netcdf_promote = True
>>> url = ("http://crow.marine.usf.edu:8080/thredds/dodsC/"
... "FVCOM-Nowcast-Agg.nc")
>>> cubes = iris.load_raw(url, 'sea_surface_height_above_geoid')
>>> [is_model(cube) for cube in cubes]
[True]
>>> url = ("http://thredds.cdip.ucsd.edu/thredds/dodsC/cdip/archive/"
... "043p1/043p1_d17.nc")
>>> cubes = iris.load_raw(url, 'sea_surface_temperature')
>>> [is_model(cube) for cube in cubes]
[False]
"""
# First criteria (Strong): "forecast" word in the time coord.
try:
coords = cube.coords(axis='T')
for coord in coords:
if 'forecast' in coord.name():
return True
except CoordinateNotFoundError:
pass
# Second criteria (Strong): `UGRID` cubes are models.
conventions = cube.attributes.get('Conventions', 'None')
if 'UGRID' in conventions.upper():
return True
# Third criteria (Strong): dimensionless coords are present.
try:
coords = cube.coords(axis='Z')
for coord in coords:
if 'ocean_' in coord.name():
return True
except CoordinateNotFoundError:
pass
# Fourth criteria (weak): assumes that cubes with a "GRID" attribute are models.
cdm_data_type = cube.attributes.get('cdm_data_type', 'None')
feature_type = cube.attributes.get('featureType', 'None')
source = cube.attributes.get('source', 'None')
if cdm_data_type.upper() == 'GRID' or feature_type.upper() == 'GRID':
if 'AVHRR' not in source:
return True
return False
def z_coord(cube):
"""
Return the canonical vertical coordinate.
Examples
--------
>>> import iris
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> str(z_coord(cube).name())
'ocean_s_coordinate_g1'
"""
non_dimensional = ['atmosphere_hybrid_height_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'atmosphere_sigma_coordinate',
'atmosphere_sleve_coordinate',
'ocean_s_coordinate',
'ocean_s_coordinate_g1',
'ocean_s_coordinate_g2',
'ocean_sigma_coordinate',
'ocean_sigma_z_coordinate']
z = None
# If only one exists get that.
try:
z = cube.coord(axis='Z')
except CoordinateNotFoundError:
# If a named `z_coord` exists.
try:
z = cube.coord(axis='altitude')
except CoordinateNotFoundError:
# OK, let's use the non-dimensional names.
for coord in cube.coords(axis='Z'):
if coord.name() in non_dimensional:
z = coord
break
return z
def _get_surface_idx(cube):
"""
Return the `cube` index for the surface layer for any model grid
(rgrid, ugrid, sgrid), and any non-dimensional coordinate.
"""
z = z_coord(cube)
if not z:
msg = "Cannot find the surface for cube {!r}".format
raise ValueError(msg(cube))
else:
if np.argmin(z.shape) == 0 and z.ndim == 2:
points = z[:, 0].points
elif np.argmin(z.shape) == 1 and z.ndim == 2:
points = z[0, :].points
else:
points = z.points
positive = z.attributes.get('positive', None)
if positive == 'up':
idx = np.unique(points.argmax(axis=0))[0]
else:
idx = np.unique(points.argmin(axis=0))[0]
return idx
def get_surface(cube):
"""
Work around `iris.cube.Cube.slices` error:
The requested coordinates are not orthogonal.
Examples
--------
>>> import iris
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> cube.ndim == 4
True
>>> get_surface(cube).ndim == 3
True
"""
conventions = cube.attributes.get('Conventions', 'None')
idx = _get_surface_idx(cube)
if cube.ndim == 4 or 'UGRID' in conventions.upper():
return cube[:, int(idx), ...]
elif cube.ndim == 3 and 'UGRID' not in conventions.upper():
return cube[int(idx), ...]
else:
msg = "Cannot find the surface for cube {!r}".format
raise ValueError(msg(cube))
def time_coord(cube):
"""
Return the variable attached to time axis and rename it to time.
Examples
--------
>>> import iris
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> str(time_coord(cube).name())
'time'
"""
try:
cube.coord(axis='T').rename('time')
except CoordinateNotFoundError:
pass
timevar = cube.coord('time')
return timevar
def time_near(cube, datetime_obj):
"""
Return the nearest index to a `datetime_obj`.
Examples
--------
>>> import iris
>>> from datetime import datetime
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> isinstance(time_near(cube, datetime.utcnow()), np.integer)
True
"""
timevar = time_coord(cube)
try:
time = timevar.units.date2num(datetime_obj)
idx = timevar.nearest_neighbour_index(time)
except IndexError:
idx = -1
return idx
def time_slice(cube, start, stop=None):
"""
Slice time by indexes using a nearest criteria.
NOTE: Assumes time is the first dimension!
Examples
--------
>>> import iris
>>> from datetime import datetime, timedelta
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> stop = datetime.utcnow()
>>> start = stop - timedelta(days=7)
>>> time_slice(cube, start, stop).shape[0] < cube.shape[0]
True
"""
istart = time_near(cube, start)
if stop:
istop = time_near(cube, stop)
if istart == istop:
raise ValueError('istart must be different from istop! '
'Got istart {!r} and '
' istop {!r}'.format(istart, istop))
return cube[istart:istop, ...]
else:
return cube[istart, ...]
def _minmax(v):
return np.min(v), np.max(v)
def _get_indices(cube, bbox):
"""
Get the 4 corner indices of a `cube` given a `bbox`.
Examples
--------
>>> import iris
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> bbox = [-87.40, 24.25, -74.70, 36.70]
>>> idxs = _get_indices(cube, bbox)
>>> [isinstance(idx, np.integer) for idx in idxs]
[True, True, True, True]
>>> idxs
(27, 320, 164, 429)
"""
from oceans import wrap_lon180
lons = cube.coord('longitude').points
lats = cube.coord('latitude').points
lons = wrap_lon180(lons)
inregion = np.logical_and(np.logical_and(lons > bbox[0],
lons < bbox[2]),
np.logical_and(lats > bbox[1],
lats < bbox[3]))
region_inds = np.where(inregion)
imin, imax = _minmax(region_inds[0])
jmin, jmax = _minmax(region_inds[1])
return imin, imax+1, jmin, jmax+1
def bbox_extract_2Dcoords(cube, bbox):
"""
Extract a sub-set of a cube inside a lon, lat bounding box
    bbox = [lon_min, lat_min, lon_max, lat_max].
    NOTE: This is a workaround to subset an iris cube that has
2D lon, lat coords.
Examples
--------
>>> import iris
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> bbox = [-87.40, 24.25, -74.70, 36.70]
>>> new_cube = bbox_extract_2Dcoords(cube, bbox)
>>> cube.shape != new_cube.shape
True
"""
imin, imax, jmin, jmax = _get_indices(cube, bbox)
return cube[..., imin:imax, jmin:jmax]
def bbox_extract_1Dcoords(cube, bbox):
"""
Same as bbox_extract_2Dcoords but for 1D coords.
Examples
--------
>>> import iris
>>> url = "http://oos.soest.hawaii.edu/thredds/dodsC/pacioos/hycom/global"
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> bbox = [272.6, 24.25, 285.3, 36.70]
>>> new_cube = bbox_extract_1Dcoords(cube, bbox)
>>> cube.shape != new_cube.shape
True
"""
lat = Constraint(latitude=lambda cell: bbox[1] <= cell <= bbox[3])
lon = Constraint(longitude=lambda cell: bbox[0] <= cell <= bbox[2])
cube = cube.extract(lon & lat)
return cube
def subset(cube, bbox):
"""
    Subset a cube with either 1D or 2D lon, lat coords.
    By using `intersection` instead of `extract` we deal with 0--360
longitudes automagically.
Examples
--------
>>> import iris
>>> url = "http://oos.soest.hawaii.edu/thredds/dodsC/pacioos/hycom/global"
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> bbox = [272.6, 24.25, 285.3, 36.70]
>>> new_cube = subset(cube, bbox)
>>> cube.shape != new_cube.shape
True
"""
if (cube.coord(axis='X').ndim == 1 and cube.coord(axis='Y').ndim == 1):
# Workaround `cube.intersection` hanging up on FVCOM models.
title = cube.attributes.get('title', 'untitled')
featureType = cube.attributes.get('featureType', None)
if (('FVCOM' in title) or ('ESTOFS' in title) or
featureType == 'timeSeries'):
cube = bbox_extract_1Dcoords(cube, bbox)
else:
cube = cube.intersection(longitude=(bbox[0], bbox[2]),
latitude=(bbox[1], bbox[3]))
elif (cube.coord(axis='X').ndim == 2 and
cube.coord(axis='Y').ndim == 2):
cube = bbox_extract_2Dcoords(cube, bbox)
else:
msg = "Cannot deal with X:{!r} and Y:{!r} dimensions."
        raise CoordinateMultiDimError(msg.format(cube.coord(axis='X').ndim,
                                                 cube.coord(axis='Y').ndim))
return cube
def _filter_none(lista):
return [x for x in lista if x is not None]
def _in_list(cube, name_list):
return cube.standard_name in name_list
def quick_load_cubes(url, name_list, callback=None, strict=False):
"""
Return all cubes found using a `name_list` of standard_names. The cubes
found can be transformed via a `callback` function.
    If `strict` is set to True the function will return a single cube if
    possible, otherwise an exception will be raised.
TODO: Create a criteria to choose a sensor.
buoy = "http://129.252.139.124/thredds/dodsC/fldep.stlucieinlet..nc"
buoy = "http://129.252.139.124/thredds/dodsC/lbhmc.cherrygrove.pier.nc"
Examples
--------
>>> import iris
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> name_list = ['sea_water_potential_temperature']
>>> cubes = quick_load_cubes(url, name_list)
>>> cube = quick_load_cubes(url, name_list, strict=True)
>>> isinstance(cubes, list)
True
>>> isinstance(cube, iris.cube.Cube)
True
"""
cubes = iris.load_raw(url, callback=callback)
cubes = CubeList([cube for cube in cubes if _in_list(cube, name_list)])
cubes = _filter_none(cubes)
if not cubes:
raise ValueError('Cannot find {!r} in {}.'.format(name_list, url))
if strict:
if len(cubes) == 1:
return cubes[0]
else:
msg = "> 1 cube found! Expected just one.\n {!r}".format
raise ValueError(msg(cubes))
return cubes
def proc_cube(cube, bbox=None, time=None, constraint=None, units=None):
"""
    Constrain a cube by `bbox`, `time`, and/or an iris `constraint` object;
    the `units` can also be converted.
Examples
--------
>>> import pytz
>>> import iris
>>> from datetime import date, datetime, timedelta
>>> url = ('http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/'
... 'SABGOM_Forecast_Model_Run_Collection_best.ncd')
>>> today = date.today().timetuple()
>>> stop = datetime(today.tm_year, today.tm_mon, today.tm_mday, 12)
>>> start = stop - timedelta(days=7)
>>> bbox = [-87.40, 24.25, -74.70, 36.70]
>>> name_list = ['sea_water_potential_temperature']
>>> cube = quick_load_cubes(url, name_list, strict=True)
>>> new_cube = proc_cube(cube, bbox=bbox, time=(start, stop))
>>> cube.shape != new_cube.shape
True
"""
if constraint:
cube = cube.extract(constraint)
if not cube:
raise ValueError('No cube using {!r}'.format(constraint))
if bbox:
cube = subset(cube, bbox)
if not cube:
raise ValueError('No cube using {!r}'.format(bbox))
if time:
if isinstance(time, datetime):
start, stop = time, None
elif isinstance(time, tuple):
start, stop = time[0], time[1]
else:
raise ValueError('Time must be start or (start, stop).'
' Got {!r}'.format(time))
cube = time_slice(cube, start, stop)
if units:
if cube.units != units:
cube.convert_units(units)
return cube
def add_mesh(cube, url):
"""
    Adds the unstructured mesh info to the cube. Soon in an iris near you!
"""
from pyugrid import UGrid
ug = UGrid.from_ncfile(url)
cube.mesh = ug
cube.mesh_dimension = 1
return cube
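# A minimal usage sketch (not part of the original module): load a UGRID
# dataset and attach its mesh with `add_mesh`.  The `ugrid_url` and the
# variable name below are placeholders/assumptions, not known-good endpoints.
def _example_add_mesh(ugrid_url, name='sea_surface_height_above_geoid'):
    cube = quick_load_cubes(ugrid_url, [name], strict=True)
    return add_mesh(cube, ugrid_url)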
def _make_aux_coord(cube, axis='Y'):
"""Make any given coordinate an Auxiliary Coordinate."""
coord = cube.coord(axis=axis)
cube.remove_coord(coord)
if cube.ndim == 2:
cube.add_aux_coord(coord, 1)
else:
cube.add_aux_coord(coord)
return cube
def ensure_timeseries(cube):
"""Ensure that the cube is CF-timeSeries compliant."""
    if cube.coord('time').shape[0] != cube.shape[0]:
cube.transpose()
_make_aux_coord(cube, axis='Y')
_make_aux_coord(cube, axis='X')
cube.attributes.update({'featureType': 'timeSeries'})
cube.coord("station name").attributes = dict(cf_role='timeseries_id')
return cube
def add_station(cube, station):
"""Add a station Auxiliary Coordinate and its name."""
kw = dict(var_name="station", long_name="station name")
coord = iris.coords.AuxCoord(station, **kw)
cube.add_aux_coord(coord)
return cube
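# A minimal sketch (not part of the original module) tying the helpers above
# together: tag a cube with a station name, then make it CF-timeSeries
# compliant.  The cube is assumed to be 2D (time, station); the station label
# is arbitrary.
def _example_station_cube(cube, station='hypothetical-buoy'):
    cube = add_station(cube, station)
    return ensure_timeseries(cube)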
def remove_ssh(cube):
"""
    Remove all `aux_coords`, except time, that have the same shape as
    the data. NOTE: This also removes `aux_factories` to avoid an update
    error when removing the coordinate.
Examples
--------
>>> import iris
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> cube = get_surface(cube)
>>> len(cube.coords())
10
>>> cube = remove_ssh(cube)
>>> len(cube.coords())
8
"""
for factory in cube.aux_factories:
cube.remove_aux_factory(factory)
for coord in cube.aux_coords:
if coord.shape == cube.shape:
if 'time' not in coord.name():
cube.remove_coord(coord.name())
return cube
def save_timeseries(df, outfile, standard_name, **kw):
"""http://cfconventions.org/Data/cf-convetions/cf-conventions-1.6/build
/cf-conventions.html#idp5577536"""
cube = as_cube(df, calendars={1: cf_units.CALENDAR_GREGORIAN})
cube.coord("index").rename("time")
    # Cast all station names to strings and rename the coordinate.
columns = cube.coord('columns').points.astype(str).tolist()
cube.coord('columns').points = columns
cube.coord("columns").rename("station name")
cube.rename(standard_name)
cube.coord("station name").var_name = 'station'
longitude = kw.get("longitude")
latitude = kw.get("latitude")
if longitude is not None:
longitude = iris.coords.AuxCoord(np.float_(longitude),
var_name="lon",
standard_name="longitude",
long_name="station longitude",
units=cf_units.Unit("degrees"))
cube.add_aux_coord(longitude, data_dims=1)
if latitude is not None:
latitude = iris.coords.AuxCoord(np.float_(latitude),
var_name="lat",
standard_name="latitude",
long_name="station latitude",
units=cf_units.Unit("degrees"))
cube.add_aux_coord(latitude, data_dims=1)
cube.units = kw.get('units')
station_attr = kw.get("station_attr")
if station_attr is not None:
cube.coord("station name").attributes.update(station_attr)
cube_attr = kw.get("cube_attr")
if cube_attr is not None:
cube.attributes.update(cube_attr)
iris.save(cube, outfile)
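# A hedged usage sketch (not part of the original module) for
# `save_timeseries`.  It assumes `df` is a single-station observed series
# (time index, one column); the coordinates, units and attributes below are
# illustrative values only.
def _example_save_timeseries(df, outfile='observed_series.nc'):
    save_timeseries(df, outfile=outfile,
                    standard_name='sea_water_temperature',
                    units='degC',
                    longitude=-75.71, latitude=36.16,
                    station_attr=dict(cf_role='timeseries_id'),
                    cube_attr=dict(featureType='timeSeries'))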
def make_tree(cube):
"""
Return a scipy KDTree object to search a cube.
NOTE: iris does have its own implementation for searching with KDTrees, but
it does not work for 2D coords like this one.
Examples
--------
>>> import iris
>>> from scipy.spatial import cKDTree as KDTree
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> cube = get_surface(cube)
>>> tree, lon, lat = make_tree(cube)
>>> isinstance(tree, KDTree)
True
"""
lon = cube.coord(axis='X').points
lat = cube.coord(axis='Y').points
# Structured models with 1D lon, lat.
if (lon.ndim == 1) and (lat.ndim == 1) and (cube.ndim == 3):
lon, lat = np.meshgrid(lon, lat)
# Unstructured are already paired!
tree = KDTree(list(zip(lon.ravel(), lat.ravel())))
return tree, lon, lat
def is_water(cube, min_var=0.01):
"""
    Return True only when the standard deviation of the time series exceeds
    `min_var` (default 0.01 m, i.e. 1 cm); this eliminates flat-line model
    time series that come from land points that should have had missing values.
(Accounts for wet-and-dry models.)
"""
arr = ma.masked_invalid(cube.data).filled(fill_value=0)
if arr.std() <= min_var:
return False
return True
def get_nearest_series(cube, tree, xi, yi, k=10, max_dist=0.04):
"""
Find `k` nearest model data points from an iris `cube` at station
lon: `xi`, lat: `yi` up to `max_dist` in degrees. Must provide a Scipy's
KDTree `tree`.
Examples
--------
>>> import iris
>>> from scipy.spatial import cKDTree as KDTree
>>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
... "SABGOM_Forecast_Model_Run_Collection_best.ncd")
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> cube = get_surface(cube)
>>> tree, lon, lat = make_tree(cube)
>>> series, dist, idx = get_nearest_series(cube, tree,
... lon[0, 10], lat[0, 10],
... k=10, max_dist=0.04)
>>> idx == (0, 10)
True
>>> is_water(series, min_var=0.01)
False
>>> series, dist, idx = get_nearest_series(cube, tree,
... -75.7135500943, 36.1640944084,
... k=10, max_dist=0.04)
>>> is_water(series, min_var=0.01)
True
"""
distances, indices = tree.query(np.array([xi, yi]).T, k=k)
if indices.size == 0:
raise ValueError("No data found.")
# Get data up to specified distance.
mask = distances <= max_dist
distances, indices = distances[mask], indices[mask]
if distances.size == 0:
msg = "No data near ({}, {}) max_dist={}.".format
raise ValueError(msg(xi, yi, max_dist))
# Unstructured model.
if (cube.coord(axis='X').ndim == 1) and (cube.ndim == 2):
i = j = indices
unstructured = True
# Structured model.
else:
unstructured = False
if cube.coord(axis='X').ndim == 2: # CoordinateMultiDim
i, j = np.unravel_index(indices, cube.coord(axis='X').shape)
else:
shape = (cube.coord(axis='Y').shape[0],
cube.coord(axis='X').shape[0])
i, j = np.unravel_index(indices, shape)
series, dist, idx = None, None, None
IJs = list(zip(i, j))
for dist, idx in zip(distances, IJs):
idx = tuple([int(kk) for kk in idx])
if unstructured: # NOTE: This would be so elegant in py3k!
idx = (idx[0],)
        # This weird syntax allows for idx to be len 1 or 2.
series = cube[(slice(None),)+idx]
return series, dist, idx
def get_nearest_water(cube, tree, xi, yi, k=10, max_dist=0.04, min_var=0.01):
"""
Legacy function. Use `get_nearest_series`+`is_water` instead!
"""
distances, indices = tree.query(np.array([xi, yi]).T, k=k)
if indices.size == 0:
raise ValueError("No data found.")
# Get data up to specified distance.
mask = distances <= max_dist
distances, indices = distances[mask], indices[mask]
if distances.size == 0:
msg = "No data near ({}, {}) max_dist={}.".format
raise ValueError(msg(xi, yi, max_dist))
# Unstructured model.
if (cube.coord(axis='X').ndim == 1) and (cube.ndim == 2):
i = j = indices
unstructured = True
# Structured model.
else:
unstructured = False
if cube.coord(axis='X').ndim == 2: # CoordinateMultiDim
i, j = np.unravel_index(indices, cube.coord(axis='X').shape)
else:
shape = (cube.coord(axis='Y').shape[0],
cube.coord(axis='X').shape[0])
i, j = np.unravel_index(indices, shape)
IJs = list(zip(i, j))
for dist, idx in zip(distances, IJs):
idx = tuple([int(kk) for kk in idx])
if unstructured: # NOTE: This would be so elegant in py3k!
idx = (idx[0],)
        # This weird syntax allows for idx to be len 1 or 2.
series = cube[(slice(None),)+idx]
        if is_water(series, min_var=min_var):
break
else:
series = None
continue
return series, dist, idx
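# A hedged sketch (not part of the original module) of the replacement the
# docstring above recommends: query with `get_nearest_series` and accept the
# result only if `is_water` passes.  Argument defaults mirror the legacy call.
def _example_nearest_wet_series(cube, tree, xi, yi,
                                k=10, max_dist=0.04, min_var=0.01):
    series, dist, idx = get_nearest_series(cube, tree, xi, yi,
                                           k=k, max_dist=max_dist)
    if not is_water(series, min_var=min_var):
        series = None
    return series, dist, idx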
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"repo_name": "ocefpaf/utilities",
"path": "utilities/tardis.py",
"copies": "2",
"size": "24614",
"license": "mit",
"hash": 7318299326451595000,
"line_mean": 32.1278600269,
"line_max": 79,
"alpha_frac": 0.5768668238,
"autogenerated": false,
"ratio": 3.4579938184883394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5034860642288339,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime, timedelta
from django.db.models import Q
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry.app import search
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializer
from sentry.constants import (
DEFAULT_SORT_OPTION, STATUS_CHOICES
)
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, Group, GroupBookmark, GroupSeen, GroupStatus, TagKey
)
from sentry.search.utils import parse_query
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.cursors import Cursor
class GroupSerializer(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
))
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
merge = serializers.BooleanField()
class ProjectGroupIndexEndpoint(ProjectEndpoint):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission,)
def _parse_date(self, value):
return datetime.utcfromtimestamp(float(value)).replace(
tzinfo=timezone.utc,
)
# bookmarks=0/1
# status=<x>
# <tag>=<value>
def get(self, request, project):
"""
List a project's aggregates
Return a list of aggregates bound to a project.
{method} {path}
        A default query of 'is:unresolved' is applied. To return results with
        other statuses send a new query value (i.e. ?query= for all results).
Any standard Sentry structured search query can be passed via the
``query`` parameter.
"""
query_kwargs = {
'project': project,
}
if request.GET.get('status'):
try:
query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
except KeyError:
return Response('{"detail": "invalid status"}', status=400)
if request.user.is_authenticated() and request.GET.get('bookmarks'):
query_kwargs['bookmarked_by'] = request.user
if request.user.is_authenticated() and request.GET.get('assigned'):
query_kwargs['assigned_to'] = request.user
sort_by = request.GET.get('sort')
if sort_by is None:
sort_by = DEFAULT_SORT_OPTION
query_kwargs['sort_by'] = sort_by
tags = {}
for tag_key in TagKey.objects.all_keys(project):
if request.GET.get(tag_key):
tags[tag_key] = request.GET[tag_key]
if tags:
query_kwargs['tags'] = tags
# TODO: dates should include timestamps
date_from = request.GET.get('since')
date_to = request.GET.get('until')
date_filter = request.GET.get('date_filter')
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
return Response('{"detail": "invalid limit"}', status=400)
today = timezone.now()
if date_from:
date_from = self._parse_date(date_from)
else:
date_from = today - timedelta(days=5)
if date_to:
date_to = self._parse_date(date_to)
query_kwargs['date_from'] = date_from
query_kwargs['date_to'] = date_to
if date_filter:
query_kwargs['date_filter'] = date_filter
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved')
if query is not None:
query_kwargs.update(parse_query(query, request.user))
cursor_result = search.query(**query_kwargs)
context = list(cursor_result)
response = Response(serialize(context, request.user, StreamGroupSerializer()))
response['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return response
def put(self, request, project):
"""
Bulk mutate a list of aggregates
Bulk mutate various attributes on aggregates.
{method} {path}?id=1&id=2&id=3
{{
"status": "resolved",
"isBookmarked": true
}}
- For non-status updates, the 'id' parameter is required.
- For status updates, the 'id' parameter may be omitted for a batch
"update all" query.
- An optional 'status' parameter may be used to restrict mutations to
only events with the given status.
For example, to resolve all aggregates (project-wide):
{method} {path}
{{
"status": "resolved"
}}
Attributes:
- status: resolved, unresolved, muted
- hasSeen: true, false
- isBookmarked: true, false
- isPublic: true, false
- merge: true, false
If any ids are out of scope this operation will succeed without any data
mutation.
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.object
# validate that we've passed a selector for non-status bulk operations
if not group_ids and result.keys() != ['status']:
return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)
if group_ids:
filters = [Q(id__in=group_ids)]
else:
filters = [Q(project=project)]
if request.GET.get('status'):
try:
status_filter = STATUS_CHOICES[request.GET['status']]
except KeyError:
return Response('{"detail": "Invalid status"}', status=400)
filters.append(Q(status=status_filter))
if result.get('status') == 'resolved':
now = timezone.now()
happened = Group.objects.filter(*filters).exclude(
status=GroupStatus.RESOLVED,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
if group_list and happened:
for group in group_list:
group.status = GroupStatus.RESOLVED
group.resolved_at = now
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED,
user=request.user,
)
activity.send_notification()
elif result.get('status'):
new_status = STATUS_CHOICES[result['status']]
happened = Group.objects.filter(*filters).exclude(
status=new_status,
).update(
status=new_status,
)
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
elif new_status == GroupStatus.MUTED:
activity_type = Activity.SET_MUTED
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=request.user,
)
activity.send_notification()
if result.get('hasSeen'):
for group in group_list:
instance, created = create_or_update(
GroupSeen,
group=group,
user=request.user,
project=group.project,
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=request.user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=group.project,
group=group,
user=request.user,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=request.user,
).delete()
if result.get('isPublic'):
Group.objects.filter(
id__in=group_ids,
).update(is_public=True)
for group in group_list:
if group.is_public:
continue
group.is_public = True
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PUBLIC,
user=request.user,
)
elif result.get('isPublic') is False:
Group.objects.filter(
id__in=group_ids,
).update(is_public=False)
for group in group_list:
if not group.is_public:
continue
group.is_public = False
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PRIVATE,
user=request.user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
for group in group_list:
if group == primary_group:
continue
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
)
return Response(dict(result))
def delete(self, request, project):
"""
Bulk remove a list of aggregates
Permanently remove the given aggregates.
Only queries by 'id' are accepted.
{method} {path}?id=1&id=2&id=3
If any ids are out of scope this operation will succeed without any data
        mutation.
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
else:
# missing any kind of filter
return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)
if not group_ids:
return Response(status=204)
# TODO(dcramer): set status to pending deletion
for group in group_list:
delete_group.delay(object_id=group.id)
return Response(status=204)
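# A hedged client-side sketch (not part of this module) of the bulk PUT
# documented above.  `session` is assumed to be an authenticated
# `requests.Session`; the URL pattern, slugs and IDs are placeholders and may
# not match the routes actually wired to this endpoint.
def _example_bulk_resolve(session, base_url, org_slug, project_slug, group_ids):
    return session.put(
        '{}/api/0/projects/{}/{}/groups/'.format(base_url, org_slug,
                                                 project_slug),
        params=[('id', gid) for gid in group_ids],
        json={'status': 'resolved', 'isBookmarked': True},
    )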
| {
"repo_name": "1tush/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "2",
"size": "12113",
"license": "bsd-3-clause",
"hash": -5303712696215774000,
"line_mean": 32.9299719888,
"line_max": 106,
"alpha_frac": 0.5516387352,
"autogenerated": false,
"ratio": 4.511359404096834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6062998139296834,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime, timedelta
from functools import partial
from datashape import discover, Categorical, DateTime
from datashape import string, object_, datetime_, Option
import datashape
import pandas as pd
import numpy as np
from ..convert import convert
possibly_missing = frozenset({string, datetime_})
categorical = type(pd.Categorical.dtype)
def dshape_from_pandas(col):
if isinstance(col.dtype, categorical):
return Categorical(col.cat.categories.tolist())
elif col.dtype.kind == 'M':
tz = getattr(col.dtype, 'tz', None)
if tz is not None:
# Pandas stores this as a pytz.tzinfo, but DataShape wants a
# string.
tz = str(tz)
return Option(DateTime(tz=tz))
dshape = datashape.CType.from_numpy_dtype(col.dtype)
dshape = string if dshape == object_ else dshape
return Option(dshape) if dshape in possibly_missing else dshape
@discover.register(pd.DataFrame)
def discover_dataframe(df):
return len(df) * datashape.Record([(k, dshape_from_pandas(df[k]))
for k in df.columns])
@discover.register((pd.Series, pd.Index))
def discover_1d(array_like):
return len(array_like) * dshape_from_pandas(array_like)
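# A minimal sketch (not part of the original module) of what the `discover`
# registrations above report for a small mixed-dtype frame; the column names
# and values are arbitrary.
def _example_discover_frame():
    df = pd.DataFrame({
        'name': pd.Categorical(['a', 'b', 'a']),
        'when': pd.to_datetime(['2014-01-01', '2014-01-02', '2014-01-03']),
        'amount': [1.0, 2.0, 3.0],
    })
    # Expected: a Record with a Categorical, an Option(DateTime) and a float64.
    return discover(df)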
def coerce_datetimes(df):
""" Make object columns into datetimes if possible
Warning: this operates inplace.
Example
-------
>>> df = pd.DataFrame({'dt': ['2014-01-01'], 'name': ['Alice']})
>>> df.dtypes # note that these are strings/object
dt object
name object
dtype: object
>>> df2 = coerce_datetimes(df)
>>> df2
dt name
0 2014-01-01 Alice
>>> df2.dtypes # note that only the datetime-looking-one was transformed
dt datetime64[ns]
name object
dtype: object
"""
objects = df.select_dtypes(include=['object'])
# NOTE: In pandas < 0.17, pd.to_datetime(' ') == datetime(...), which is
# not what we want. So we have to remove columns with empty or
# whitespace-only strings to prevent erroneous datetime coercion.
columns = [
c for c in objects.columns
if not np.any(objects[c].str.isspace() | objects[c].str.isalpha())
]
df2 = objects[columns].apply(partial(pd.to_datetime, errors='ignore'))
for c in df2.columns:
df[c] = df2[c]
return df
@convert.register(pd.Timestamp, datetime)
def convert_datetime_to_timestamp(dt, **kwargs):
return pd.Timestamp(dt)
@convert.register((pd.Timestamp, pd.Timedelta), float)
def nan_to_nat(fl, **kwargs):
try:
if np.isnan(fl):
# Only nan->nat edge
return pd.NaT
except TypeError:
pass
raise NotImplementedError()
@convert.register((pd.Timestamp, pd.Timedelta), (type(pd.NaT), type(None)))
def convert_null_or_nat_to_nat(n, **kwargs):
return pd.NaT
@convert.register(pd.Timedelta, timedelta)
def convert_timedelta_to_pd_timedelta(dt, **kwargs):
if dt is None:
return pd.NaT
return pd.Timedelta(dt)
| {
"repo_name": "quantopian/odo",
"path": "odo/backends/pandas.py",
"copies": "1",
"size": "3123",
"license": "bsd-3-clause",
"hash": 2349492924634632700,
"line_mean": 27.1351351351,
"line_max": 77,
"alpha_frac": 0.6452129363,
"autogenerated": false,
"ratio": 3.6526315789473682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797844515247368,
"avg_score": 0,
"num_lines": 111
} |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from datetime import datetime
import astropy.units as u
from sunpy.time import parse_time
from sunpy import config
from sunpy.extern.six.moves import range
TIME_FORMAT = config.get('general', 'time_format')
__all__ = ['TimeRange']
class TimeRange(object):
"""
An object to handle time ranges.
.. note::
Regardless of how a TimeRange is constructed it will always provide a
positive time range where the start time is before the end time.
Parameters
----------
a : str, number, `datetime.datetime`
A time (usually the start time) specified as a parse_time-compatible
time string, number, or a datetime object.
b : str, number, `datetime.datetime`, `datetime.timedelta`, `astropy.units.Quantity` (time)
Another time (usually the end time) specified as a
parse_time-compatible time string, number, or a datetime object.
May also be the size of the time range specified as a timedelta object,
or a `astropy.units.Quantity`.
Examples
--------
>>> from sunpy.time import TimeRange
>>> time_range = TimeRange('2010/03/04 00:10', '2010/03/04 00:20')
>>> time_range = TimeRange(('2010/03/04 00:10', '2010/03/04 00:20'))
>>> import astropy.units as u
>>> time_range = TimeRange('2010/03/04 00:10', 400 * u.s)
>>> time_range = TimeRange('2010/03/04 00:10', 400 * u.day)
"""
def __init__(self, a, b=None):
"""Creates a new TimeRange instance"""
# If a is a TimeRange object, copy attributes to new instance.
self._t1 = None
self._t2 = None
if isinstance(a, TimeRange):
self.__dict__ = a.__dict__.copy()
return
# Normalize different input types
if b is None:
x = parse_time(a[0])
if len(a) != 2:
raise ValueError('If b is None a must have two elements')
else:
y = a[1]
else:
x = parse_time(a)
y = b
if isinstance(y, u.Quantity):
y = timedelta(seconds=y.to('s').value)
# Timedelta
if isinstance(y, timedelta):
if y.days >= 0:
self._t1 = x
self._t2 = x + y
else:
self._t1 = x + y
self._t2 = x
return
# Otherwise, assume that the second argument is parse_time-compatible
y = parse_time(y)
if isinstance(y, datetime):
if x < y:
self._t1 = x
self._t2 = y
else:
self._t1 = y
self._t2 = x
@property
def start(self):
"""
Get the start time
Returns
-------
start : `datetime.datetime`
"""
return self._t1
@property
def end(self):
"""
Get the end time
Returns
-------
end : `datetime.datetime`
"""
return self._t2
@property
def dt(self):
"""
Get the length of the time range. Always a positive value.
Returns
-------
dt : `datetime.timedelta`
"""
return self._t2 - self._t1
@property
def center(self):
"""
Gets the center of the TimeRange instance.
Returns
-------
value : `datetime.datetime`
"""
return self.start + self.dt // 2
@property
def hours(self):
"""
Get the number of hours elapsed.
Returns
-------
value : `astropy.units.Quantity`
"""
return self._duration.to('hour')
@property
def days(self):
"""
Gets the number of days elapsed.
Returns
-------
value : `astropy.units.Quantity`
"""
return self._duration.to('d')
@property
def seconds(self):
"""
Gets the number of seconds elapsed.
Returns
-------
value : `astropy.units.Quantity`
"""
return self._duration.to('s')
@property
def minutes(self):
"""
Gets the number of minutes elapsed.
Returns
-------
value : `astropy.units.Quantity`
"""
return self._duration.to('min')
@property
def _duration(self):
"""
The duration of the time range.
Returns
-------
value : `astropy.units.Quantity`
"""
result = self.dt.microseconds * u.Unit('us') + self.dt.seconds * u.Unit('s') + self.dt.days * u.Unit('day')
return result
def __repr__(self):
"""
Returns a human-readable representation of the TimeRange instance."""
t1 = self.start.strftime(TIME_FORMAT)
t2 = self.end.strftime(TIME_FORMAT)
center = self.center.strftime(TIME_FORMAT)
return (' Start:'.ljust(11) + t1 +
'\n End:'.ljust(12) + t2 +
'\n Center:'.ljust(12) + center +
'\n Duration:'.ljust(12) + str(self.days.value) + ' days or' +
'\n '.ljust(12) + str(self.hours.value) + ' hours or' +
'\n '.ljust(12) + str(self.minutes.value) + ' minutes or' +
'\n '.ljust(12) + str(self.seconds.value) + ' seconds' +
'\n')
def split(self, n=2):
"""
Splits the TimeRange into multiple equally sized parts.
Parameters
----------
n : int
            The number of pieces to split the time range into (must be >= 1)
Returns
-------
time ranges: list
            A list of equally sized TimeRange objects between
the start and end times.
Raises
------
ValueError
If requested amount is less than 1
"""
if n <= 0:
raise ValueError('n must be greater than or equal to 1')
subsections = []
previous_time = self.start
next_time = None
for _ in range(n):
next_time = previous_time + self.dt // n
next_range = TimeRange(previous_time, next_time)
subsections.append(next_range)
previous_time = next_time
return subsections
def window(self, cadence, window):
"""
Split the TimeRange up into a series of TimeRange windows,
'window' long, between the start and end with a cadence of 'cadence'.
Parameters
----------
cadence : `astropy.units.Quantity`, `datetime.timedelta`
Cadence in seconds or a timedelta instance
        window : `astropy.units.Quantity`, `datetime.timedelta`
            The length of each window.
Returns
-------
time ranges : list
A list of TimeRange objects, that are window long and separated by
cadence.
Examples
--------
>>> import astropy.units as u
>>> from sunpy.time import TimeRange
>>> time_range = TimeRange('2010/03/04 00:10', '2010/03/04 01:20')
>>> time_range.window(60*60*u.s, window=12*u.s) # doctest: +NORMALIZE_WHITESPACE
[ Start: 2010-03-04 00:10:00
End: 2010-03-04 00:10:12
Center:2010-03-04 00:10:06
Duration:0.000138888888889 days or
0.00333333333333 hours or
0.2 minutes or
12.0 seconds
, Start: 2010-03-04 01:10:00
End: 2010-03-04 01:10:12
Center:2010-03-04 01:10:06
Duration:0.000138888888889 days or
0.00333333333333 hours or
0.2 minutes or
12.0 seconds
, Start: 2010-03-04 02:10:00
End: 2010-03-04 02:10:12
Center:2010-03-04 02:10:06
Duration:0.000138888888889 days or
0.00333333333333 hours or
0.2 minutes or
12.0 seconds
]
"""
if not isinstance(window, timedelta):
window = timedelta(seconds=window.to('s').value)
if not isinstance(cadence, timedelta):
cadence = timedelta(seconds=cadence.to('s').value)
n = 1
times = [TimeRange(self.start, self.start + window)]
while times[-1].end < self.end:
times.append(TimeRange(self.start + cadence * n,
self.start + cadence * n + window))
n += 1
return times
def next(self):
"""Shift the time range forward by the amount of time elapsed"""
dt = self.dt
self._t1 = self._t1 + dt
self._t2 = self._t2 + dt
return self
def previous(self):
"""Shift the time range backward by the amount of time elapsed"""
dt = self.dt
self._t1 = self._t1 - dt
self._t2 = self._t2 - dt
return self
def extend(self, dt_start, dt_end):
"""Extend the time range forwards and backwards
Parameters
----------
dt_start : `datetime.timedelta`
The amount to shift the start time
dt_end : `datetime.timedelta`
The amount to shift the end time
"""
# Only a timedelta object is acceptable here
self._t1 = self._t1 + dt_start
self._t2 = self._t2 + dt_end
def __contains__(self, time):
"""
Checks whether the given time lies within this range.
Both limits are inclusive (i.e. __contains__(t1) and __contains__(t2)
always return true)
Parameters
----------
time : `datetime.datetime`, str
A parse_time-compatible time to be checked.
Returns
-------
value : bool
True if time lies between start and end, False otherwise.
Examples
--------
>>> from sunpy.time import TimeRange
>>> time1 = '2014/5/5 12:11'
>>> time2 = '2012/5/5 12:11'
>>> time_range = TimeRange('2014/05/04 13:54', '2018/02/03 12:12')
>>> time1 in time_range
True
>>> time2 in time_range
False
"""
this_time = parse_time(time)
return this_time >= self.start and this_time <= self.end
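# A minimal sketch (not part of the original module) of the in-place helpers
# that have no doctest above: `next`, `previous` and `extend`.  The times are
# arbitrary examples.
def _example_shift_and_extend():
    tr = TimeRange('2010/03/04 00:10', '2010/03/04 00:20')
    tr.next()                                                # 00:20 -> 00:30
    tr.previous()                                            # 00:10 -> 00:20
    tr.extend(timedelta(minutes=-5), timedelta(minutes=5))   # 00:05 -> 00:25
    return tr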
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/time/timerange.py",
"copies": "1",
"size": "10431",
"license": "bsd-2-clause",
"hash": 4260113573274030000,
"line_mean": 28.1368715084,
"line_max": 115,
"alpha_frac": 0.5175917937,
"autogenerated": false,
"ratio": 4.142573471008737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160165264708737,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from django.db import IntegrityError, transaction
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry.app import search
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializer
from sentry.constants import DEFAULT_SORT_OPTION
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, EventMapping, Group, GroupBookmark, GroupResolution, GroupSeen,
GroupSnooze, GroupStatus, Release, TagKey
)
from sentry.search.utils import parse_query
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.cursors import Cursor
from sentry.utils.apidocs import scenario, attach_scenarios
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
@scenario('BulkUpdateAggregates')
def bulk_update_aggregates_scenario(runner):
project = runner.default_project
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='PUT',
path='/projects/%s/%s/issues/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
data={'status': 'unresolved', 'isPublic': False}
)
@scenario('BulkRemoveAggregates')
def bulk_remove_aggregates_scenario(runner):
with runner.isolated_project('Amazing Plumbing') as project:
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='DELETE',
path='/projects/%s/%s/issues/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
)
@scenario('ListProjectAggregates')
def list_project_aggregates_scenario(runner):
project = runner.default_project
runner.request(
method='GET',
path='/projects/%s/%s/issues/?statsPeriod=24h' % (
runner.org.slug, project.slug),
)
STATUS_CHOICES = {
'resolved': GroupStatus.RESOLVED,
'unresolved': GroupStatus.UNRESOLVED,
'muted': GroupStatus.MUTED,
'resolvedInNextRelease': GroupStatus.UNRESOLVED,
}
class ValidationError(Exception):
pass
class GroupSerializer(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
))
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
merge = serializers.BooleanField()
snoozeDuration = serializers.IntegerField()
class ProjectGroupIndexEndpoint(ProjectEndpoint):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission,)
def _build_query_params_from_request(self, request, project):
query_kwargs = {
'project': project,
}
if request.GET.get('status'):
try:
query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
except KeyError:
raise ValidationError('invalid status')
if request.user.is_authenticated() and request.GET.get('bookmarks'):
query_kwargs['bookmarked_by'] = request.user
if request.user.is_authenticated() and request.GET.get('assigned'):
query_kwargs['assigned_to'] = request.user
sort_by = request.GET.get('sort')
if sort_by is None:
sort_by = DEFAULT_SORT_OPTION
query_kwargs['sort_by'] = sort_by
tags = {}
for tag_key in TagKey.objects.all_keys(project):
if request.GET.get(tag_key):
tags[tag_key] = request.GET[tag_key]
if tags:
query_kwargs['tags'] = tags
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
raise ValidationError('invalid limit')
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if query:
query_kwargs.update(parse_query(project, query, request.user))
return query_kwargs
# bookmarks=0/1
# status=<x>
# <tag>=<value>
# statsPeriod=24h
@attach_scenarios([list_project_aggregates_scenario])
def get(self, request, project):
"""
List a Project's Aggregates
```````````````````````````
Return a list of aggregates bound to a project. All parameters are
supplied as query string parameters.
        A default query of ``is:unresolved`` is applied. To return results
        with other statuses send a new query value (i.e. ``?query=`` for all
        results).
The ``statsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam querystring query: an optional Sentry structured search
                                   query.  If not provided an implied
                                   ``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
groups belong to.
:pparam string project_slug: the slug of the project the groups
belong to.
:auth: required
"""
stats_period = request.GET.get('statsPeriod')
if stats_period not in (None, '', '24h', '14d'):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = '24h'
elif stats_period == '':
# disable stats
stats_period = None
query = request.GET.get('query')
if query and len(query) == 32:
# check to see if we've got an event ID
try:
mapping = EventMapping.objects.get(
project_id=project.id,
event_id=query,
)
except EventMapping.DoesNotExist:
pass
else:
matching_group = Group.objects.get(id=mapping.group_id)
return Response(serialize(
[matching_group], request.user, StreamGroupSerializer(
stats_period=stats_period
)
))
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': unicode(exc)}, status=400)
cursor_result = search.query(**query_kwargs)
results = list(cursor_result)
context = serialize(
results, request.user, StreamGroupSerializer(
stats_period=stats_period
)
)
# HACK: remove auto resolved entries
if query_kwargs.get('status') == GroupStatus.UNRESOLVED:
context = [
r for r in context
if r['status'] == 'unresolved'
]
response = Response(context)
response['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return response
@attach_scenarios([bulk_update_aggregates_scenario])
def put(self, request, project):
"""
Bulk Mutate a List of Aggregates
````````````````````````````````
Bulk mutate various attributes on aggregates. The list of groups
to modify is given through the `id` query parameter. It is repeated
for each group that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
        The following attributes can be modified and are supplied as a
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the groups to be mutated. This
parameter shall be repeated for each group. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to groups of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"muted"``.
:pparam string organization_slug: the slug of the organization the
groups belong to.
:pparam string project_slug: the slug of the project the groups
belong to.
:param string status: the new status for the groups. Valid values
are ``"resolved"``, ``"unresolved"`` and
``"muted"``.
:param int snoozeDuration: the number of minutes to mute this issue.
:param boolean isPublic: sets the group to public or private.
:param boolean merge: allows to merge or unmerge different groups.
:param boolean hasSeen: in case this API call is invoked with a user
context this allows changing of the flag
that indicates if the user has seen the
event.
:param boolean isBookmarked: in case this API call is invoked with a
user context this allows changing of
the bookmark flag.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = dict(serializer.object)
acting_user = request.user if request.user.is_authenticated() else None
if not group_ids:
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': unicode(exc)}, status=400)
# bulk mutations are limited to 1000 items
# TODO(dcramer): it'd be nice to support more than this, but its
# a bit too complicated right now
query_kwargs['limit'] = 1000
cursor_result = search.query(**query_kwargs)
group_list = list(cursor_result)
group_ids = [g.id for g in group_list]
queryset = Group.objects.filter(
id__in=group_ids,
)
if result.get('status') == 'resolvedInNextRelease':
try:
release = Release.objects.filter(
project=project,
).order_by('-date_added')[0]
except IndexError:
                return Response('{"detail": "No release data present in the system to form a basis for \'Next Release\'"}', status=400)
now = timezone.now()
for group in group_list:
try:
with transaction.atomic():
resolution, created = GroupResolution.objects.create(
group=group,
release=release,
), True
except IntegrityError:
resolution, created = GroupResolution.objects.get(
group=group,
), False
if created:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED_IN_RELEASE,
user=acting_user,
ident=resolution.id,
data={
# no version yet
'version': '',
}
)
activity.send_notification()
queryset.update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
result.update({
'status': 'resolved',
'statusDetails': {
'inNextRelease': True,
},
})
elif result.get('status') == 'resolved':
now = timezone.now()
happened = queryset.exclude(
status=GroupStatus.RESOLVED,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if group_list and happened:
for group in group_list:
group.status = GroupStatus.RESOLVED
group.resolved_at = now
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED,
user=acting_user,
)
activity.send_notification()
result['statusDetails'] = {}
elif result.get('status'):
new_status = STATUS_CHOICES[result['status']]
happened = queryset.exclude(
status=new_status,
).update(
status=new_status,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if new_status == GroupStatus.MUTED:
snooze_duration = result.pop('snoozeDuration', None)
if snooze_duration:
snooze_until = timezone.now() + timedelta(
minutes=snooze_duration,
)
for group in group_list:
GroupSnooze.objects.create_or_update(
group=group,
values={
'until': snooze_until,
}
)
result['statusDetails'] = {
'snoozeUntil': snooze_until,
}
else:
GroupSnooze.objects.filter(
group__in=group_ids,
).delete()
snooze_until = None
result['statusDetails'] = {}
else:
result['statusDetails'] = {}
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
activity_data = {}
elif new_status == GroupStatus.MUTED:
activity_type = Activity.SET_MUTED
activity_data = {
'snoozeUntil': snooze_until,
'snoozeDuration': snooze_duration,
}
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=acting_user,
data=activity_data,
)
activity.send_notification()
if result.get('hasSeen') and project.member_set.filter(user=request.user).exists():
for group in group_list:
instance, created = create_or_update(
GroupSeen,
group=group,
user=request.user,
project=group.project,
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=request.user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=project,
group=group,
user=request.user,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=request.user,
).delete()
if result.get('isPublic'):
queryset.update(is_public=True)
for group in group_list:
if group.is_public:
continue
group.is_public = True
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PUBLIC,
user=acting_user,
)
elif result.get('isPublic') is False:
queryset.update(is_public=False)
for group in group_list:
if not group.is_public:
continue
group.is_public = False
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PRIVATE,
user=acting_user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
children = []
for group in group_list:
if group == primary_group:
continue
children.append(group)
group.update(status=GroupStatus.PENDING_MERGE)
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
)
Activity.objects.create(
project=primary_group.project,
group=primary_group,
type=Activity.MERGE,
user=acting_user,
data={
'issues': [{'id': c.id} for c in children],
},
)
result['merge'] = {
'parent': str(primary_group.id),
'children': [str(g.id) for g in children],
}
return Response(result)
@attach_scenarios([bulk_remove_aggregates_scenario])
def delete(self, request, project):
"""
Bulk Remove a List of Aggregates
````````````````````````````````
Permanently remove the given aggregates. The list of groups to
modify is given through the `id` query parameter. It is repeated
for each group that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the groups to be removed. This
parameter shall be repeated for each group.
:pparam string organization_slug: the slug of the organization the
groups belong to.
:pparam string project_slug: the slug of the project the groups
belong to.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
else:
# missing any kind of filter
return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)
if not group_ids:
return Response(status=204)
# TODO(dcramer): set status to pending deletion
for group in group_list:
delete_group.delay(object_id=group.id, countdown=3600)
return Response(status=204)
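# A hedged client-side sketch (not part of this module) of the GET documented
# above: request the 24h stats and read the pagination cursor from the `Link`
# header.  `session` is assumed to be an authenticated `requests.Session`;
# the URL pieces are placeholders.
def _example_list_unresolved(session, base_url, org_slug, project_slug):
    resp = session.get(
        '{}/api/0/projects/{}/{}/issues/'.format(base_url, org_slug,
                                                 project_slug),
        params={'statsPeriod': '24h', 'query': 'is:unresolved'},
    )
    next_page = resp.links.get('next', {}).get('url')  # parsed Link header
    return resp.json(), next_page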
| {
"repo_name": "nicholasserra/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "2",
"size": "21496",
"license": "bsd-3-clause",
"hash": 6713285384856708000,
"line_mean": 36.2547660312,
"line_max": 144,
"alpha_frac": 0.5289356159,
"autogenerated": false,
"ratio": 4.7747667703243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014983852012912064,
"num_lines": 577
} |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from django.db.models import Q
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry.app import search
from sentry.api.base import DocSection, Endpoint
from sentry.api.permissions import assert_perm
from sentry.api.serializers import serialize
from sentry.constants import (
DEFAULT_SORT_OPTION, STATUS_CHOICES
)
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, Group, GroupBookmark, GroupMeta, GroupStatus, Project, TagKey
)
from sentry.search.utils import parse_query
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.cursors import Cursor
from sentry.utils.dates import parse_date
class GroupSerializer(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
))
isBookmarked = serializers.BooleanField()
merge = serializers.BooleanField()
class ProjectGroupIndexEndpoint(Endpoint):
doc_section = DocSection.EVENTS
# bookmarks=0/1
# status=<x>
# <tag>=<value>
def get(self, request, project_id):
"""
List a project's aggregates
Return a list of aggregates bound to a project.
{method} {path}?id=1&id=2&id=3
        A default query of 'is:unresolved' is applied. To return results with
        other statuses send a new query value (i.e. ?query= for all results).
Any standard Sentry structured search query can be passed via the
``query`` parameter.
"""
project = Project.objects.get_from_cache(
id=project_id,
)
assert_perm(project, request.user, request.auth)
query_kwargs = {
'project': project,
}
if request.GET.get('status'):
try:
query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
except KeyError:
return Response('{"error": "invalid status"}', status=400)
if request.user.is_authenticated() and request.GET.get('bookmarks'):
query_kwargs['bookmarked_by'] = request.user
if request.user.is_authenticated() and request.GET.get('assigned'):
query_kwargs['assigned_to'] = request.user
sort_by = request.GET.get('sort')
if sort_by is None:
sort_by = DEFAULT_SORT_OPTION
query_kwargs['sort_by'] = sort_by
tags = {}
for tag_key in TagKey.objects.all_keys(project):
if request.GET.get(tag_key):
tags[tag_key] = request.GET[tag_key]
if tags:
query_kwargs['tags'] = tags
# TODO: dates should include timestamps
date_from = request.GET.get('since')
time_from = request.GET.get('until')
date_filter = request.GET.get('date_filter')
date_to = request.GET.get('dt')
time_to = request.GET.get('tt')
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
return Response('{"error": "invalid limit"}', status=400)
today = timezone.now()
# date format is Y-m-d
if any(x is not None for x in [date_from, time_from, date_to, time_to]):
date_from, date_to = parse_date(date_from, time_from), parse_date(date_to, time_to)
else:
date_from = today - timedelta(days=5)
date_to = None
query_kwargs['date_from'] = date_from
query_kwargs['date_to'] = date_to
if date_filter:
query_kwargs['date_filter'] = date_filter
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved')
if query is not None:
query_kwargs.update(parse_query(query, request.user))
cursor_result = search.query(**query_kwargs)
context = list(cursor_result)
GroupMeta.objects.populate_cache(context)
response = Response(serialize(context, request.user))
response['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return response
def put(self, request, project_id):
"""
Bulk mutate a list of aggregates
Bulk mutate various attributes on aggregates.
{method} {path}?id=1&id=2&id=3
{{
"status": "resolved",
"isBookmarked": true
}}
- For non-status updates, only queries by 'id' are accepted.
- For status updates, the 'id' parameter may be omitted for a batch
"update all" query.
Attributes:
- status: resolved, unresolved, muted
- isBookmarked: true, false
- merge: true, false
If any ids are out of scope this operation will succeed without any data
mutation.
"""
project = Project.objects.get_from_cache(
id=project_id,
)
assert_perm(project, request.user, request.auth)
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(status=400)
result = serializer.object
# validate that we've passed a selector for non-status bulk operations
if not group_ids and result.get('isBookmarked') is not None:
return Response(status=400)
if group_ids:
filters = Q(id__in=group_ids)
else:
filters = Q(project=project)
if result.get('status') == 'resolved':
now = timezone.now()
happened = Group.objects.filter(filters).exclude(
status=GroupStatus.RESOLVED,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
if group_list and happened:
for group in group_list:
group.status = GroupStatus.RESOLVED
group.resolved_at = now
create_or_update(
Activity,
project=group.project,
group=group,
type=Activity.SET_RESOLVED,
user=request.user,
)
elif result.get('status'):
new_status = STATUS_CHOICES[result['status']]
happened = Group.objects.filter(filters).exclude(
status=new_status,
).update(
status=new_status,
)
if group_list and happened:
for group in group_list:
group.status = new_status
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=group.project,
group=group,
user=request.user,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=request.user,
).delete()
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
for group in group_list:
if group == primary_group:
continue
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
)
if group_list:
GroupMeta.objects.populate_cache(group_list)
# TODO(dcramer): we need create a public API for 'sort_value'
context = serialize(list(group_list), request.user)
return Response(context)
return Response(status=204)
def delete(self, request, project_id):
"""
Bulk remove a list of aggregates
Permanently remove the given aggregates.
Only queries by 'id' are accepted.
{method} {path}?id=1&id=2&id=3
If any ids are out of scope this operation will succeed without any data
mutation
"""
project = Project.objects.get_from_cache(
id=project_id,
)
assert_perm(project, request.user, request.auth)
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
else:
# missing any kind of filter
return Response(status=400)
if not group_ids:
return Response(status=204)
# TODO(dcramer): set status to pending deletion
for group in group_list:
delete_group.delay(object_id=group.id)
return Response(status=204)
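
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module).
# The GET/PUT/DELETE handlers above are driven over HTTP; the snippet below
# shows one plausible client-side call sequence using the third-party
# `requests` library. The base URL, project id, issue ids and auth header
# are hypothetical placeholders, not values taken from this repository.
if __name__ == '__main__':
    import requests  # assumed to be installed; not a dependency of this module

    BASE = 'https://sentry.example.com/api/0/projects/1/groups/'  # hypothetical URL
    AUTH = {'Authorization': 'Bearer <token>'}  # hypothetical credential

    # Mirrors get(): list unresolved aggregates, 25 per page.
    listing = requests.get(BASE, headers=AUTH,
                           params={'status': 'unresolved', 'limit': 25})
    print(len(listing.json()), 'aggregates returned')

    # Mirrors put(): bulk-resolve and bookmark two aggregates selected by id.
    requests.put(BASE, headers=AUTH,
                 params=[('id', 1), ('id', 2)],
                 json={'status': 'resolved', 'isBookmarked': True})

    # Mirrors delete(): permanently remove the same two aggregates.
    requests.delete(BASE, headers=AUTH, params=[('id', 1), ('id', 2)])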
| {
"repo_name": "camilonova/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "1",
"size": "9766",
"license": "bsd-3-clause",
"hash": 1807118110907661300,
"line_mean": 31.7718120805,
"line_max": 95,
"alpha_frac": 0.5751587139,
"autogenerated": false,
"ratio": 4.249782419495213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5324941133395213,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
from ... import py2help
def crossdomain(origin=None, methods=None, headers=None,
automatic_headers=True,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, py2help.basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, py2help.basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if automatic_headers and h.get('Access-Control-Request-Headers'):
h['Access-Control-Allow-Headers'] = h['Access-Control-Request-Headers']
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
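
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). It shows one common way to apply
# the decorator above to a Flask view so that cross-origin requests,
# including CORS preflight OPTIONS calls, receive the expected headers.
# The app, route and payload below are hypothetical examples.
if __name__ == '__main__':
    from flask import Flask, jsonify

    app = Flask(__name__)

    @app.route('/compute', methods=['GET', 'OPTIONS'])
    @crossdomain(origin='*', methods=['GET', 'OPTIONS'])
    def compute():
        # Access-Control-* headers are attached to this response by the
        # decorator above.
        return jsonify(result=42)

    app.run(port=5000)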
| {
"repo_name": "FrancescAlted/blaze",
"path": "blaze/io/server/crossdomain.py",
"copies": "6",
"size": "1834",
"license": "bsd-3-clause",
"hash": -2363745160118387700,
"line_mean": 38.0212765957,
"line_max": 87,
"alpha_frac": 0.6199563795,
"autogenerated": false,
"ratio": 4.1776765375854215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7797632917085421,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from uuid import uuid4
import six
from django.db import IntegrityError, transaction
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.fields import UserField
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import (
SUBSCRIPTION_REASON_MAP, StreamGroupSerializer
)
from sentry.app import search
from sentry.constants import DEFAULT_SORT_OPTION
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, EventMapping, Group, GroupAssignee, GroupBookmark, GroupHash,
GroupResolution, GroupSeen, GroupSnooze, GroupStatus, GroupSubscription,
GroupSubscriptionReason, Release, TagKey
)
from sentry.models.group import looks_like_short_id
from sentry.search.utils import InvalidQuery, parse_query
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.apidocs import attach_scenarios, scenario
from sentry.utils.cursors import Cursor
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
@scenario('BulkUpdateIssues')
def bulk_update_issues_scenario(runner):
project = runner.default_project
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='PUT',
path='/projects/%s/%s/issues/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
data={'status': 'unresolved', 'isPublic': False}
)
@scenario('BulkRemoveIssuess')
def bulk_remove_issues_scenario(runner):
with runner.isolated_project('Amazing Plumbing') as project:
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='DELETE',
path='/projects/%s/%s/issues/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
)
@scenario('ListProjectIssuess')
def list_project_issues_scenario(runner):
project = runner.default_project
runner.request(
method='GET',
path='/projects/%s/%s/issues/?statsPeriod=24h' % (
runner.org.slug, project.slug),
)
STATUS_CHOICES = {
'resolved': GroupStatus.RESOLVED,
'unresolved': GroupStatus.UNRESOLVED,
'ignored': GroupStatus.IGNORED,
'resolvedInNextRelease': GroupStatus.UNRESOLVED,
# TODO(dcramer): remove in 9.0
'muted': GroupStatus.IGNORED,
}
class ValidationError(Exception):
pass
class GroupValidator(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
))
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
isSubscribed = serializers.BooleanField()
merge = serializers.BooleanField()
ignoreDuration = serializers.IntegerField()
assignedTo = UserField()
# TODO(dcramer): remove in 9.0
snoozeDuration = serializers.IntegerField()
def validate_assignedTo(self, attrs, source):
value = attrs[source]
if value and not self.context['project'].member_set.filter(user=value).exists():
raise serializers.ValidationError('Cannot assign to non-team member')
return attrs
class ProjectGroupIndexEndpoint(ProjectEndpoint):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission,)
def _build_query_params_from_request(self, request, project):
query_kwargs = {
'project': project,
}
if request.GET.get('status'):
try:
query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
except KeyError:
raise ValidationError('invalid status')
if request.user.is_authenticated() and request.GET.get('bookmarks'):
query_kwargs['bookmarked_by'] = request.user
if request.user.is_authenticated() and request.GET.get('assigned'):
query_kwargs['assigned_to'] = request.user
sort_by = request.GET.get('sort')
if sort_by is None:
sort_by = DEFAULT_SORT_OPTION
query_kwargs['sort_by'] = sort_by
tags = {}
for tag_key in TagKey.objects.all_keys(project):
if request.GET.get(tag_key):
tags[tag_key] = request.GET[tag_key]
if tags:
query_kwargs['tags'] = tags
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
raise ValidationError('invalid limit')
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if query:
try:
query_kwargs.update(parse_query(project, query, request.user))
except InvalidQuery as e:
raise ValidationError(u'Your search query could not be parsed: {}'.format(e.message))
return query_kwargs
# bookmarks=0/1
# status=<x>
# <tag>=<value>
# statsPeriod=24h
@attach_scenarios([list_project_issues_scenario])
def get(self, request, project):
"""
List a Project's Issues
```````````````````````
Return a list of issues (groups) bound to a project. All parameters are
supplied as query string parameters.
A default query of ``is:unresolved`` is applied. To return results
        with other statuses send a new query value (i.e. ``?query=`` for all
results).
The ``statsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam bool shortIdLookup: if this is set to true then short IDs are
looked up by this function as well. This
can cause the return value of the function
to return an event issue of a different
project which is why this is an opt-in.
Set to `1` to enable.
:qparam querystring query: an optional Sentry structured search
query. If not provided an implied
                                   ``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
stats_period = request.GET.get('statsPeriod')
if stats_period not in (None, '', '24h', '14d'):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = '24h'
elif stats_period == '':
# disable stats
stats_period = None
query = request.GET.get('query', '').strip()
if query:
matching_group = None
if len(query) == 32:
# check to see if we've got an event ID
try:
mapping = EventMapping.objects.get(
project_id=project.id,
event_id=query,
)
except EventMapping.DoesNotExist:
pass
else:
matching_group = Group.objects.get(id=mapping.group_id)
# If the query looks like a short id, we want to provide some
# information about where that is. Note that this can return
# results for another project. The UI deals with this.
elif request.GET.get('shortIdLookup') == '1' and \
looks_like_short_id(query):
try:
matching_group = Group.objects.by_qualified_short_id(
project.organization_id, query)
except Group.DoesNotExist:
matching_group = None
if matching_group is not None:
response = Response(serialize(
[matching_group], request.user, StreamGroupSerializer(
stats_period=stats_period
)
))
response['X-Sentry-Direct-Hit'] = '1'
return response
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
cursor_result = search.query(**query_kwargs)
results = list(cursor_result)
context = serialize(
results, request.user, StreamGroupSerializer(
stats_period=stats_period
)
)
# HACK: remove auto resolved entries
if query_kwargs.get('status') == GroupStatus.UNRESOLVED:
context = [
r for r in context
if r['status'] == 'unresolved'
]
response = Response(context)
response['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return response
@attach_scenarios([bulk_update_issues_scenario])
def put(self, request, project):
"""
Bulk Mutate a List of Issues
````````````````````````````
Bulk mutate various attributes on issues. The list of issues
to modify is given through the `id` query parameter. It is repeated
for each issue that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
The following attributes can be modified and are supplied as
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be mutated. This
parameter shall be repeated for each issue. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to issues of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"ignored"``.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:param string status: the new status for the issues. Valid values
are ``"resolved"``, ``resolvedInNextRelease``,
``"unresolved"``, and ``"ignored"``.
:param int ignoreDuration: the number of minutes to ignore this issue.
:param boolean isPublic: sets the issue to public or private.
:param boolean merge: allows to merge or unmerge different issues.
:param string assignedTo: the username of the user that should be
assigned to this issue.
:param boolean hasSeen: in case this API call is invoked with a user
context this allows changing of the flag
that indicates if the user has seen the
event.
:param boolean isBookmarked: in case this API call is invoked with a
user context this allows changing of
the bookmark flag.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupValidator(
data=request.DATA,
partial=True,
context={'project': project},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = dict(serializer.object)
acting_user = request.user if request.user.is_authenticated() else None
if not group_ids:
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
# bulk mutations are limited to 1000 items
# TODO(dcramer): it'd be nice to support more than this, but its
# a bit too complicated right now
query_kwargs['limit'] = 1000
cursor_result = search.query(**query_kwargs)
group_list = list(cursor_result)
group_ids = [g.id for g in group_list]
is_bulk = len(group_ids) > 1
queryset = Group.objects.filter(
id__in=group_ids,
)
if result.get('status') == 'resolvedInNextRelease':
try:
release = Release.objects.filter(
projects=project,
organization_id=project.organization_id
).order_by('-date_added')[0]
except IndexError:
return Response('{"detail": "No release data present in the system to form a basis for \'Next Release\'"}', status=400)
now = timezone.now()
for group in group_list:
try:
with transaction.atomic():
resolution, created = GroupResolution.objects.create(
group=group,
release=release,
), True
except IntegrityError:
resolution, created = GroupResolution.objects.get(
group=group,
), False
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
if created:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED_IN_RELEASE,
user=acting_user,
ident=resolution.id,
data={
# no version yet
'version': '',
}
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
queryset.update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
result.update({
'status': 'resolved',
'statusDetails': {
'inNextRelease': True,
},
})
elif result.get('status') == 'resolved':
now = timezone.now()
happened = queryset.exclude(
status=GroupStatus.RESOLVED,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if group_list and happened:
for group in group_list:
group.status = GroupStatus.RESOLVED
group.resolved_at = now
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED,
user=acting_user,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
result['statusDetails'] = {}
elif result.get('status'):
new_status = STATUS_CHOICES[result['status']]
happened = queryset.exclude(
status=new_status,
).update(
status=new_status,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if new_status == GroupStatus.IGNORED:
ignore_duration = (
result.pop('ignoreDuration', None)
or result.pop('snoozeDuration', None)
)
if ignore_duration:
ignore_until = timezone.now() + timedelta(
minutes=ignore_duration,
)
for group in group_list:
GroupSnooze.objects.create_or_update(
group=group,
values={
'until': ignore_until,
}
)
result['statusDetails'] = {
'ignoreUntil': ignore_until,
}
else:
GroupSnooze.objects.filter(
group__in=group_ids,
).delete()
ignore_until = None
result['statusDetails'] = {}
else:
result['statusDetails'] = {}
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
activity_data = {}
elif new_status == GroupStatus.IGNORED:
activity_type = Activity.SET_IGNORED
activity_data = {
'ignoreUntil': ignore_until,
'ignoreDuration': ignore_duration,
}
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=acting_user,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity.send_notification()
if 'assignedTo' in result:
if result['assignedTo']:
for group in group_list:
GroupAssignee.objects.assign(group, result['assignedTo'],
acting_user)
if 'isSubscribed' not in result or result['assignedTo'] != request.user:
GroupSubscription.objects.subscribe(
group=group,
user=result['assignedTo'],
reason=GroupSubscriptionReason.assigned,
)
result['assignedTo'] = serialize(result['assignedTo'])
else:
for group in group_list:
GroupAssignee.objects.deassign(group, acting_user)
if result.get('hasSeen') and project.member_set.filter(user=acting_user).exists():
for group in group_list:
instance, created = create_or_update(
GroupSeen,
group=group,
user=acting_user,
project=group.project,
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=project,
group=group,
user=acting_user,
)
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.bookmark,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
# TODO(dcramer): we could make these more efficient by first
        # querying for which rows are present (if N > 2), flipping the flag
# on those rows, and then creating the missing rows
if result.get('isSubscribed') in (True, False):
is_subscribed = result['isSubscribed']
for group in group_list:
# NOTE: Subscribing without an initiating event (assignment,
# commenting, etc.) clears out the previous subscription reason
# to avoid showing confusing messaging as a result of this
# action. It'd be jarring to go directly from "you are not
                # subscribed" to "you were subscribed because you were
# assigned" just by clicking the "subscribe" button (and you
# may no longer be assigned to the issue anyway.)
GroupSubscription.objects.create_or_update(
user=acting_user,
group=group,
project=project,
values={
'is_active': is_subscribed,
'reason': GroupSubscriptionReason.unknown,
},
)
result['subscriptionDetails'] = {
'reason': SUBSCRIPTION_REASON_MAP.get(
GroupSubscriptionReason.unknown,
'unknown',
),
}
if result.get('isPublic'):
queryset.update(is_public=True)
for group in group_list:
if group.is_public:
continue
group.is_public = True
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PUBLIC,
user=acting_user,
)
elif result.get('isPublic') is False:
queryset.update(is_public=False)
for group in group_list:
if not group.is_public:
continue
group.is_public = False
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PRIVATE,
user=acting_user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
children = []
transaction_id = uuid4().hex
for group in group_list:
if group == primary_group:
continue
children.append(group)
group.update(status=GroupStatus.PENDING_MERGE)
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
transaction_id=transaction_id,
)
Activity.objects.create(
project=primary_group.project,
group=primary_group,
type=Activity.MERGE,
user=acting_user,
data={
'issues': [{'id': c.id} for c in children],
},
)
result['merge'] = {
'parent': six.text_type(primary_group.id),
'children': [six.text_type(g.id) for g in children],
}
return Response(result)
@attach_scenarios([bulk_remove_issues_scenario])
def delete(self, request, project):
"""
Bulk Remove a List of Issues
````````````````````````````
Permanently remove the given issues. The list of issues to
modify is given through the `id` query parameter. It is repeated
for each issue that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be removed. This
parameter shall be repeated for each issue.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = list(Group.objects.filter(
project=project,
id__in=set(group_ids),
).exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]
))
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
else:
# missing any kind of filter
return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)
if not group_ids:
return Response(status=204)
Group.objects.filter(
id__in=group_ids,
).exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]
).update(status=GroupStatus.PENDING_DELETION)
GroupHash.objects.filter(group__id__in=group_ids).delete()
for group in group_list:
delete_group.apply_async(
kwargs={'object_id': group.id},
countdown=3600,
)
return Response(status=204)
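
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). The get() handler above paginates
# with opaque cursors exposed through the HTTP ``Link`` header (see
# build_cursor_link); a client typically walks pages by following the
# rel="next" link while its results attribute reads "true". The endpoint
# URL, auth header and the `requests` dependency are hypothetical
# placeholders, not values taken from this repository.
if __name__ == '__main__':
    import requests  # assumed available; not a dependency of this module

    url = 'https://sentry.example.com/api/0/projects/acme/web/issues/'  # hypothetical
    auth = {'Authorization': 'Bearer <token>'}  # hypothetical credential

    params = {'statsPeriod': '24h', 'query': 'is:unresolved'}
    while url:
        resp = requests.get(url, headers=auth, params=params)
        for issue in resp.json():
            print(issue.get('id'), issue.get('title'))
        # The next-page URL already carries its own query string and cursor.
        params = None
        # `requests` parses the Link header, keeping extra attributes such as
        # the results flag emitted by build_cursor_link.
        next_link = resp.links.get('next', {})
        url = next_link.get('url') if next_link.get('results') == 'true' else None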
| {
"repo_name": "BuildingLink/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "1",
"size": "28583",
"license": "bsd-3-clause",
"hash": 4345379878744416000,
"line_mean": 37.6779431664,
"line_max": 135,
"alpha_frac": 0.5207990764,
"autogenerated": false,
"ratio": 4.923858742463394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5944657818863395,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
import functools
import logging
from uuid import uuid4
import six
from django.db import IntegrityError, transaction
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry import analytics, features, search
from sentry.api.base import DocSection, EnvironmentMixin
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.fields import ActorField, Actor
from sentry.api.serializers import serialize
from sentry.api.serializers.models.actor import ActorSerializer
from sentry.api.serializers.models.group import (
SUBSCRIPTION_REASON_MAP, StreamGroupSerializer)
from sentry.constants import DEFAULT_SORT_OPTION
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, Environment, Group, GroupAssignee, GroupBookmark, GroupHash, GroupResolution,
GroupSeen, GroupShare, GroupSnooze, GroupStatus, GroupSubscription, GroupSubscriptionReason,
GroupHashTombstone, GroupTombstone, Release, TOMBSTONE_FIELDS_FROM_GROUP, UserOption, User, Team
)
from sentry.models.event import Event
from sentry.models.group import looks_like_short_id
from sentry.receivers import DEFAULT_SAVED_SEARCHES
from sentry.search.utils import InvalidQuery, parse_query
from sentry.signals import advanced_search, issue_ignored, issue_resolved_in_release
from sentry.tasks.deletion import delete_group
from sentry.tasks.integrations import kick_off_status_syncs
from sentry.tasks.merge import merge_group
from sentry.utils.apidocs import attach_scenarios, scenario
from sentry.utils.cursors import Cursor, CursorResult
from sentry.utils.functional import extract_lazy_object
delete_logger = logging.getLogger('sentry.deletions.api')
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
SAVED_SEARCH_QUERIES = set([s['query'] for s in DEFAULT_SAVED_SEARCHES])
@scenario('BulkUpdateIssues')
def bulk_update_issues_scenario(runner):
project = runner.default_project
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='PUT',
path='/projects/%s/%s/issues/?id=%s&id=%s' %
(runner.org.slug, project.slug, group1.id, group2.id),
data={'status': 'unresolved',
'isPublic': False}
)
@scenario('BulkRemoveIssuess')
def bulk_remove_issues_scenario(runner):
with runner.isolated_project('Amazing Plumbing') as project:
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='DELETE',
path='/projects/%s/%s/issues/?id=%s&id=%s' %
(runner.org.slug, project.slug, group1.id, group2.id),
)
@scenario('ListProjectIssuess')
def list_project_issues_scenario(runner):
project = runner.default_project
runner.request(
method='GET',
path='/projects/%s/%s/issues/?statsPeriod=24h' % (
runner.org.slug, project.slug),
)
STATUS_CHOICES = {
'resolved': GroupStatus.RESOLVED,
'unresolved': GroupStatus.UNRESOLVED,
'ignored': GroupStatus.IGNORED,
'resolvedInNextRelease': GroupStatus.UNRESOLVED,
# TODO(dcramer): remove in 9.0
'muted': GroupStatus.IGNORED,
}
class ValidationError(Exception):
pass
class StatusDetailsValidator(serializers.Serializer):
inNextRelease = serializers.BooleanField()
inRelease = serializers.CharField()
ignoreDuration = serializers.IntegerField()
ignoreCount = serializers.IntegerField()
# in minutes, max of one week
ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60)
ignoreUserCount = serializers.IntegerField()
# in minutes, max of one week
ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60)
def validate_inRelease(self, attrs, source):
value = attrs[source]
project = self.context['project']
if value == 'latest':
try:
attrs[source] = Release.objects.filter(
projects=project,
organization_id=project.organization_id,
).extra(select={
'sort': 'COALESCE(date_released, date_added)',
}).order_by('-sort')[0]
except IndexError:
raise serializers.ValidationError(
'No release data present in the system to form a basis for \'Next Release\''
)
else:
try:
attrs[source] = Release.objects.get(
projects=project,
organization_id=project.organization_id,
version=value,
)
except Release.DoesNotExist:
raise serializers.ValidationError(
'Unable to find a release with the given version.'
)
return attrs
def validate_inNextRelease(self, attrs, source):
project = self.context['project']
if not Release.objects.filter(
projects=project,
organization_id=project.organization_id,
).exists():
raise serializers.ValidationError(
'No release data present in the system to form a basis for \'Next Release\''
)
return attrs
class GroupValidator(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()))
statusDetails = StatusDetailsValidator()
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
isSubscribed = serializers.BooleanField()
merge = serializers.BooleanField()
discard = serializers.BooleanField()
ignoreDuration = serializers.IntegerField()
ignoreCount = serializers.IntegerField()
# in minutes, max of one week
ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60)
ignoreUserCount = serializers.IntegerField()
# in minutes, max of one week
ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60)
assignedTo = ActorField()
# TODO(dcramer): remove in 9.0
snoozeDuration = serializers.IntegerField()
def validate_assignedTo(self, attrs, source):
value = attrs[source]
if value and value.type is User and not self.context['project'].member_set.filter(
user_id=value.id).exists():
raise serializers.ValidationError(
'Cannot assign to non-team member')
if value and value.type is Team and not self.context['project'].teams.filter(
id=value.id).exists():
raise serializers.ValidationError(
'Cannot assign to a team without access to the project')
return attrs
def validate(self, attrs):
attrs = super(GroupValidator, self).validate(attrs)
if len(attrs) > 1 and 'discard' in attrs:
raise serializers.ValidationError(
'Other attributes cannot be updated when discarding')
return attrs
class ProjectGroupIndexEndpoint(ProjectEndpoint, EnvironmentMixin):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission, )
def _build_query_params_from_request(self, request, project):
query_kwargs = {
'project': project,
'sort_by': request.GET.get('sort', DEFAULT_SORT_OPTION),
}
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
raise ValidationError('invalid limit')
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if query:
try:
query_kwargs.update(parse_query(project, query, request.user))
except InvalidQuery as e:
raise ValidationError(
u'Your search query could not be parsed: {}'.format(
e.message)
)
return query_kwargs
def _search(self, request, project, extra_query_kwargs=None):
query_kwargs = self._build_query_params_from_request(request, project)
if extra_query_kwargs is not None:
assert 'environment' not in extra_query_kwargs
query_kwargs.update(extra_query_kwargs)
try:
if features.has('organizations:environments', project.organization, actor=request.user):
query_kwargs['environment'] = self._get_environment_from_request(
request,
project.organization_id,
)
except Environment.DoesNotExist:
# XXX: The 1000 magic number for `max_hits` is an abstraction leak
# from `sentry.api.paginator.BasePaginator.get_result`.
result = CursorResult([], None, None, hits=0, max_hits=1000)
else:
result = search.query(**query_kwargs)
return result, query_kwargs
def _subscribe_and_assign_issue(self, acting_user, group, result):
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
self_assign_issue = UserOption.objects.get_value(
user=acting_user, key='self_assign_issue', default='0'
)
if self_assign_issue == '1' and not group.assignee_set.exists():
result['assignedTo'] = Actor(type=User, id=extract_lazy_object(acting_user).id)
# statsPeriod=24h
@attach_scenarios([list_project_issues_scenario])
def get(self, request, project):
"""
List a Project's Issues
```````````````````````
Return a list of issues (groups) bound to a project. All parameters are
supplied as query string parameters.
A default query of ``is:unresolved`` is applied. To return results
        with other statuses send a new query value (i.e. ``?query=`` for all
results).
The ``statsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam bool shortIdLookup: if this is set to true then short IDs are
looked up by this function as well. This
can cause the return value of the function
to return an event issue of a different
project which is why this is an opt-in.
Set to `1` to enable.
:qparam querystring query: an optional Sentry structured search
query. If not provided an implied
                                   ``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
stats_period = request.GET.get('statsPeriod')
if stats_period not in (None, '', '24h', '14d'):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = '24h'
elif stats_period == '':
# disable stats
stats_period = None
serializer = functools.partial(
StreamGroupSerializer,
environment_func=self._get_environment_func(request, project.organization_id),
stats_period=stats_period,
)
query = request.GET.get('query', '').strip()
if query:
matching_group = None
matching_event = None
if len(query) == 32:
# check to see if we've got an event ID
try:
matching_group = Group.objects.from_event_id(project, query)
except Group.DoesNotExist:
pass
else:
try:
matching_event = Event.objects.get(
event_id=query, project_id=project.id)
except Event.DoesNotExist:
pass
else:
Event.objects.bind_nodes([matching_event], 'data')
# If the query looks like a short id, we want to provide some
# information about where that is. Note that this can return
# results for another project. The UI deals with this.
elif request.GET.get('shortIdLookup') == '1' and \
looks_like_short_id(query):
try:
matching_group = Group.objects.by_qualified_short_id(
project.organization_id, query
)
except Group.DoesNotExist:
matching_group = None
if matching_group is not None:
matching_event_environment = None
try:
matching_event_environment = matching_event.get_environment().name if matching_event else None
except Environment.DoesNotExist:
pass
response = Response(
serialize(
[matching_group], request.user, serializer(
matching_event_id=getattr(matching_event, 'id', None),
matching_event_environment=matching_event_environment,
)
)
)
response['X-Sentry-Direct-Hit'] = '1'
return response
try:
cursor_result, query_kwargs = self._search(request, project, {'count_hits': True})
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
results = list(cursor_result)
context = serialize(results, request.user, serializer())
# HACK: remove auto resolved entries
if query_kwargs.get('status') == GroupStatus.UNRESOLVED:
context = [r for r in context if r['status'] == 'unresolved']
response = Response(context)
self.add_cursor_headers(request, response, cursor_result)
if results and query not in SAVED_SEARCH_QUERIES:
advanced_search.send(project=project, sender=request.user)
analytics.record('project_issue.searched', user_id=request.user.id,
organization_id=project.organization_id, project_id=project.id,
query=query)
return response
@attach_scenarios([bulk_update_issues_scenario])
def put(self, request, project):
"""
Bulk Mutate a List of Issues
````````````````````````````
Bulk mutate various attributes on issues. The list of issues
to modify is given through the `id` query parameter. It is repeated
for each issue that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
The following attributes can be modified and are supplied as
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be mutated. This
parameter shall be repeated for each issue. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to issues of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"ignored"``.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:param string status: the new status for the issues. Valid values
are ``"resolved"``, ``"resolvedInNextRelease"``,
``"unresolved"``, and ``"ignored"``.
:param int ignoreDuration: the number of minutes to ignore this issue.
:param boolean isPublic: sets the issue to public or private.
:param boolean merge: allows to merge or unmerge different issues.
:param string assignedTo: the actor id (or username) of the user or team that should be
assigned to this issue.
:param boolean hasSeen: in case this API call is invoked with a user
context this allows changing of the flag
that indicates if the user has seen the
event.
:param boolean isBookmarked: in case this API call is invoked with a
user context this allows changing of
the bookmark flag.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(
project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupValidator(
data=request.DATA,
partial=True,
context={'project': project},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = dict(serializer.object)
acting_user = request.user if request.user.is_authenticated() else None
if not group_ids:
try:
# bulk mutations are limited to 1000 items
# TODO(dcramer): it'd be nice to support more than this, but its
# a bit too complicated right now
cursor_result, _ = self._search(request, project, {
'limit': 1000,
'paginator_options': {'max_limit': 1000},
})
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
group_list = list(cursor_result)
group_ids = [g.id for g in group_list]
is_bulk = len(group_ids) > 1
queryset = Group.objects.filter(
id__in=group_ids,
)
discard = result.get('discard')
if discard:
if not features.has('projects:discard-groups', project, actor=request.user):
return Response({'detail': ['You do not have that feature enabled']}, status=400)
group_list = list(queryset)
groups_to_delete = []
for group in group_list:
with transaction.atomic():
try:
tombstone = GroupTombstone.objects.create(
previous_group_id=group.id,
actor_id=acting_user.id if acting_user else None,
**{name: getattr(group, name) for name in TOMBSTONE_FIELDS_FROM_GROUP}
)
except IntegrityError:
# in this case, a tombstone has already been created
# for a group, so no hash updates are necessary
pass
else:
groups_to_delete.append(group)
GroupHash.objects.filter(
group=group,
).update(
group=None,
group_tombstone_id=tombstone.id,
)
self._delete_groups(request, project, groups_to_delete)
return Response(status=204)
statusDetails = result.pop('statusDetails', result)
status = result.get('status')
if status in ('resolved', 'resolvedInNextRelease'):
if status == 'resolvedInNextRelease' or statusDetails.get('inNextRelease'):
release = Release.objects.filter(
projects=project,
organization_id=project.organization_id,
).extra(select={
'sort': 'COALESCE(date_released, date_added)',
}).order_by('-sort')[0]
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
'version': '',
}
status_details = {
'inNextRelease': True,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_next_release
res_status = GroupResolution.Status.pending
elif statusDetails.get('inRelease'):
release = statusDetails['inRelease']
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
'version': release.version,
}
status_details = {
'inRelease': release.version,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_release
res_status = GroupResolution.Status.resolved
else:
release = None
activity_type = Activity.SET_RESOLVED
activity_data = {}
status_details = {}
now = timezone.now()
for group in group_list:
with transaction.atomic():
if release:
resolution_params = {
'release': release,
'type': res_type,
'status': res_status,
'actor_id': request.user.id
if request.user.is_authenticated() else None,
}
resolution, created = GroupResolution.objects.get_or_create(
group=group,
defaults=resolution_params,
)
if not created:
resolution.update(
datetime=timezone.now(), **resolution_params)
else:
resolution = None
affected = Group.objects.filter(
id=group.id,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
if not resolution:
created = affected
group.status = GroupStatus.RESOLVED
group.resolved_at = now
self._subscribe_and_assign_issue(
acting_user, group, result)
if created:
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=acting_user,
ident=resolution.id if resolution else None,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
issue_resolved_in_release.send(
group=group,
project=project,
sender=acting_user,
)
kick_off_status_syncs.apply_async(kwargs={
'project_id': group.project_id,
'group_id': group.id,
})
result.update({
'status': 'resolved',
'statusDetails': status_details,
})
elif status:
new_status = STATUS_CHOICES[result['status']]
with transaction.atomic():
happened = queryset.exclude(
status=new_status,
).update(
status=new_status,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if new_status == GroupStatus.IGNORED:
ignore_duration = (
statusDetails.pop('ignoreDuration', None) or
statusDetails.pop('snoozeDuration', None)
) or None
ignore_count = statusDetails.pop(
'ignoreCount', None) or None
ignore_window = statusDetails.pop(
'ignoreWindow', None) or None
ignore_user_count = statusDetails.pop(
'ignoreUserCount', None) or None
ignore_user_window = statusDetails.pop(
'ignoreUserWindow', None) or None
if ignore_duration or ignore_count or ignore_user_count:
if ignore_duration:
ignore_until = timezone.now() + timedelta(
minutes=ignore_duration,
)
else:
ignore_until = None
for group in group_list:
state = {}
if ignore_count and not ignore_window:
state['times_seen'] = group.times_seen
if ignore_user_count and not ignore_user_window:
state['users_seen'] = group.count_users_seen()
GroupSnooze.objects.create_or_update(
group=group,
values={
'until':
ignore_until,
'count':
ignore_count,
'window':
ignore_window,
'user_count':
ignore_user_count,
'user_window':
ignore_user_window,
'state':
state,
'actor_id':
request.user.id if request.user.is_authenticated() else None,
}
)
result['statusDetails'] = {
'ignoreCount': ignore_count,
'ignoreUntil': ignore_until,
'ignoreUserCount': ignore_user_count,
'ignoreUserWindow': ignore_user_window,
'ignoreWindow': ignore_window,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
issue_ignored.send_robust(project=project, sender=self.__class__)
else:
GroupSnooze.objects.filter(
group__in=group_ids,
).delete()
ignore_until = None
result['statusDetails'] = {}
else:
result['statusDetails'] = {}
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
activity_data = {}
elif new_status == GroupStatus.IGNORED:
activity_type = Activity.SET_IGNORED
activity_data = {
'ignoreCount': ignore_count,
'ignoreDuration': ignore_duration,
'ignoreUntil': ignore_until,
'ignoreUserCount': ignore_user_count,
'ignoreUserWindow': ignore_user_window,
'ignoreWindow': ignore_window,
}
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=acting_user,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity.send_notification()
if new_status == GroupStatus.UNRESOLVED:
kick_off_status_syncs.apply_async(kwargs={
'project_id': group.project_id,
'group_id': group.id,
})
if 'assignedTo' in result:
assigned_actor = result['assignedTo']
if assigned_actor:
for group in group_list:
resolved_actor = assigned_actor.resolve()
GroupAssignee.objects.assign(group, resolved_actor, acting_user)
result['assignedTo'] = serialize(
assigned_actor.resolve(), acting_user, ActorSerializer())
else:
for group in group_list:
GroupAssignee.objects.deassign(group, acting_user)
if result.get('hasSeen') and project.member_set.filter(user=acting_user).exists():
for group in group_list:
instance, created = create_or_update(
GroupSeen,
group=group,
user=acting_user,
project=group.project,
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=project,
group=group,
user=acting_user,
)
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.bookmark,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
# TODO(dcramer): we could make these more efficient by first
        # querying for which rows are present (if N > 2), flipping the flag
# on those rows, and then creating the missing rows
if result.get('isSubscribed') in (True, False):
is_subscribed = result['isSubscribed']
for group in group_list:
# NOTE: Subscribing without an initiating event (assignment,
# commenting, etc.) clears out the previous subscription reason
# to avoid showing confusing messaging as a result of this
# action. It'd be jarring to go directly from "you are not
                # subscribed" to "you were subscribed because you were
# assigned" just by clicking the "subscribe" button (and you
# may no longer be assigned to the issue anyway.)
GroupSubscription.objects.create_or_update(
user=acting_user,
group=group,
project=project,
values={
'is_active': is_subscribed,
'reason': GroupSubscriptionReason.unknown,
},
)
result['subscriptionDetails'] = {
'reason': SUBSCRIPTION_REASON_MAP.get(
GroupSubscriptionReason.unknown,
'unknown',
),
}
if 'isPublic' in result:
# We always want to delete an existing share, because triggering
# an isPublic=True even when it's already public, should trigger
# regenerating.
for group in group_list:
if GroupShare.objects.filter(group=group).delete():
result['shareId'] = None
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PRIVATE,
user=acting_user,
)
if result.get('isPublic'):
for group in group_list:
share, created = GroupShare.objects.get_or_create(
project=group.project,
group=group,
user=acting_user,
)
if created:
result['shareId'] = share.uuid
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PUBLIC,
user=acting_user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
children = []
transaction_id = uuid4().hex
for group in group_list:
if group == primary_group:
continue
children.append(group)
group.update(status=GroupStatus.PENDING_MERGE)
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
transaction_id=transaction_id,
)
Activity.objects.create(
project=primary_group.project,
group=primary_group,
type=Activity.MERGE,
user=acting_user,
data={
'issues': [{
'id': c.id
} for c in children],
},
)
result['merge'] = {
'parent': six.text_type(primary_group.id),
'children': [six.text_type(g.id) for g in children],
}
return Response(result)
@attach_scenarios([bulk_remove_issues_scenario])
def delete(self, request, project):
"""
Bulk Remove a List of Issues
````````````````````````````
Permanently remove the given issues. The list of issues to
modify is given through the `id` query parameter. It is repeated
for each issue that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be removed. This
parameter shall be repeated for each issue.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = list(
Group.objects.filter(
project=project,
id__in=set(group_ids),
).exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]
)
)
else:
try:
# bulk mutations are limited to 1000 items
# TODO(dcramer): it'd be nice to support more than this, but its
# a bit too complicated right now
cursor_result, _ = self._search(request, project, {
'limit': 1000,
'paginator_options': {'max_limit': 1000},
})
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
group_list = list(cursor_result)
if not group_list:
return Response(status=204)
self._delete_groups(request, project, group_list)
return Response(status=204)
def _delete_groups(self, request, project, group_list):
group_ids = [g.id for g in group_list]
Group.objects.filter(
id__in=group_ids,
).exclude(status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]).update(status=GroupStatus.PENDING_DELETION)
GroupHashTombstone.tombstone_groups(
project_id=project.id,
group_ids=group_ids,
)
transaction_id = uuid4().hex
for group in group_list:
delete_group.apply_async(
kwargs={
'object_id': group.id,
'transaction_id': transaction_id,
},
countdown=3600,
)
self.create_audit_entry(
request=request,
organization_id=project.organization_id,
target_object=group.id,
transaction_id=transaction_id,
)
delete_logger.info(
'object.delete.queued',
extra={
'object_id': group.id,
'transaction_id': transaction_id,
'model': type(group).__name__,
}
)
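
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). It mirrors the put() handler
# above: bulk-ignore two issues so they resurface after roughly four hours,
# or after 100 further events inside a 60-minute window, matching the
# StatusDetailsValidator fields (expressed in minutes in this version).
# The URL, issue ids and auth header are hypothetical placeholders, and the
# `requests` dependency is assumed to be installed.
if __name__ == '__main__':
    import requests

    requests.put(
        'https://sentry.example.com/api/0/projects/acme/web/issues/',  # hypothetical
        headers={'Authorization': 'Bearer <token>'},  # hypothetical credential
        params=[('id', 11), ('id', 12)],
        json={
            'status': 'ignored',
            'statusDetails': {
                'ignoreDuration': 4 * 60,  # minutes
                'ignoreCount': 100,
                'ignoreWindow': 60,        # minutes, capped at one week
            },
        },
    )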
| {
"repo_name": "ifduyue/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "1",
"size": "40265",
"license": "bsd-3-clause",
"hash": 907701814312215800,
"line_mean": 40.0448521916,
"line_max": 114,
"alpha_frac": 0.5156090898,
"autogenerated": false,
"ratio": 5.014321295143213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000406181067403593,
"num_lines": 981
} |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
import logging
from uuid import uuid4
import six
from django.db import IntegrityError, transaction
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry import features, search
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.fields import UserField
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import (SUBSCRIPTION_REASON_MAP, StreamGroupSerializer)
from sentry.constants import DEFAULT_SORT_OPTION
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, EventMapping, Group, GroupAssignee, GroupBookmark, GroupHash, GroupResolution,
GroupSeen, GroupSnooze, GroupStatus, GroupSubscription, GroupSubscriptionReason, GroupTombstone,
Release, TagKey, UserOption
)
from sentry.models.event import Event
from sentry.models.group import looks_like_short_id
from sentry.receivers import DEFAULT_SAVED_SEARCHES
from sentry.search.utils import InvalidQuery, parse_query
from sentry.signals import advanced_search, issue_resolved_in_release
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.apidocs import attach_scenarios, scenario
from sentry.utils.cursors import Cursor
from sentry.utils.functional import extract_lazy_object
delete_logger = logging.getLogger('sentry.deletions.api')
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
SAVED_SEARCH_QUERIES = set([s['query'] for s in DEFAULT_SAVED_SEARCHES])
TOMBSTONE_FIELDS_FROM_GROUP = ('project_id', 'level', 'message', 'culprit', 'data')
@scenario('BulkUpdateIssues')
def bulk_update_issues_scenario(runner):
project = runner.default_project
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='PUT',
path='/projects/%s/%s/issues/?id=%s&id=%s' %
(runner.org.slug, project.slug, group1.id, group2.id),
data={'status': 'unresolved',
'isPublic': False}
)
@scenario('BulkRemoveIssuess')
def bulk_remove_issues_scenario(runner):
with runner.isolated_project('Amazing Plumbing') as project:
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='DELETE',
path='/projects/%s/%s/issues/?id=%s&id=%s' %
(runner.org.slug, project.slug, group1.id, group2.id),
)
@scenario('ListProjectIssuess')
def list_project_issues_scenario(runner):
project = runner.default_project
runner.request(
method='GET',
path='/projects/%s/%s/issues/?statsPeriod=24h' % (runner.org.slug, project.slug),
)
STATUS_CHOICES = {
'resolved': GroupStatus.RESOLVED,
'unresolved': GroupStatus.UNRESOLVED,
'ignored': GroupStatus.IGNORED,
'resolvedInNextRelease': GroupStatus.UNRESOLVED,
# TODO(dcramer): remove in 9.0
'muted': GroupStatus.IGNORED,
}
class ValidationError(Exception):
pass
class StatusDetailsValidator(serializers.Serializer):
inNextRelease = serializers.BooleanField()
inRelease = serializers.CharField()
ignoreDuration = serializers.IntegerField()
ignoreCount = serializers.IntegerField()
# in hours, max of one week
ignoreWindow = serializers.IntegerField(max_value=7 * 24)
ignoreUserCount = serializers.IntegerField()
# in hours, max of one week
ignoreUserWindow = serializers.IntegerField(max_value=7 * 24)
def validate_inRelease(self, attrs, source):
value = attrs[source]
project = self.context['project']
if value == 'latest':
try:
attrs[source] = Release.objects.filter(
projects=project,
organization_id=project.organization_id,
).order_by('-date_added')[0]
except IndexError:
raise serializers.ValidationError(
'No release data present in the system to form a basis for \'Next Release\''
)
else:
try:
attrs[source] = Release.objects.get(
projects=project,
organization_id=project.organization_id,
version=value,
)
except Release.DoesNotExist:
raise serializers.ValidationError(
'Unable to find a release with the given version.'
)
return attrs
def validate_inNextRelease(self, attrs, source):
project = self.context['project']
if not Release.objects.filter(
projects=project,
organization_id=project.organization_id,
).exists():
raise serializers.ValidationError(
'No release data present in the system to form a basis for \'Next Release\''
)
return attrs
class GroupValidator(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(STATUS_CHOICES.keys(), STATUS_CHOICES.keys()))
statusDetails = StatusDetailsValidator()
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
isSubscribed = serializers.BooleanField()
merge = serializers.BooleanField()
discard = serializers.BooleanField()
ignoreDuration = serializers.IntegerField()
ignoreCount = serializers.IntegerField()
# in hours, max of one week
ignoreWindow = serializers.IntegerField(max_value=7 * 24)
ignoreUserCount = serializers.IntegerField()
# in hours, max of one week
ignoreUserWindow = serializers.IntegerField(max_value=7 * 24)
assignedTo = UserField()
# TODO(dcramer): remove in 9.0
snoozeDuration = serializers.IntegerField()
def validate_assignedTo(self, attrs, source):
value = attrs[source]
if value and not self.context['project'].member_set.filter(user=value).exists():
raise serializers.ValidationError('Cannot assign to non-team member')
return attrs
def validate(self, attrs):
attrs = super(GroupValidator, self).validate(attrs)
if len(attrs) > 1 and 'discard' in attrs:
raise serializers.ValidationError('Other attributes cannot be updated when discarding')
return attrs
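# Illustrative sketch (not part of the original module): how GroupValidator is
# typically driven, mirroring its use in ProjectGroupIndexEndpoint.put() below.
# `project` and `payload` are hypothetical caller-supplied values; with
# partial=True only the fields present in the payload are validated.
def _example_validate_group_payload(project, payload):
    serializer = GroupValidator(
        data=payload,
        partial=True,
        context={'project': project},
    )
    if not serializer.is_valid():
        return serializer.errors
    return dict(serializer.object)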
class ProjectGroupIndexEndpoint(ProjectEndpoint):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission, )
def _build_query_params_from_request(self, request, project):
query_kwargs = {
'project': project,
}
if request.GET.get('status'):
try:
query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
except KeyError:
raise ValidationError('invalid status')
if request.user.is_authenticated() and request.GET.get('bookmarks'):
query_kwargs['bookmarked_by'] = request.user
if request.user.is_authenticated() and request.GET.get('assigned'):
query_kwargs['assigned_to'] = request.user
sort_by = request.GET.get('sort')
if sort_by is None:
sort_by = DEFAULT_SORT_OPTION
query_kwargs['sort_by'] = sort_by
tags = {}
for tag_key in TagKey.objects.all_keys(project):
if request.GET.get(tag_key):
tags[tag_key] = request.GET[tag_key]
if tags:
query_kwargs['tags'] = tags
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
raise ValidationError('invalid limit')
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if query:
try:
query_kwargs.update(parse_query(project, query, request.user))
except InvalidQuery as e:
raise ValidationError(
u'Your search query could not be parsed: {}'.format(e.message)
)
return query_kwargs
def _subscribe_and_assign_issue(self, acting_user, group, result):
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
self_assign_issue = UserOption.objects.get_value(
user=acting_user, key='self_assign_issue', default='0'
)
if self_assign_issue == '1' and not group.assignee_set.exists():
result['assignedTo'] = extract_lazy_object(acting_user)
# bookmarks=0/1
# status=<x>
# <tag>=<value>
# statsPeriod=24h
@attach_scenarios([list_project_issues_scenario])
def get(self, request, project):
"""
List a Project's Issues
```````````````````````
Return a list of issues (groups) bound to a project. All parameters are
supplied as query string parameters.
A default query of ``is:unresolved`` is applied. To return results
        with other statuses send a new query value (i.e. ``?query=`` for all
results).
The ``statsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam bool shortIdLookup: if this is set to true then short IDs are
                                    looked up by this function as well.  This
                                    can cause an issue from a different project
                                    to be returned, which is why it is opt-in.
                                    Set to `1` to enable.
:qparam querystring query: an optional Sentry structured search
query. If not provided an implied
``"is:unresolved"`` is assumed.)
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
stats_period = request.GET.get('statsPeriod')
if stats_period not in (None, '', '24h', '14d'):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = '24h'
elif stats_period == '':
# disable stats
stats_period = None
query = request.GET.get('query', '').strip()
if query:
matching_group = None
matching_event = None
if len(query) == 32:
# check to see if we've got an event ID
try:
mapping = EventMapping.objects.get(
project_id=project.id,
event_id=query,
)
except EventMapping.DoesNotExist:
pass
else:
matching_group = Group.objects.get(id=mapping.group_id)
try:
matching_event = Event.objects.get(event_id=query, project_id=project.id)
except Event.DoesNotExist:
pass
# If the query looks like a short id, we want to provide some
# information about where that is. Note that this can return
# results for another project. The UI deals with this.
elif request.GET.get('shortIdLookup') == '1' and \
looks_like_short_id(query):
try:
matching_group = Group.objects.by_qualified_short_id(
project.organization_id, query
)
except Group.DoesNotExist:
matching_group = None
if matching_group is not None:
response = Response(
serialize(
[matching_group], request.user,
StreamGroupSerializer(
stats_period=stats_period,
matching_event_id=getattr(matching_event, 'id', None)
)
)
)
response['X-Sentry-Direct-Hit'] = '1'
return response
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
count_hits = features.has('projects:stream-hit-counts', project=project, actor=request.user)
cursor_result = search.query(count_hits=count_hits, **query_kwargs)
results = list(cursor_result)
context = serialize(results, request.user, StreamGroupSerializer(stats_period=stats_period))
# HACK: remove auto resolved entries
if query_kwargs.get('status') == GroupStatus.UNRESOLVED:
context = [r for r in context if r['status'] == 'unresolved']
response = Response(context)
self.add_cursor_headers(request, response, cursor_result)
if results and query not in SAVED_SEARCH_QUERIES:
advanced_search.send(project=project, sender=request.user)
return response
@attach_scenarios([bulk_update_issues_scenario])
def put(self, request, project):
"""
Bulk Mutate a List of Issues
````````````````````````````
Bulk mutate various attributes on issues. The list of issues
to modify is given through the `id` query parameter. It is repeated
for each issue that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
The following attributes can be modified and are supplied as
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be mutated. This
parameter shall be repeated for each issue. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to issues of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"ignored"``.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:param string status: the new status for the issues. Valid values
are ``"resolved"``, ``resolvedInNextRelease``,
``"unresolved"``, and ``"ignored"``.
:param int ignoreDuration: the number of minutes to ignore this issue.
:param boolean isPublic: sets the issue to public or private.
        :param boolean merge: allows merging or unmerging different issues.
:param string assignedTo: the username of the user that should be
assigned to this issue.
:param boolean hasSeen: in case this API call is invoked with a user
context this allows changing of the flag
that indicates if the user has seen the
event.
:param boolean isBookmarked: in case this API call is invoked with a
user context this allows changing of
the bookmark flag.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupValidator(
data=request.DATA,
partial=True,
context={'project': project},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = dict(serializer.object)
acting_user = request.user if request.user.is_authenticated() else None
if not group_ids:
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
# bulk mutations are limited to 1000 items
# TODO(dcramer): it'd be nice to support more than this, but its
# a bit too complicated right now
query_kwargs['limit'] = 1000
cursor_result = search.query(**query_kwargs)
group_list = list(cursor_result)
group_ids = [g.id for g in group_list]
is_bulk = len(group_ids) > 1
queryset = Group.objects.filter(
id__in=group_ids,
)
discard = result.get('discard')
if discard:
if not features.has('projects:custom-filters', project, actor=request.user):
return Response({'detail': ['You do not have that feature enabled']}, status=400)
group_list = list(queryset)
groups_to_delete = []
for group in group_list:
with transaction.atomic():
try:
tombstone = GroupTombstone.objects.create(
previous_group_id=group.id,
actor_id=acting_user.id if acting_user else None,
**{name: getattr(group, name) for name in TOMBSTONE_FIELDS_FROM_GROUP}
)
except IntegrityError:
# in this case, a tombstone has already been created
# for a group, so no hash updates are necessary
pass
else:
groups_to_delete.append(group)
GroupHash.objects.filter(
group=group,
).update(
group=None,
group_tombstone_id=tombstone.id,
)
self._delete_groups(request, project, groups_to_delete)
return Response(status=204)
statusDetails = result.pop('statusDetails', result)
status = result.get('status')
if status in ('resolved', 'resolvedInNextRelease'):
if status == 'resolvedInNextRelease' or statusDetails.get('inNextRelease'):
release = Release.objects.filter(
projects=project,
organization_id=project.organization_id,
).order_by('-date_added')[0]
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
'version': '',
}
status_details = {
'inNextRelease': True,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_next_release
res_status = GroupResolution.Status.pending
elif statusDetails.get('inRelease'):
release = statusDetails['inRelease']
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
'version': release.version,
}
status_details = {
'inRelease': release.version,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_release
res_status = GroupResolution.Status.resolved
else:
release = None
activity_type = Activity.SET_RESOLVED
activity_data = {}
status_details = {}
now = timezone.now()
for group in group_list:
with transaction.atomic():
if release:
resolution_params = {
'release': release,
'type': res_type,
'status': res_status,
'actor_id': request.user.id
if request.user.is_authenticated() else None,
}
resolution, created = GroupResolution.objects.get_or_create(
group=group,
defaults=resolution_params,
)
if not created:
resolution.update(datetime=timezone.now(), **resolution_params)
else:
resolution = None
affected = Group.objects.filter(
id=group.id,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
if not resolution:
created = affected
group.status = GroupStatus.RESOLVED
group.resolved_at = now
self._subscribe_and_assign_issue(acting_user, group, result)
if created:
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=acting_user,
ident=resolution.id if resolution else None,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
issue_resolved_in_release.send(
group=group,
project=project,
sender=acting_user,
)
result.update({
'status': 'resolved',
'statusDetails': status_details,
})
elif status:
new_status = STATUS_CHOICES[result['status']]
with transaction.atomic():
happened = queryset.exclude(
status=new_status,
).update(
status=new_status,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if new_status == GroupStatus.IGNORED:
ignore_duration = (
statusDetails.pop('ignoreDuration', None) or
statusDetails.pop('snoozeDuration', None)
) or None
ignore_count = statusDetails.pop('ignoreCount', None) or None
ignore_window = statusDetails.pop('ignoreWindow', None) or None
ignore_user_count = statusDetails.pop('ignoreUserCount', None) or None
ignore_user_window = statusDetails.pop('ignoreUserWindow', None) or None
if ignore_duration or ignore_count or ignore_user_count:
if ignore_duration:
ignore_until = timezone.now() + timedelta(
minutes=ignore_duration,
)
else:
ignore_until = None
for group in group_list:
state = {}
if ignore_count and not ignore_window:
state['times_seen'] = group.times_seen
if ignore_user_count and not ignore_user_window:
state['users_seen'] = group.count_users_seen()
                        GroupSnooze.objects.create_or_update(
                            group=group,
                            values={
                                'until': ignore_until,
                                'count': ignore_count,
                                'window': ignore_window,
                                'user_count': ignore_user_count,
                                'user_window': ignore_user_window,
                                'state': state,
                                'actor_id': request.user.id if request.user.is_authenticated() else None,
                            }
                        )
result['statusDetails'] = {
'ignoreCount': ignore_count,
'ignoreUntil': ignore_until,
'ignoreUserCount': ignore_user_count,
'ignoreUserWindow': ignore_user_window,
'ignoreWindow': ignore_window,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
else:
GroupSnooze.objects.filter(
group__in=group_ids,
).delete()
ignore_until = None
result['statusDetails'] = {}
else:
result['statusDetails'] = {}
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
activity_data = {}
elif new_status == GroupStatus.IGNORED:
activity_type = Activity.SET_IGNORED
activity_data = {
'ignoreCount': ignore_count,
'ignoreDuration': ignore_duration,
'ignoreUntil': ignore_until,
'ignoreUserCount': ignore_user_count,
'ignoreUserWindow': ignore_user_window,
'ignoreWindow': ignore_window,
}
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=acting_user,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity.send_notification()
if 'assignedTo' in result:
if result['assignedTo']:
for group in group_list:
GroupAssignee.objects.assign(group, result['assignedTo'], acting_user)
if 'isSubscribed' not in result or result['assignedTo'] != request.user:
GroupSubscription.objects.subscribe(
group=group,
user=result['assignedTo'],
reason=GroupSubscriptionReason.assigned,
)
result['assignedTo'] = serialize(result['assignedTo'])
else:
for group in group_list:
GroupAssignee.objects.deassign(group, acting_user)
if result.get('hasSeen') and project.member_set.filter(user=acting_user).exists():
for group in group_list:
instance, created = create_or_update(
GroupSeen,
group=group,
user=acting_user,
project=group.project,
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=project,
group=group,
user=acting_user,
)
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.bookmark,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
# TODO(dcramer): we could make these more efficient by first
        # querying for which rows are present (if N > 2), flipping the flag
# on those rows, and then creating the missing rows
if result.get('isSubscribed') in (True, False):
is_subscribed = result['isSubscribed']
for group in group_list:
# NOTE: Subscribing without an initiating event (assignment,
# commenting, etc.) clears out the previous subscription reason
# to avoid showing confusing messaging as a result of this
# action. It'd be jarring to go directly from "you are not
# subscribed" to "you were subscribed due since you were
# assigned" just by clicking the "subscribe" button (and you
# may no longer be assigned to the issue anyway.)
GroupSubscription.objects.create_or_update(
user=acting_user,
group=group,
project=project,
values={
'is_active': is_subscribed,
'reason': GroupSubscriptionReason.unknown,
},
)
result['subscriptionDetails'] = {
'reason': SUBSCRIPTION_REASON_MAP.get(
GroupSubscriptionReason.unknown,
'unknown',
),
}
if result.get('isPublic'):
queryset.update(is_public=True)
for group in group_list:
if group.is_public:
continue
group.is_public = True
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PUBLIC,
user=acting_user,
)
elif result.get('isPublic') is False:
queryset.update(is_public=False)
for group in group_list:
if not group.is_public:
continue
group.is_public = False
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PRIVATE,
user=acting_user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
children = []
transaction_id = uuid4().hex
for group in group_list:
if group == primary_group:
continue
children.append(group)
group.update(status=GroupStatus.PENDING_MERGE)
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
transaction_id=transaction_id,
)
Activity.objects.create(
project=primary_group.project,
group=primary_group,
type=Activity.MERGE,
user=acting_user,
data={
'issues': [{
'id': c.id
} for c in children],
},
)
result['merge'] = {
'parent': six.text_type(primary_group.id),
'children': [six.text_type(g.id) for g in children],
}
return Response(result)
@attach_scenarios([bulk_remove_issues_scenario])
def delete(self, request, project):
"""
Bulk Remove a List of Issues
````````````````````````````
Permanently remove the given issues. The list of issues to
modify is given through the `id` query parameter. It is repeated
for each issue that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be removed. This
parameter shall be repeated for each issue.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = list(
Group.objects.filter(
project=project,
id__in=set(group_ids),
).exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]
)
)
else:
# missing any kind of filter
return Response(
'{"detail": "You must specify a list of IDs for this operation"}', status=400
)
if not group_list:
return Response(status=204)
self._delete_groups(request, project, group_list)
return Response(status=204)
def _delete_groups(self, request, project, group_list):
group_ids = [g.id for g in group_list]
Group.objects.filter(
id__in=group_ids,
).exclude(status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]).update(status=GroupStatus.PENDING_DELETION)
GroupHash.objects.filter(group__id__in=group_ids).delete()
transaction_id = uuid4().hex
for group in group_list:
delete_group.apply_async(
kwargs={
'object_id': group.id,
'transaction_id': transaction_id,
},
countdown=3600,
)
self.create_audit_entry(
request=request,
organization_id=project.organization_id,
target_object=group.id,
transaction_id=transaction_id,
)
delete_logger.info(
'object.delete.queued',
extra={
'object_id': group.id,
'transaction_id': transaction_id,
'model': type(group).__name__,
}
)
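# Illustrative client-side sketch (not part of the endpoint module): exercising
# the bulk-mutate endpoint documented above with the `requests` library. The
# base URL, org/project slugs, issue ids and token are placeholders, and the
# conventional '/api/0/' prefix plus Bearer token auth are assumptions rather
# than something defined in this file.
def _example_bulk_resolve(base_url, org_slug, project_slug, issue_ids, token):
    import requests
    return requests.put(
        u'{}/api/0/projects/{}/{}/issues/'.format(base_url, org_slug, project_slug),
        params=[('id', issue_id) for issue_id in issue_ids],
        json={'status': 'resolved'},
        headers={'Authorization': u'Bearer {}'.format(token)},
    )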
| {
"repo_name": "jean/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "1",
"size": "37609",
"license": "bsd-3-clause",
"hash": 4815199713830203000,
"line_mean": 39.5269396552,
"line_max": 100,
"alpha_frac": 0.517588875,
"autogenerated": false,
"ratio": 4.988592651545297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6006181526545298,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .disinfect import (
Field,
MappedValueError,
Mapping,
MultiValueError,
Test,
test_and_return,
validate,
sanitize,
)
from .tests import (
Boolean,
Date,
DateTime,
Email,
Enum,
Float,
InstanceOf,
Int,
List,
ListOf,
Regex,
Set,
String,
Upload,
IsNone,
)
__version__ = '0.2.1'
__package__ = 'disinfect'
__title__ = 'Disinfect'
__description__ = 'Disinfect: Destroy bad input.'
__uri__ = 'https://github.com/corverdevelopment/Disinfect/'
__author__ = 'Nils Corver'
__email__ = 'nils@corverdevelopment.nl'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2016 Corver Development B.V.'
__all__ = [
'Field',
'MappedValueError',
'Mapping',
'MultiValueError',
'Test',
'test_and_return',
'validate',
'sanitize',
'Boolean',
'Date',
'DateTime',
'Email',
'Enum',
'Float',
'InstanceOf',
'Int',
'List',
'ListOf',
'Regex',
'Set',
'String',
'Upload',
]
| {
"repo_name": "CorverDevelopment/Disinfect",
"path": "src/disinfect/__init__.py",
"copies": "1",
"size": "1094",
"license": "mit",
"hash": -4702460455042760000,
"line_mean": 15.0882352941,
"line_max": 64,
"alpha_frac": 0.5594149909,
"autogenerated": false,
"ratio": 3.2176470588235295,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.927706204972353,
"avg_score": 0,
"num_lines": 68
} |
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion
import difflib
import math
import os
import numpy as np
from toolz import frequencies, concat
from .core import Array
from ..async import get_sync
from ..sharedict import ShareDict
if LooseVersion(np.__version__) >= '1.10.0':
allclose = np.allclose
else:
def allclose(a, b, **kwargs):
if kwargs.pop('equal_nan', False):
a_nans = np.isnan(a)
b_nans = np.isnan(b)
if not (a_nans == b_nans).all():
return False
a = a[~a_nans]
b = b[~b_nans]
return np.allclose(a, b, **kwargs)
def _not_empty(x):
return x.shape and 0 not in x.shape
def _check_dsk(dsk):
""" Check that graph is well named and non-overlapping """
if not isinstance(dsk, ShareDict):
return
assert all(isinstance(k, str) for k in dsk.dicts)
freqs = frequencies(concat(dsk.dicts.values()))
non_one = {k: v for k, v in freqs.items() if v != 1}
assert not non_one, non_one
def assert_eq_shape(a, b, check_nan=True):
for aa, bb in zip(a, b):
if math.isnan(aa) or math.isnan(bb):
if check_nan:
assert math.isnan(aa) == math.isnan(bb)
else:
assert aa == bb
def assert_eq(a, b, check_shape=True, **kwargs):
a_original = a
b_original = b
if isinstance(a, Array):
assert a.dtype is not None
adt = a.dtype
_check_dsk(a.dask)
a = a.compute(get=get_sync)
if _not_empty(a):
assert a.dtype == a_original.dtype
if check_shape:
assert_eq_shape(a_original.shape, a.shape, check_nan=False)
else:
adt = getattr(a, 'dtype', None)
if isinstance(b, Array):
assert b.dtype is not None
bdt = b.dtype
_check_dsk(b.dask)
b = b.compute(get=get_sync)
if _not_empty(b):
assert b.dtype == b_original.dtype
if check_shape:
assert_eq_shape(b_original.shape, b.shape, check_nan=False)
else:
bdt = getattr(b, 'dtype', None)
if str(adt) != str(bdt):
diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())
raise AssertionError('string repr are different' + os.linesep +
os.linesep.join(diff))
try:
if _not_empty(a) and _not_empty(b):
# Treat all empty arrays as equivalent
assert a.shape == b.shape
assert allclose(a, b, **kwargs)
return
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
assert c.all()
else:
assert c
return True
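# Illustrative sketch (not part of the original module): the helpers above also
# accept plain numpy inputs, which fall through to the non-dask comparison path.
def _example_usage():
    x = np.array([1.0, np.nan, 3.0])
    y = np.array([1.0, np.nan, 3.0])
    assert allclose(x, y, equal_nan=True)   # NaNs compare equal when requested
    assert_eq(np.arange(6).reshape(2, 3),   # same dtype, shape and contents
              np.arange(6).reshape(2, 3))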
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/utils.py",
"copies": "1",
"size": "2735",
"license": "bsd-3-clause",
"hash": -2554400838261626400,
"line_mean": 26.0792079208,
"line_max": 74,
"alpha_frac": 0.5674588665,
"autogenerated": false,
"ratio": 3.427318295739348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9493070094980898,
"avg_score": 0.0003414134516899966,
"num_lines": 101
} |
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion
import warnings
import pandas as pd
import numpy as np
from ..core import DataFrame, Series
from ...base import tokenize
def getnanos(rule):
try:
return getattr(rule, 'nanos', None)
except ValueError:
return None
if LooseVersion(pd.__version__) >= '0.18.0':
def _resample_apply(s, rule, how, resample_kwargs):
return getattr(s.resample(rule, **resample_kwargs), how)()
def _resample(obj, rule, how, **kwargs):
resampler = Resampler(obj, rule, **kwargs)
if how is not None:
raise FutureWarning(("how in .resample() is deprecated "
"the new syntax is .resample(...)"
".{0}()").format(how))
return getattr(resampler, how)()
return resampler
else:
def _resample_apply(s, rule, how, resample_kwargs):
return s.resample(rule, how=how, **resample_kwargs)
def _resample(obj, rule, how, **kwargs):
how = how or 'mean'
return getattr(Resampler(obj, rule, **kwargs), how)()
def _resample_series(series, start, end, reindex_closed, rule,
resample_kwargs, how, fill_value):
out = _resample_apply(series, rule, how, resample_kwargs)
return out.reindex(pd.date_range(start, end, freq=rule,
closed=reindex_closed),
fill_value=fill_value)
def _resample_bin_and_out_divs(divisions, rule, closed='left', label='left'):
rule = pd.datetools.to_offset(rule)
g = pd.TimeGrouper(rule, how='count', closed=closed, label=label)
# Determine bins to apply `how` to. Disregard labeling scheme.
divs = pd.Series(range(len(divisions)), index=divisions)
temp = divs.resample(rule, how='count', closed=closed, label='left')
tempdivs = temp.loc[temp > 0].index
# Cleanup closed == 'right' and label == 'right'
res = pd.offsets.Nano() if hasattr(rule, 'delta') else pd.offsets.Day()
if g.closed == 'right':
newdivs = tempdivs + res
else:
newdivs = tempdivs
if g.label == 'right':
outdivs = tempdivs + rule
else:
outdivs = tempdivs
newdivs = newdivs.tolist()
outdivs = outdivs.tolist()
# Adjust ends
if newdivs[0] < divisions[0]:
newdivs[0] = divisions[0]
if newdivs[-1] < divisions[-1]:
if len(newdivs) < len(divs):
setter = lambda a, val: a.append(val)
else:
setter = lambda a, val: a.__setitem__(-1, val)
setter(newdivs, divisions[-1])
if outdivs[-1] > divisions[-1]:
setter(outdivs, outdivs[-1])
elif outdivs[-1] < divisions[-1]:
setter(outdivs, temp.index[-1])
return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))
class Resampler(object):
def __init__(self, obj, rule, **kwargs):
self.obj = obj
rule = pd.datetools.to_offset(rule)
day_nanos = pd.datetools.Day().nanos
if getnanos(rule) and day_nanos % rule.nanos:
raise NotImplementedError('Resampling frequency %s that does'
' not evenly divide a day is not '
'implemented' % rule)
self._rule = rule
self._kwargs = kwargs
def _agg(self, how, columns=None, fill_value=np.nan):
rule = self._rule
kwargs = self._kwargs
name = 'resample-' + tokenize(self.obj, rule, kwargs, how)
# Create a grouper to determine closed and label conventions
newdivs, outdivs = _resample_bin_and_out_divs(self.obj.divisions, rule,
**kwargs)
# Repartition divs into bins. These won't match labels after mapping
partitioned = self.obj.repartition(newdivs, force=True)
keys = partitioned._keys()
dsk = partitioned.dask
args = zip(keys, outdivs, outdivs[1:], ['left']*(len(keys)-1) + [None])
for i, (k, s, e, c) in enumerate(args):
dsk[(name, i)] = (_resample_series, k, s, e, c,
rule, kwargs, how, fill_value)
if columns:
return DataFrame(dsk, name, columns, outdivs)
return Series(dsk, name, self.obj.name, outdivs)
def count(self):
return self._agg('count', fill_value=0)
def first(self):
return self._agg('first')
def last(self):
return self._agg('last')
def mean(self):
return self._agg('mean')
def min(self):
return self._agg('min')
def median(self):
return self._agg('median')
def max(self):
return self._agg('max')
def ohlc(self):
return self._agg('ohlc', columns=['open', 'high', 'low', 'close'])
def prod(self):
return self._agg('prod')
def sem(self):
return self._agg('sem')
def std(self):
return self._agg('std')
def sum(self):
return self._agg('sum')
def var(self):
return self._agg('var')
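# Illustrative sketch (not part of the original module): _resample_series can be
# exercised directly on an in-memory pandas Series; dask invokes it once per
# partition using the bin/output divisions from _resample_bin_and_out_divs. The
# frequency strings and sample data here are arbitrary.
def _example_resample_series():
    index = pd.date_range('2000-01-01', periods=6, freq='30T')
    s = pd.Series(range(6), index=index)
    return _resample_series(s, index[0], index[-1], None, '1H',
                            resample_kwargs={}, how='sum', fill_value=0)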
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/tseries/resample.py",
"copies": "1",
"size": "5137",
"license": "bsd-3-clause",
"hash": -421743431968018750,
"line_mean": 30.9068322981,
"line_max": 79,
"alpha_frac": 0.5693984816,
"autogenerated": false,
"ratio": 3.6381019830028327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9706352975393442,
"avg_score": 0.00022949784187809244,
"num_lines": 161
} |
from __future__ import absolute_import, division, print_function
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework.routers import SimpleRouter
from rest_framework_docs.views import DRFDocsView
from tests import views
accounts_urls = [
url(r'^login/$', views.LoginView.as_view(), name="login"),
url(r'^login2/$', views.LoginWithSerilaizerClassView.as_view(), name="login2"),
url(r'^register/$', views.UserRegistrationView.as_view(), name="register"),
url(r'^reset-password/$', view=views.PasswordResetView.as_view(), name="reset-password"),
url(r'^reset-password/confirm/$', views.PasswordResetConfirmView.as_view(), name="reset-password-confirm"),
url(r'^user/profile/$', views.UserProfileView.as_view(), name="profile"),
url(r'^test/$', views.TestView.as_view(), name="test-view"),
]
organisations_urls = [
url(r'^create/$', view=views.CreateOrganisationView.as_view(), name="create"),
url(r'^(?P<slug>[\w-]+)/members/$', view=views.OrganisationMembersView.as_view(), name="members"),
url(r'^(?P<slug>[\w-]+)/leave/$', view=views.LeaveOrganisationView.as_view(), name="leave"),
url(r'^(?P<slug>[\w-]+)/errored/$', view=views.OrganisationErroredView.as_view(), name="errored"),
url(r'^(?P<slug>[\w-]+)/$', view=views.RetrieveOrganisationView.as_view(), name="organisation"),
]
router = SimpleRouter()
router.register('organisation-model-viewsets', views.TestModelViewSet, base_name='organisation')
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^docs/', DRFDocsView.as_view(drf_router=router), name='drfdocs'),
# API
url(r'^accounts/', view=include(accounts_urls, namespace='accounts')),
url(r'^organisations/', view=include(organisations_urls, namespace='organisations')),
url(r'^', include(router.urls)),
# Endpoints without parents/namespaces
url(r'^another-login/$', views.LoginView.as_view(), name="login"),
]
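# Illustrative sketch (not part of the original urlconf): the namespaced routes
# above can be resolved with Django's reverse(), e.g. from a test case. The
# 'acme' slug is a placeholder.
def _example_reverse():
    from django.core.urlresolvers import reverse
    return (
        reverse('accounts:login'),                                  # /accounts/login/
        reverse('organisations:members', kwargs={'slug': 'acme'}),  # /organisations/acme/members/
        reverse('drfdocs'),                                         # /docs/
    )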
| {
"repo_name": "manosim/django-rest-framework-docs",
"path": "tests/urls.py",
"copies": "2",
"size": "1967",
"license": "bsd-2-clause",
"hash": 1385682343277960000,
"line_mean": 44.7441860465,
"line_max": 111,
"alpha_frac": 0.6929334011,
"autogenerated": false,
"ratio": 3.4388111888111887,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.5131744589911189,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from dynd import nd
from glob import glob
from itertools import chain
from datashape import dshape, var
from datashape.predicates import isdimension
from .core import DataDescriptor
from .. import compatibility
from ..utils import get, ndget
__all__ = 'Concat', 'Stack'
class Concat(DataDescriptor):
def __init__(self, descriptors):
assert all(isdimension(ddesc.dshape[0]) for ddesc in descriptors)
self.descriptors = descriptors
@property
def dshape(self):
return var * self.descriptors[0].dshape.subarray(1)
def _iter(self):
return chain.from_iterable(self.descriptors)
def _chunks(self, **kwargs):
return (chunk for dd in self.descriptors
for chunk in dd.chunks(**kwargs))
def _get_py(self, key):
if not isinstance(key, tuple):
return get(key, iter(self))
result = get(key[0], iter(self))
if isinstance(key[0], (list, slice)):
return (ndget(key[1:], row) for row in result)
else:
return ndget(key[1:], result)
class Stack(DataDescriptor):
def __init__(self, descriptors):
self.descriptors = descriptors
@property
def dshape(self):
return len(self.descriptors) * self.descriptors[0].dshape
def _iter(self):
return (dd.as_py() for dd in self.descriptors)
def chunks(self, **kwargs):
return (dd.as_dynd() for dd in self.descriptors)
def _get_py(self, key):
if isinstance(key, tuple):
result = get(key[0], self.descriptors)
if isinstance(key[0], (list, slice)):
return (s._get_py(key[1:]) for s in result)
else:
return result._get_py(key[1:])
else:
result = get(key, self.descriptors)
if isinstance(key, (list, slice)):
return (s.as_py() for s in result)
else:
return result.as_py()
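# Illustrative sketch (not part of the original module): concatenating two small
# in-memory descriptors. The Python descriptor and its schema syntax are assumed
# from blaze.data.python; iterating a Concat simply chains its inputs.
def _example_concat():
    from blaze.data.python import Python
    left = Python([['Alice', 100]], schema='{name: string, amount: int32}')
    right = Python([['Bob', 200]], schema='{name: string, amount: int32}')
    return list(Concat([left, right]))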
| {
"repo_name": "aterrel/blaze",
"path": "blaze/data/meta.py",
"copies": "2",
"size": "2025",
"license": "bsd-3-clause",
"hash": 3433165095077093400,
"line_mean": 28.347826087,
"line_max": 73,
"alpha_frac": 0.6004938272,
"autogenerated": false,
"ratio": 3.87189292543021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5472386752630211,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from dynd import nd
from glob import glob
from itertools import chain
from datashape import dshape, Var
from .core import DataDescriptor
from .. import py2help
__all__ = 'Files',
class Files(DataDescriptor):
immutable = True
deferred = False
appendable = False
remote = False
persistent = True
def __init__(self, files, descriptor, subdshape=None, schema=None,
open=open):
if isinstance(files, py2help._strtypes):
files = glob(files)
self.filenames = files
self.open = open
self.descriptor = descriptor
if schema and not subdshape:
subdshape = Var() * schema
self.subdshape = dshape(subdshape)
@property
def dshape(self):
if isinstance(self.subdshape[0], Var):
return self.subdshape
else:
return Var() * self.subdshape
def _iter(self):
return chain.from_iterable(self.descriptor(fn,
dshape=self.subdshape,
open=self.open)
for fn in self.filenames)
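# Illustrative sketch (not part of the original module): Files glues a glob of
# filenames to a per-file descriptor. `csv_descriptor` is a hypothetical
# DataDescriptor class accepting (filename, dshape=..., open=...), matching the
# way _iter above instantiates self.descriptor; no such class is defined here.
def _example_files(csv_descriptor):
    return Files('data/part-*.csv', csv_descriptor,
                 subdshape='var * {name: string, amount: int32}')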
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/filesystem.py",
"copies": "1",
"size": "1228",
"license": "bsd-3-clause",
"hash": -4884883343233145000,
"line_mean": 26.9090909091,
"line_max": 73,
"alpha_frac": 0.5716612378,
"autogenerated": false,
"ratio": 4.370106761565836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5441767999365836,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from dynd import nd
import datashape
from datashape import DataShape, dshape, Record
from datashape.user import validate, issubschema
from numbers import Number
from collections import Iterable
import numpy as np
from ..dispatch import dispatch
__all__ = ['into', 'discover']
@dispatch(type, object)
def into(a, b):
f = into.resolve((a, type(b)))
try:
a = a()
except:
pass
return f(a, b)
@dispatch((list, tuple, set), (list, tuple, set))
def into(a, b):
return type(a)(b)
@dispatch(dict, (list, tuple, set))
def into(a, b):
return dict(b)
@dispatch((list, tuple, set), dict)
def into(a, b):
return type(a)(map(type(a), sorted(b.items(), key=lambda x: x[0])))
@dispatch(nd.array, (Iterable, Number, str))
def into(a, b):
return nd.array(b)
@dispatch(list, nd.array)
def into(a, b):
return nd.as_py(b)
@dispatch(np.ndarray, nd.array)
def into(a, b):
return nd.as_numpy(b, allow_copy=True)
@dispatch(np.ndarray, Iterable)
def into(a, b):
return np.asarray(b)
@dispatch(list, np.ndarray)
def into(a, b):
return b.tolist()
from blaze.data import DataDescriptor
from pandas import DataFrame
@dispatch(DataFrame, DataDescriptor)
def into(a, b):
return DataFrame(list(b), columns=b.columns)
@dispatch(DataFrame, nd.array)
def into(a, b):
ds = dshape(nd.dshape_of(b))
if list(a.columns):
names = a.columns
elif isinstance(ds[-1], Record):
names = ds[-1].names
else:
names = None
if names:
return DataFrame(nd.as_py(b), columns=names)
else:
return DataFrame(nd.as_py(b))
@dispatch(DataFrame, (list, tuple))
def into(df, seq):
if list(df.columns):
return DataFrame(list(seq), columns=df.columns)
else:
return DataFrame(list(seq))
@dispatch(nd.array, DataFrame)
def into(a, df):
schema = discover(df)
arr = nd.empty(str(schema))
for i in range(len(df.columns)):
arr[:, i] = np.asarray(df[df.columns[i]])
return arr
@dispatch(DataFrame)
def discover(df):
obj = datashape.coretypes.object_
names = list(df.columns)
dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))
dtypes = [datashape.string if dt == obj else dt for dt in dtypes]
schema = Record(list(zip(names, dtypes)))
return len(df) * schema
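# Illustrative sketch (not part of the original module): a few of the registered
# conversions above, driven through the class-based entry point into(type, value).
def _example_into():
    assert into(list, (1, 2, 3)) == [1, 2, 3]          # tuple -> list
    assert into(set, [1, 2, 2, 3]) == set([1, 2, 3])   # list -> set
    assert (into(np.ndarray, [1, 2, 3]) == np.array([1, 2, 3])).all()
    return into(DataFrame, [('Alice', 100), ('Bob', 200)])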
| {
"repo_name": "aterrel/blaze",
"path": "blaze/api/into.py",
"copies": "1",
"size": "2388",
"license": "bsd-3-clause",
"hash": -6126362583294449000,
"line_mean": 22.1844660194,
"line_max": 71,
"alpha_frac": 0.6507537688,
"autogenerated": false,
"ratio": 3.081290322580645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42320440913806445,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from dynd import nd
from blaze.compute.core import compute
from blaze.expr import *
from blaze.compute.dynd import *
def eq(a, b):
return nd.as_py(a) == nd.as_py(b)
n = Symbol('n', '3 * 5 * int')
nx = nd.array([[ 1, 2, 3, 4, 5],
[11, 22, 33, 44, 55],
[21, 22, 23, 24, 25]], type=str(n.dshape))
rec = Symbol('s', '3 * var * {name: string, amount: int}')
recx = nd.array([[('Alice', 1), ('Bob', 2)],
[('Charlie', 3)],
[('Dennis', 4), ('Edith', 5), ('Frank', 6)]],
type = str(rec.dshape))
def test_symbol():
assert eq(compute(n, nx), nx)
def test_slice():
assert eq(compute(n[0], nx), nx[0])
assert eq(compute(n[0, :3], nx), nx[0, :3])
def test_field():
assert eq(compute(rec.amount, recx), recx.amount)
def test_arithmetic():
# assert eq(compute(n + 1, nx), nx + 1)
assert eq(compute(rec.amount + 1, recx), recx.amount + 1)
assert eq(compute(-rec.amount, recx), 0-recx.amount)
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/tests/test_dynd_compute.py",
"copies": "1",
"size": "1070",
"license": "bsd-3-clause",
"hash": -606230794249742800,
"line_mean": 25.0975609756,
"line_max": 64,
"alpha_frac": 0.5504672897,
"autogenerated": false,
"ratio": 2.772020725388601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3822488015088601,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from dynd import nd
from blaze.serve.server import Server
from blaze.data.python import Python
from blaze.serve.index import parse_index, emit_index
from blaze.serve.client import Client
accounts = Python([['Alice', 100], ['Bob', 200]],
schema='{name: string, amount: int32}')
cities = Python([['Alice', 'NYC'], ['Bob', 'LA'], ['Charlie', 'Beijing']],
schema='{name: string, city: string}')
server = Server(datasets={'accounts': accounts, 'cities': cities})
test = server.app.test_client()
import blaze.serve.client as client
client.requests = test # OMG monkey patching
dd = Client('http://localhost:5000', 'accounts')
def test_dshape():
assert dd.dshape == accounts.dshape
def test_get_py():
assert list(dd.py[0:, 'name']) == list(accounts.py[:, 'name'])
def test_get_dynd():
result = dd.dynd[0:, 'name']
expected = accounts.dynd[:, 'name']
assert nd.as_py(result) == nd.as_py(expected)
def test_iter():
assert list(dd) == list(accounts)
def test_chunks():
assert [nd.as_py(chunk) for chunk in dd.chunks()] == \
[nd.as_py(chunk) for chunk in accounts.chunks()]
| {
"repo_name": "aterrel/blaze",
"path": "blaze/serve/tests/test_client.py",
"copies": "1",
"size": "1222",
"license": "bsd-3-clause",
"hash": 718005206594917500,
"line_mean": 25.5652173913,
"line_max": 74,
"alpha_frac": 0.6464811784,
"autogenerated": false,
"ratio": 3.2586666666666666,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9392085297393135,
"avg_score": 0.002612509534706331,
"num_lines": 46
} |
from __future__ import absolute_import, division, print_function
from dynd.ndt.type import make_var_dim, make_fixed_dim, make_fixed_dim_kind, type as w_type
__all__ = ['var', 'fixed']
class _Dim(object):
__slots__ = []
def __mul__(self, rhs):
if isinstance(rhs, w_type):
# Apply all the dimensions to get
# produce a type
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, (str, type)):
# Allow:
# ndt.fixed * 'int32'
# ndt.fixed * int
rhs = w_type(rhs)
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, _Dim):
# Combine the dimension fragments
return _DimFragment(self.dims + rhs.dims)
else:
raise TypeError('Expected a dynd dimension or type, not %r' % rhs)
def __pow__(self, count):
return _DimFragment(self.dims * count)
class _DimFragment(_Dim):
__slots__ = ['dims']
def __init__(self, dims):
self.dims = dims
def __repr__(self):
return ' * '.join(repr(dim) for dim in self.dims)
class _Var(_Dim):
"""
Creates a var dimension when combined with other types.
Examples
--------
>>> ndt.var * ndt.int32
ndt.type('var * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_var_dim(eltype)
def __repr__(self):
return 'ndt.var'
class _Fixed(_Dim):
"""
Creates a fixed dimension when combined with other types.
Examples
--------
>>> ndt.fixed[3] * ndt.int32
ndt.type('3 * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
return (self,)
def create(self, eltype):
if self.dim_size is None:
return make_fixed_dim_kind(eltype)
else:
return make_fixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _Fixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.fixed[%d]' % self.dim_size
else:
return 'ndt.fixed'
var = _Var()
fixed = _Fixed()
| {
"repo_name": "libdynd/dynd-python",
"path": "dynd/ndt/dim_helpers.py",
"copies": "8",
"size": "2558",
"license": "bsd-2-clause",
"hash": -2271444841386698000,
"line_mean": 22.9158878505,
"line_max": 91,
"alpha_frac": 0.5293197811,
"autogenerated": false,
"ratio": 3.5185694635488307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8047889244648831,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from dynd._pydynd import w_type as type, \
make_byteswap, make_fixedbytes, make_convert, \
make_view, \
make_unaligned, make_fixedstring, make_string, \
make_pointer, make_struct, make_cstruct, \
make_strided_dim, make_fixed_dim, make_cfixed_dim, make_var_dim, \
make_categorical, replace_dtype, extract_dtype, \
factor_categorical, make_bytes, make_property, \
make_reversed_property, cuda_support
void = type('void')
bool = type('bool')
int8 = type('int8')
int16 = type('int16')
int32 = type('int32')
int64 = type('int64')
int128 = type('int128')
intptr = type('intptr')
uint8 = type('uint8')
uint16 = type('uint16')
uint32 = type('uint32')
uint64 = type('uint64')
uint128 = type('uint128')
uintptr = type('uintptr')
float16 = type('float16')
float32 = type('float32')
float64 = type('float64')
complex_float32 = type('complex[float32]')
complex_float64 = type('complex[float64]')
# Aliases for people comfortable with the NumPy complex namings
complex64 = complex_float32
complex128 = complex_float64
string = type('string')
date = type('date')
time = type('time')
datetime = type('datetime')
datetimeutc = type('datetime[tz="UTC"]')
json = type('json')
bytes = type('bytes')
# Includes ctypes definitions
from . import dynd_ctypes as ctypes
# Some classes making dimension construction easier
from .dim_helpers import *
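# Illustrative sketch (not part of the original module, assuming the helpers in
# dim_helpers behave as their docstrings describe): the aliases above combine
# with the re-exported dimension helpers, and ndt.type also accepts datashape
# strings directly.
def _example_types():
    assert complex64 is complex_float32      # NumPy-style alias for the same type
    vec = fixed[3] * float64                 # 3 * float64 built from helpers
    arr = var * fixed[2] * int32             # var * 2 * int32
    return vec, arr, type('3 * int32')       # strings parse to types as well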
| {
"repo_name": "aterrel/dynd-python",
"path": "dynd/ndt/__init__.py",
"copies": "1",
"size": "1457",
"license": "bsd-2-clause",
"hash": -6558407968968507000,
"line_mean": 30,
"line_max": 74,
"alpha_frac": 0.693891558,
"autogenerated": false,
"ratio": 3.252232142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44461237008571425,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from dynd._pydynd import w_type, \
make_var_dim, make_strided_dim, make_fixed_dim, make_cfixed_dim
__all__ = ['var', 'strided', 'fixed', 'cfixed']
class _Dim(object):
__slots__ = []
def __mul__(self, rhs):
if isinstance(rhs, w_type):
# Apply all the dimensions to get
# produce a type
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, (str, type)):
# Allow:
# ndt.strided * 'int32'
# ndt.strided * int
rhs = w_type(rhs)
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, _Dim):
# Combine the dimension fragments
return _DimFragment(self.dims + rhs.dims)
else:
raise TypeError('Expected a dynd dimension or type, not %r' % rhs)
def __pow__(self, count):
return _DimFragment(self.dims * count)
class _DimFragment(_Dim):
__slots__ = ['dims']
def __init__(self, dims):
self.dims = dims
def __repr__(self):
return ' * '.join(repr(dim) for dim in self.dims)
class _Var(_Dim):
"""
Creates a var dimension when combined with other types.
Examples
--------
>>> ndt.var * ndt.int32
ndt.type('var * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_var_dim(eltype)
def __repr__(self):
return 'ndt.var'
class _Strided(_Dim):
"""
Creates a strided dimension when combined with other types.
Examples
--------
>>> ndt.strided * ndt.int32
ndt.type('strided * int32')
>>> ndt.fixed[5] * ndt.strided * ndt.float64
ndt.type('5 * strided * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_strided_dim(eltype)
def __repr__(self):
return 'ndt.strided'
class _Fixed(_Dim):
"""
Creates a fixed dimension when combined with other types.
Examples
--------
>>> ndt.fixed[3] * ndt.int32
ndt.type('3 * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.fixed[dim_size],' +
' not just ndt.fixed')
def create(self, eltype):
return make_fixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _Fixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.fixed[%d]' % self.dim_size
else:
return 'ndt.fixed'
class _CFixed(_Dim):
"""
Creates a cfixed dimension when combined with other types.
Examples
--------
>>> ndt.cfixed[3] * ndt.int32
ndt.type('cfixed[3] * int32')
>>> ndt.fixed[5] * ndt.cfixed[2] * ndt.float64
ndt.type('5 * cfixed[2] * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.cfixed[dim_size],' +
' not just ndt.cfixed')
def create(self, eltype):
return make_cfixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _CFixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.cfixed[%d]' % self.dim_size
else:
return 'ndt.cfixed'
var = _Var()
strided = _Strided()
fixed = _Fixed()
cfixed = _CFixed()
| {
"repo_name": "aterrel/dynd-python",
"path": "dynd/ndt/dim_helpers.py",
"copies": "1",
"size": "4145",
"license": "bsd-2-clause",
"hash": -3439929169362638300,
"line_mean": 23.0988372093,
"line_max": 78,
"alpha_frac": 0.5305186972,
"autogenerated": false,
"ratio": 3.4426910299003324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9470373652227956,
"avg_score": 0.0005672149744753262,
"num_lines": 172
} |
from __future__ import absolute_import, division, print_function
from enum import Enum
class CipherSuites(Enum):
TLS_NULL_WITH_NULL_NULL = 0x0000
TLS_RSA_WITH_NULL_MD5 = 0x0001
TLS_RSA_WITH_NULL_SHA = 0x0002
TLS_RSA_EXPORT_WITH_RC4_40_MD5 = 0x0003
TLS_RSA_WITH_RC4_128_MD5 = 0x0004
TLS_RSA_WITH_RC4_128_SHA = 0x0005
TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = 0x0006
TLS_RSA_WITH_IDEA_CBC_SHA = 0x0007
TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0008
TLS_RSA_WITH_DES_CBC_SHA = 0x0009
TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A
TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x000B
TLS_DH_DSS_WITH_DES_CBC_SHA = 0x000C
TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = 0x000D
TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x000E
TLS_DH_RSA_WITH_DES_CBC_SHA = 0x000F
TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = 0x0010
TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x0011
TLS_DHE_DSS_WITH_DES_CBC_SHA = 0x0012
TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = 0x0013
TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0014
TLS_DHE_RSA_WITH_DES_CBC_SHA = 0x0015
TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = 0x0016
TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = 0x0017
TLS_DH_anon_WITH_RC4_128_MD5 = 0x0018
TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = 0x0019
TLS_DH_anon_WITH_DES_CBC_SHA = 0x001A
TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = 0x001B
TLS_KRB5_WITH_DES_CBC_SHA = 0x001E
TLS_KRB5_WITH_3DES_EDE_CBC_SHA = 0x001F
TLS_KRB5_WITH_RC4_128_SHA = 0x0020
TLS_KRB5_WITH_IDEA_CBC_SHA = 0x0021
TLS_KRB5_WITH_DES_CBC_MD5 = 0x0022
TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = 0x0023
TLS_KRB5_WITH_RC4_128_MD5 = 0x0024
TLS_KRB5_WITH_IDEA_CBC_MD5 = 0x0025
TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = 0x0026
TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = 0x0027
TLS_KRB5_EXPORT_WITH_RC4_40_SHA = 0x0028
TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = 0x0029
TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = 0x002A
TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = 0x002B
TLS_PSK_WITH_NULL_SHA = 0x002C
TLS_DHE_PSK_WITH_NULL_SHA = 0x002D
TLS_RSA_PSK_WITH_NULL_SHA = 0x002E
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
TLS_DH_DSS_WITH_AES_128_CBC_SHA = 0x0030
TLS_DH_RSA_WITH_AES_128_CBC_SHA = 0x0031
TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
TLS_DH_anon_WITH_AES_128_CBC_SHA = 0x0034
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
TLS_DH_DSS_WITH_AES_256_CBC_SHA = 0x0036
TLS_DH_RSA_WITH_AES_256_CBC_SHA = 0x0037
TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
TLS_DH_anon_WITH_AES_256_CBC_SHA = 0x003A
TLS_RSA_WITH_NULL_SHA256 = 0x003B
TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = 0x003E
TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = 0x003F
TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0041
TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0042
TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0043
TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0044
TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0045
TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = 0x0046
TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = 0x0068
TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = 0x0069
TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
TLS_DH_anon_WITH_AES_128_CBC_SHA256 = 0x006C
TLS_DH_anon_WITH_AES_256_CBC_SHA256 = 0x006D
TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0084
TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0085
TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0086
TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0087
TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0088
TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = 0x0089
TLS_PSK_WITH_RC4_128_SHA = 0x008A
TLS_PSK_WITH_3DES_EDE_CBC_SHA = 0x008B
TLS_PSK_WITH_AES_128_CBC_SHA = 0x008C
TLS_PSK_WITH_AES_256_CBC_SHA = 0x008D
TLS_DHE_PSK_WITH_RC4_128_SHA = 0x008E
TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = 0x008F
TLS_DHE_PSK_WITH_AES_128_CBC_SHA = 0x0090
TLS_DHE_PSK_WITH_AES_256_CBC_SHA = 0x0091
TLS_RSA_PSK_WITH_RC4_128_SHA = 0x0092
TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = 0x0093
TLS_RSA_PSK_WITH_AES_128_CBC_SHA = 0x0094
TLS_RSA_PSK_WITH_AES_256_CBC_SHA = 0x0095
TLS_RSA_WITH_SEED_CBC_SHA = 0x0096
TLS_DH_DSS_WITH_SEED_CBC_SHA = 0x0097
TLS_DH_RSA_WITH_SEED_CBC_SHA = 0x0098
TLS_DHE_DSS_WITH_SEED_CBC_SHA = 0x0099
TLS_DHE_RSA_WITH_SEED_CBC_SHA = 0x009A
TLS_DH_anon_WITH_SEED_CBC_SHA = 0x009B
TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = 0x00A0
TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = 0x00A1
TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = 0x00A4
TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = 0x00A5
TLS_DH_anon_WITH_AES_128_GCM_SHA256 = 0x00A6
TLS_DH_anon_WITH_AES_256_GCM_SHA384 = 0x00A7
TLS_PSK_WITH_AES_128_GCM_SHA256 = 0x00A8
TLS_PSK_WITH_AES_256_GCM_SHA384 = 0x00A9
TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = 0x00AA
TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = 0x00AB
TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = 0x00AC
TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = 0x00AD
TLS_PSK_WITH_AES_128_CBC_SHA256 = 0x00AE
TLS_PSK_WITH_AES_256_CBC_SHA384 = 0x00AF
TLS_PSK_WITH_NULL_SHA256 = 0x00B0
TLS_PSK_WITH_NULL_SHA384 = 0x00B1
TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = 0x00B2
TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = 0x00B3
TLS_DHE_PSK_WITH_NULL_SHA256 = 0x00B4
TLS_DHE_PSK_WITH_NULL_SHA384 = 0x00B5
TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = 0x00B6
TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = 0x00B7
TLS_RSA_PSK_WITH_NULL_SHA256 = 0x00B8
TLS_RSA_PSK_WITH_NULL_SHA384 = 0x00B9
TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BA
TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BB
TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BC
TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BD
TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BE
TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BF
TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C0
TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C1
TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C2
TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C3
TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C4
TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C5
TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF
TLS_ECDH_ECDSA_WITH_NULL_SHA = 0xC001
TLS_ECDH_ECDSA_WITH_RC4_128_SHA = 0xC002
TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC003
TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = 0xC004
TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = 0xC005
TLS_ECDHE_ECDSA_WITH_NULL_SHA = 0xC006
TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = 0xC007
TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC008
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
TLS_ECDH_RSA_WITH_NULL_SHA = 0xC00B
TLS_ECDH_RSA_WITH_RC4_128_SHA = 0xC00C
TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = 0xC00D
TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = 0xC00E
TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = 0xC00F
TLS_ECDHE_RSA_WITH_NULL_SHA = 0xC010
TLS_ECDHE_RSA_WITH_RC4_128_SHA = 0xC011
TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = 0xC012
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
TLS_ECDH_anon_WITH_NULL_SHA = 0xC015
TLS_ECDH_anon_WITH_RC4_128_SHA = 0xC016
TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = 0xC017
TLS_ECDH_anon_WITH_AES_128_CBC_SHA = 0xC018
TLS_ECDH_anon_WITH_AES_256_CBC_SHA = 0xC019
TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0xC01A
TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0xC01B
TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = 0xC01C
TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0xC01D
TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0xC01E
TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = 0xC01F
TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0xC020
TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0xC021
TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = 0xC022
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC025
TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC026
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = 0xC029
TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = 0xC02A
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02D
TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02E
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = 0xC031
TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = 0xC032
TLS_ECDHE_PSK_WITH_RC4_128_SHA = 0xC033
TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = 0xC034
TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = 0xC035
TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = 0xC036
TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = 0xC037
TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = 0xC038
TLS_ECDHE_PSK_WITH_NULL_SHA = 0xC039
TLS_ECDHE_PSK_WITH_NULL_SHA256 = 0xC03A
TLS_ECDHE_PSK_WITH_NULL_SHA384 = 0xC03B
TLS_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC03C
TLS_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC03D
TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = 0xC03E
TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = 0xC03F
TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC040
TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC041
TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = 0xC042
TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = 0xC043
TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC044
TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC045
TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 = 0xC046
TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 = 0xC047
TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = 0xC048
TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = 0xC049
TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = 0xC04A
TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = 0xC04B
TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC04C
TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC04D
TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC04E
TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC04F
TLS_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC050
TLS_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC051
TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC052
TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC053
TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC054
TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC055
TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = 0xC056
TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = 0xC057
TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = 0xC058
TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = 0xC059
TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 = 0xC05A
TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 = 0xC05B
TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = 0xC05C
TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = 0xC05D
TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = 0xC05E
TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = 0xC05F
TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC060
TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC061
TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC062
TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC063
TLS_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC064
TLS_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC065
TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC066
TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC067
TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC068
TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC069
TLS_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06A
TLS_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06B
TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06C
TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06D
TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06E
TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06F
TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC070
TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC071
TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC072
TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC073
TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC074
TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC075
TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC076
TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC077
TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC078
TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC079
TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC07A
TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07B
TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC07C
TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07D
TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC07E
TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07F
TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = 0xC080
TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = 0xC081
TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = 0xC082
TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = 0xC083
TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 = 0xC084
TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 = 0xC085
TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC086
TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC087
TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC088
TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC089
TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08A
TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08B
TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08C
TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08D
TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08E
TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08F
TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC090
TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC091
TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC092
TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC093
TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC094
TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC095
TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC096
TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC097
TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC098
TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC099
TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC09A
TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC09B
TLS_RSA_WITH_AES_128_CCM = 0xC09C
TLS_RSA_WITH_AES_256_CCM = 0xC09D
TLS_DHE_RSA_WITH_AES_128_CCM = 0xC09E
TLS_DHE_RSA_WITH_AES_256_CCM = 0xC09F
TLS_RSA_WITH_AES_128_CCM_8 = 0xC0A0
TLS_RSA_WITH_AES_256_CCM_8 = 0xC0A1
TLS_DHE_RSA_WITH_AES_128_CCM_8 = 0xC0A2
TLS_DHE_RSA_WITH_AES_256_CCM_8 = 0xC0A3
TLS_PSK_WITH_AES_128_CCM = 0xC0A4
TLS_PSK_WITH_AES_256_CCM = 0xC0A5
TLS_DHE_PSK_WITH_AES_128_CCM = 0xC0A6
TLS_DHE_PSK_WITH_AES_256_CCM = 0xC0A7
TLS_PSK_WITH_AES_128_CCM_8 = 0xC0A8
TLS_PSK_WITH_AES_256_CCM_8 = 0xC0A9
TLS_PSK_DHE_WITH_AES_128_CCM_8 = 0xC0AA
TLS_PSK_DHE_WITH_AES_256_CCM_8 = 0xC0AB
TLS_ECDHE_ECDSA_WITH_AES_128_CCM = 0xC0AC
TLS_ECDHE_ECDSA_WITH_AES_256_CCM = 0xC0AD
TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = 0xC0AE
TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = 0xC0AF
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCC14
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCC13
class ECPointFormat(Enum):
uncompressed = 0
ansiX962_compressed_prime = 1
ansiX962_compressed_char2 = 2
class ECCurves(Enum):
sect163k1 = 1
sect163r1 = 2
sect163r2 = 3
sect193r1 = 4
sect193r2 = 5
sect233k1 = 6
sect233r1 = 7
sect239k1 = 8
sect283k1 = 9
sect283r1 = 10
sect409k1 = 11
sect409r1 = 12
sect571k1 = 13
sect571r1 = 14
secp160k1 = 15
secp160r1 = 16
secp160r2 = 17
secp192k1 = 18
secp192r1 = 19
secp224k1 = 20
secp224r1 = 21
secp256k1 = 22
secp256r1 = 23
secp384r1 = 24
secp521r1 = 25
brainpoolP256r1 = 26
brainpoolP384r1 = 27
brainpoolP512r1 = 28
TLSProtocolVersion = ("3.0", "1.0", "1.1", "1.2")
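# Illustrative sketch, not part of the original module: since the enum values
# above are the IANA-assigned code points, they can be packed directly into
# TLS wire format. The helper name below is hypothetical; it only demonstrates
# how ECCurves values map onto the Supported Elliptic Curves extension body
# (a 2-byte length prefix followed by 2-byte curve identifiers).
def _example_pack_supported_curves(curves=(ECCurves.secp256r1, ECCurves.secp384r1)):
    import struct
    curve_list = b"".join(struct.pack(">H", curve.value) for curve in curves)
    return struct.pack(">H", len(curve_list)) + curve_list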
| {
"repo_name": "Ayrx/tlsenum",
"path": "tlsenum/mappings.py",
"copies": "1",
"size": "16201",
"license": "mit",
"hash": 5690872095771614000,
"line_mean": 43.2650273224,
"line_max": 64,
"alpha_frac": 0.6931670885,
"autogenerated": false,
"ratio": 2.1541018481584895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33472689366584896,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from enum import Flag, auto
from numbers import Number
import numpy as np
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from odin.bay import distributions as obd
# ===========================================================================
# enum
# ===========================================================================
class Statistic(Flag):
SAMPLE = auto()
MEAN = auto()
VAR = auto()
STDDEV = auto()
DIST = auto()
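# Illustrative sketch, not part of the original module: Flag members compose
# with bitwise OR, so several statistics can be requested at once and tested
# for with ``in``. The helper name is hypothetical.
def _example_statistic_flags():
  wanted = Statistic.MEAN | Statistic.VAR
  return Statistic.MEAN in wanted, Statistic.SAMPLE in wanted  # (True, False)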
# ===========================================================================
# Logging
# ===========================================================================
def _dist2text(dist):
return '[%s]%s batch:%s event:%s' % (dist.__class__.__name__, dist.dtype.name,
dist.batch_shape, dist.event_shape)
def _extract_desc(dist, name, pad):
text = pad + (name + ':' if len(name) > 0 else '') + _dist2text(dist) + '\n'
obj_type = type(dist)
for key in dir(dist):
val = getattr(dist, key)
if isinstance(val, tfd.Distribution) and \
not isinstance(getattr(obj_type, key, None), property):
text += _extract_desc(val, key, pad + ' ')
return text
def print_dist(dist, return_text=False):
assert isinstance(dist, tfd.Distribution)
text = _extract_desc(dist, '', '')
if return_text:
return text[:-1]
print(text)
# ===========================================================================
# Objectives
# ===========================================================================
def kl_divergence(q,
p,
use_analytic_kl=False,
q_sample=lambda q: q.sample(),
reduce_axis=(),
auto_remove_independent=True,
name=None):
""" Calculating KL(q(x)||p(x))
Parameters
----------
q : tensorflow_probability.Distribution
the posterior distribution
p : tensorflow_probability.Distribution
the prior distribution
use_analytic_kl : bool (default: False)
    if True, use the closed-form solution for the KL divergence
q_sample : {callable, Tensor, Number}
callable for extracting sample from `q(x)` (takes q distribution
as input argument)
  reduce_axis : {None, int, tuple}
    reduce axis when using MCMC to estimate the KL divergence; the default
    `()` keeps all original dimensions
auto_remove_independent : bool (default: True)
if `q` or `p` is `tfd.Independent` wrapper, get the original
distribution for calculating the analytic KL
name : {None, str}
  Returns
  -------
  kl : Tensor
    the analytic KL divergence if `use_analytic_kl=True`, otherwise a Monte
    Carlo estimate of `q.log_prob(z) - p.log_prob(z)` averaged over
    `reduce_axis`
  """
if auto_remove_independent:
    # only remove Independent if one is Independent and the other is not.
if isinstance(q, tfd.Independent) and not isinstance(p, tfd.Independent):
q = q.distribution
if not isinstance(q, tfd.Independent) and isinstance(p, tfd.Independent):
p = p.distribution
q_name = [i for i in q.name.split('/') if len(i) > 0][-1]
p_name = [i for i in p.name.split('/') if len(i) > 0][-1]
with tf.compat.v1.name_scope(name, "KL_q%s_p%s" % (q_name, p_name)):
if bool(use_analytic_kl):
return tfd.kl_divergence(q, p)
# using MCMC sampling for estimating the KL
if callable(q_sample):
z = q_sample(q)
elif isinstance(q_sample, Number):
z = q.sample(int(q_sample))
else:
z = q_sample
# calculate the output, then perform reduction
kl = q.log_prob(z) - p.log_prob(z)
kl = tf.reduce_mean(input_tensor=kl, axis=reduce_axis)
return kl
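# Illustrative sketch, not part of the original module: comparing the analytic
# KL between two Gaussians with the Monte Carlo estimate produced by
# `kl_divergence` above. The helper name and the sample count of 10000 are
# arbitrary choices for illustration.
def _example_kl_between_gaussians():
  q = tfd.Normal(loc=0., scale=1.)
  p = tfd.Normal(loc=1., scale=2.)
  exact = kl_divergence(q, p, use_analytic_kl=True)
  approx = kl_divergence(q, p, q_sample=10000, reduce_axis=None)
  return exact, approx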
| {
"repo_name": "imito/odin",
"path": "odin/bay/helpers.py",
"copies": "1",
"size": "3549",
"license": "mit",
"hash": -2688553872197972500,
"line_mean": 30.6785714286,
"line_max": 80,
"alpha_frac": 0.547632469,
"autogenerated": false,
"ratio": 3.8691384950926935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4916770964092693,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from examples.helpers import authenticate
from trakt import Trakt
from trakt.objects import Movie, Show
import logging
import os
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
# Configure
Trakt.configuration.defaults.client(
id=os.environ.get('CLIENT_ID'),
secret=os.environ.get('CLIENT_SECRET')
)
Trakt.configuration.defaults.http(
retry=True
)
# Authenticate
Trakt.configuration.defaults.oauth.from_response(
authenticate()
)
# Fetch playback for movies
playback = Trakt['sync/playback'].movies(exceptions=True)
for item in playback.values():
print(item)
if type(item) is Movie:
print('\tprogress: %r' % item.progress)
print('\tpaused_at: %r' % item.paused_at)
# Fetch movie library (watched, collection, ratings)
movies = {}
Trakt['sync/watched'].movies(movies, exceptions=True)
Trakt['sync/collection'].movies(movies, exceptions=True)
Trakt['sync/ratings'].movies(movies, exceptions=True)
for movie in movies.values():
print(movie)
print('\tkeys%s%r' % ('\t' * 3, movie.keys))
print('\trating%s%r' % ('\t' * 3, movie.rating))
print('\tis_watched%s%r' % ('\t' * 2, movie.is_watched))
print('\tlast_watched_at%s%r' % ('\t', movie.last_watched_at))
print('\tis_collected%s%r' % ('\t', movie.is_collected))
print('\tcollected_at%s%r' % ('\t', movie.collected_at))
print('\tplays%s%r' % ('\t' * 3, movie.plays))
print('')
# Fetch playback for shows
playback = Trakt['sync/playback'].episodes(exceptions=True)
for item in playback.values():
print(item)
if type(item) is Show:
for _, episode in item.episodes():
print('\t%r' % (episode,))
print('\t\tprogress: %r' % episode.progress)
print('\t\tpaused_at: %r' % episode.paused_at)
# Fetch show/episode library (watched, collection, ratings)
shows = {}
Trakt['sync/watched'].shows(shows, exceptions=True)
Trakt['sync/collection'].shows(shows, exceptions=True)
Trakt['sync/ratings'].shows(shows, exceptions=True)
Trakt['sync/ratings'].episodes(shows, exceptions=True)
for show in shows.values():
print(show)
print('\tkeys%s%r' % ('\t' * 3, show.keys))
print('\trating%s%r' % ('\t' * 3, show.rating))
print('')
| {
"repo_name": "fuzeman/trakt.py",
"path": "examples/collection.py",
"copies": "1",
"size": "2527",
"license": "mit",
"hash": -4542371561525351400,
"line_mean": 27.7159090909,
"line_max": 70,
"alpha_frac": 0.6026909379,
"autogenerated": false,
"ratio": 3.495159059474412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4597849997374412,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from examples.helpers import authenticate
from trakt import Trakt
from datetime import datetime
import logging
import os
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
# Configure
Trakt.configuration.defaults.client(
id=os.environ.get('CLIENT_ID'),
secret=os.environ.get('CLIENT_SECRET')
)
# Authenticate
Trakt.configuration.defaults.oauth.from_response(
authenticate()
)
now = datetime.utcnow()
# Retrieve 10 history records (most recent)
for item in Trakt['sync/history'].get(per_page=10):
print(' - %-120s (watched_at: %r)' % (
repr(item),
item.watched_at.strftime('%Y-%m-%d %H:%M:%S')
))
print('=' * 160)
# Retrieve history records for "Family Guy"
for item in Trakt['sync/history'].shows('1425', pagination=True, per_page=25):
print(' - %-120s (watched_at: %r)' % (
repr(item),
item.watched_at.strftime('%Y-%m-%d %H:%M:%S')
))
print('=' * 160)
# Retrieve history records for this year
for item in Trakt['sync/history'].get(pagination=True, per_page=25, start_at=datetime(now.year, 1, 1)):
print(' - %-120s (watched_at: %r)' % (
repr(item),
item.watched_at.strftime('%Y-%m-%d %H:%M:%S')
))
print('=' * 160)
# Retrieve all history records
for item in Trakt['sync/history'].get(pagination=True, per_page=25):
print(' - %-120s (watched_at: %r)' % (
repr(item),
item.watched_at.strftime('%Y-%m-%d %H:%M:%S')
))
| {
"repo_name": "fuzeman/trakt.py",
"path": "examples/history.py",
"copies": "1",
"size": "1667",
"license": "mit",
"hash": 8130554255942685000,
"line_mean": 27.2542372881,
"line_max": 107,
"alpha_frac": 0.5776844631,
"autogenerated": false,
"ratio": 3.451345755693582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45290302187935816,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from examples.helpers import authenticate
import logging
import os
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
from trakt import Trakt
# Configure
Trakt.configuration.defaults.client(
id=os.environ.get('CLIENT_ID'),
secret=os.environ.get('CLIENT_SECRET')
)
# Authenticate
Trakt.configuration.defaults.oauth.from_response(
authenticate()
)
# Retrieve lists
lists = Trakt['users'].likes('lists', pagination=True)
print('Found %d list(s) [%d page(s)]' % (
lists.total_items,
lists.total_pages
))
for x, liked_list in enumerate(lists):
items = liked_list.items(pagination=True, per_page=10)
print('[%d/%d] %r [%d item(s), %d page(s)]' % (
x + 1,
lists.total_items,
liked_list,
items.total_items,
items.total_pages
))
for y, item in enumerate(items):
print('\t[%s/%s] %r' % (
y + 1,
items.total_items,
item
))
# Only print 20 items in the list
if y + 1 >= 20:
break
| {
"repo_name": "fuzeman/trakt.py",
"path": "examples/lists.py",
"copies": "1",
"size": "1259",
"license": "mit",
"hash": -7525251905356537000,
"line_mean": 22.7547169811,
"line_max": 64,
"alpha_frac": 0.5361397935,
"autogenerated": false,
"ratio": 3.873846153846154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4909985947346154,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .expressions import ElemWise, schema_method_list, method_properties
import datashape
from datashape import dshape, isdatelike, isnumeric
from datashape.coretypes import timedelta_
from ..compatibility import basestring
__all__ = ['DateTime', 'Date', 'date', 'Year', 'year', 'Month', 'month', 'Day',
'day', 'days', 'Hour', 'hour', 'Minute', 'minute', 'Second', 'second',
'Millisecond', 'millisecond', 'Microsecond', 'microsecond', 'nanosecond',
           'Time', 'time', 'week', 'nanoseconds', 'seconds', 'total_seconds',
'UTCFromTimestamp', 'DateTimeTruncate',
'Ceil', 'Floor', 'Round', 'strftime']
def _validate(var, name, type, typename):
if not isinstance(var, type):
raise TypeError('"%s" argument must be a %s'%(name, typename))
class DateTime(ElemWise):
""" Superclass for datetime accessors """
_arguments = '_child',
def __str__(self):
return '%s.%s' % (str(self._child), type(self).__name__.lower())
def _schema(self):
ds = dshape(self._dtype)
return ds if not isinstance(self._child.schema.measure, datashape.Option) else datashape.Option(ds)
@property
def _name(self):
return '%s_%s' % (self._child._name, self.attr)
@property
def attr(self):
return type(self).__name__.lower()
class Date(DateTime):
_dtype = datashape.date_
def date(expr):
return Date(expr)
class Year(DateTime):
_dtype = datashape.int32
def year(expr):
return Year(expr)
class Month(DateTime):
_dtype = datashape.int64
def month(expr):
return Month(expr)
class Day(DateTime):
_dtype = datashape.int64
def day(expr):
return Day(expr)
class Time(DateTime):
_dtype = datashape.time_
def time(expr):
return Time(expr)
class Hour(DateTime):
_dtype = datashape.int64
def hour(expr):
return Hour(expr)
class Minute(DateTime):
_dtype = datashape.int64
def minute(expr):
return Minute(expr)
class Second(DateTime):
_dtype = datashape.float64
def second(expr):
return Second(expr)
class Millisecond(DateTime):
_dtype = datashape.int64
def millisecond(expr):
return Millisecond(expr)
class Microsecond(DateTime):
_dtype = datashape.int64
def microsecond(expr):
return Microsecond(expr)
class UTCFromTimestamp(DateTime):
_dtype = datashape.datetime_
def utcfromtimestamp(expr):
return UTCFromTimestamp(expr)
class nanosecond(DateTime): _dtype = datashape.int64
class week(DateTime): _dtype = datashape.int16
class weekday_name(DateTime): _dtype = datashape.string
class daysinmonth(DateTime): _dtype = datashape.int16
class weekofyear(DateTime): _dtype = datashape.int16
class dayofyear(DateTime): _dtype = datashape.int16
class dayofweek(DateTime): _dtype = datashape.int16
weekday = dayofweek # alias
class quarter(DateTime): _dtype = datashape.int16
class is_month_start(DateTime): _dtype = datashape.bool_
class is_month_end(DateTime): _dtype = datashape.bool_
class is_quarter_start(DateTime): _dtype = datashape.bool_
class is_quarter_end(DateTime): _dtype = datashape.bool_
class is_year_start(DateTime): _dtype = datashape.bool_
class is_year_end(DateTime): _dtype = datashape.bool_
class days_in_month(DateTime): _dtype = datashape.int64
class strftime(ElemWise):
_arguments = '_child', 'format'
schema = datashape.string
units = (
'year',
'month',
'week',
'day',
'hour',
'minute',
'second',
'millisecond',
'microsecond',
'nanosecond',
)
unit_aliases = {
'y': 'year',
'w': 'week',
'd': 'day',
'date': 'day',
'h': 'hour',
's': 'second',
'ms': 'millisecond',
'us': 'microsecond',
'ns': 'nanosecond'
}
def normalize_time_unit(s):
""" Normalize time input to one of 'year', 'second', 'millisecond', etc..
Examples
--------
>>> normalize_time_unit('milliseconds')
'millisecond'
>>> normalize_time_unit('ms')
'millisecond'
"""
s = s.lower().strip()
if s in units:
return s
if s in unit_aliases:
return unit_aliases[s]
if s[-1] == 's':
return normalize_time_unit(s.rstrip('s'))
raise ValueError("Do not understand time unit %s" % s)
class DateTimeTruncate(DateTime):
_arguments = '_child', 'measure', 'unit'
@property
def _dtype(self):
if units.index('day') >= units.index(self.unit):
return datashape.date_
else:
return datashape.datetime_
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.truncate(%ss=%g)' % (self._child, self.unit, self.measure)
def truncate(expr, *args, **kwargs):
""" Truncate datetime expression
Examples
--------
>>> from blaze import symbol, compute
>>> from datetime import datetime
>>> s = symbol('s', 'datetime')
>>> expr = s.truncate(10, 'minutes')
>>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
datetime.datetime(2000, 6, 25, 12, 30)
>>> expr = s.truncate(1, 'week')
>>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
datetime.date(2000, 6, 25)
Alternatively use keyword arguments to specify unit and measure
>>> expr = s.truncate(weeks=2)
"""
if not args and not kwargs:
raise TypeError('truncate takes exactly 2 positional arguments, '
'e.g., truncate(2, "days") or 1 keyword argument, '
'e.g., truncate(days=2)')
if args:
if kwargs:
raise TypeError('Cannot pass both positional and keyword '
'arguments; given %s and %s.' % (args, kwargs))
measure, unit = args
else:
[(unit, measure)] = kwargs.items()
return DateTimeTruncate(expr, measure, normalize_time_unit(unit))
class UnaryDateTimeFunction(ElemWise):
"""DateTime function that only takes a single argument."""
    _arguments = '_child',
class Round(ElemWise):
_arguments = '_child', 'freq'
@property
def schema(self):
return self._child.schema
class Floor(ElemWise):
_arguments = '_child', 'freq'
@property
def schema(self):
return self._child.schema
class Ceil(ElemWise):
_arguments = '_child', 'freq'
@property
def schema(self):
return self._child.schema
class dt_ns(object):
def __init__(self, field):
self.field = field
def year(self):
return year(self.field)
def month(self):
return month(self.field)
def day(self):
return day(self.field)
def hour(self):
return hour(self.field)
def minute(self):
return minute(self.field)
def date(self):
return date(self.field)
def time(self):
return time(self.field)
def second(self):
return second(self.field)
def millisecond(self):
return millisecond(self.field)
def microsecond(self):
return microsecond(self.field)
def nanosecond(self):
return nanosecond(self.field)
def weekday(self):
"""Alias for dayofweek
"""
return dayofweek(self.field)
def weekday_name(self):
return weekday_name(self.field)
def daysinmonth(self):
return daysinmonth(self.field)
def weekofyear(self):
return weekofyear(self.field)
def dayofyear(self):
return dayofyear(self.field)
def dayofweek(self):
return dayofweek(self.field)
def quarter(self):
return quarter(self.field)
def is_month_start(self):
return is_month_start(self.field)
def is_month_end(self):
return is_month_end(self.field)
def is_quarter_start(self):
return is_quarter_start(self.field)
def is_quarter_end(self):
return is_quarter_end(self.field)
def is_year_start(self):
return is_year_start(self.field)
def is_year_end(self):
return is_year_end(self.field)
def days_in_month(self):
return days_in_month(self.field)
def strftime(self, format):
_validate(format, 'format', basestring, 'string')
return strftime(self.field, format)
def truncate(self, *args, **kwargs):
return truncate(self.field, *args, **kwargs)
def round(self, freq):
_validate(freq, 'freq', basestring, 'string')
return Round(self.field, freq)
def floor(self, freq):
_validate(freq, 'freq', basestring, 'string')
return Floor(self.field, freq)
def ceil(self, freq):
_validate(freq, 'freq', basestring, 'string')
return Ceil(self.field, freq)
def week(self):
return week(self.field)
class dt(object):
__name__ = 'dt'
def __get__(self, obj, type=None):
return dt_ns(obj) if obj is not None else self
class days(DateTime): _dtype = datashape.int64
class nanoseconds(DateTime): _dtype = datashape.int64
class seconds(DateTime): _dtype = datashape.int64
class total_seconds(DateTime): _dtype = datashape.int64
class timedelta_ns(object):
def __init__(self, field):
self.field = field
def days(self): return days(self.field)
def nanoseconds(self): return nanoseconds(self.field)
def seconds(self): return seconds(self.field)
def total_seconds(self): return total_seconds(self.field)
class timedelta(object):
# pandas uses the same 'dt' name for
# DateTimeProperties and TimedeltaProperties.
__name__ = 'dt'
def __get__(self, obj, type=None):
return timedelta_ns(obj) if obj is not None else self
def isdeltalike(ds):
return ds == timedelta_
schema_method_list.extend([
(isdatelike, set([year, month, day, hour, minute, date, time, second,
millisecond, microsecond, truncate,
dt()])),
(isnumeric, set([utcfromtimestamp])),
(isdeltalike, set([timedelta()]))
])
method_properties |= set([year, month, day, hour, minute, second, millisecond,
microsecond, date, time, utcfromtimestamp])
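# Illustrative sketch, not part of the original module: the accessor functions
# above can be applied directly to a datetime-typed expression. The helper name
# is hypothetical; `symbol` is imported lazily only to keep the sketch
# self-contained, mirroring the doctest in `truncate`.
def _example_datetime_accessors():
    from blaze import symbol
    t = symbol('t', 'var * {when: datetime}')
    return hour(t.when), truncate(t.when, 1, 'week')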
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/datetime.py",
"copies": "3",
"size": "10146",
"license": "bsd-3-clause",
"hash": -2604416622286587000,
"line_mean": 23.8676470588,
"line_max": 107,
"alpha_frac": 0.6242854327,
"autogenerated": false,
"ratio": 3.5315001740341105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5655785606734111,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .expressions import Expr, ElemWise
from datashape import dshape
import datashape
__all__ = ['DateTime', 'Date', 'date', 'Year', 'year', 'Month', 'month', 'Day',
'day', 'Hour', 'hour', 'Second', 'second', 'Millisecond',
           'millisecond', 'Microsecond', 'microsecond', 'Time',
'time', 'UTCFromTimestamp', 'DateTimeTruncate']
class DateTime(ElemWise):
""" Superclass for datetime accessors """
__slots__ = '_hash', '_child',
def __str__(self):
return '%s.%s' % (str(self._child), type(self).__name__.lower())
@property
def schema(self):
return dshape(self._dtype)
@property
def _name(self):
return '%s_%s' % (self._child._name, self.attr)
@property
def attr(self):
return type(self).__name__.lower()
class Date(DateTime):
_dtype = datashape.date_
def date(expr):
return Date(expr)
class Year(DateTime):
_dtype = datashape.int32
def year(expr):
return Year(expr)
class Month(DateTime):
_dtype = datashape.int64
def month(expr):
return Month(expr)
class Day(DateTime):
_dtype = datashape.int64
def day(expr):
return Day(expr)
class Time(DateTime):
_dtype = datashape.time_
def time(expr):
    return Time(expr)
class Hour(DateTime):
_dtype = datashape.int64
def hour(expr):
return Hour(expr)
class Minute(DateTime):
_dtype = datashape.int64
def minute(expr):
return Minute(expr)
class Second(DateTime):
_dtype = datashape.int64
def second(expr):
return Second(expr)
class Millisecond(DateTime):
_dtype = datashape.int64
def millisecond(expr):
return Millisecond(expr)
class Microsecond(DateTime):
_dtype = datashape.int64
def microsecond(expr):
return Microsecond(expr)
class UTCFromTimestamp(DateTime):
_dtype = datashape.datetime_
def utcfromtimestamp(expr):
return UTCFromTimestamp(expr)
units = [
'year',
'month',
'week',
'day',
'hour',
'minute',
'second',
'millisecond',
'microsecond',
'nanosecond'
]
_unit_aliases = {
'y': 'year',
'w': 'week',
'd': 'day',
'date': 'day',
'h': 'hour',
's': 'second',
'ms': 'millisecond',
'us': 'microsecond',
'ns': 'nanosecond'
}
def normalize_time_unit(s):
""" Normalize time input to one of 'year', 'second', 'millisecond', etc..
Examples
--------
>>> normalize_time_unit('milliseconds')
'millisecond'
>>> normalize_time_unit('ms')
'millisecond'
"""
s = s.lower().strip()
if s in units:
return s
if s in _unit_aliases:
return _unit_aliases[s]
if s[-1] == 's':
return normalize_time_unit(s.rstrip('s'))
raise ValueError("Do not understand time unit %s" % s)
class DateTimeTruncate(DateTime):
__slots__ = '_hash', '_child', 'measure', 'unit'
@property
def _dtype(self):
if units.index('day') >= units.index(self.unit):
return datashape.date_
else:
return datashape.datetime_
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.truncate(%ss=%g)' % (self._child, self.unit, self.measure)
def truncate(expr, *args, **kwargs):
""" Truncate datetime expression
Examples
--------
>>> from blaze import symbol, compute
>>> from datetime import datetime
>>> s = symbol('s', 'datetime')
>>> expr = s.truncate(10, 'minutes')
>>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
datetime.datetime(2000, 6, 25, 12, 30)
>>> expr = s.truncate(1, 'week')
>>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
datetime.date(2000, 6, 25)
Alternatively use keyword arguments to specify unit and measure
>>> # expr = s.truncate(2, 'weeks')
>>> expr = s.truncate(weeks=2)
"""
if not args and not kwargs:
raise TypeError('truncate takes exactly 2 positional arguments, '
'e.g., truncate(2, "days") or 1 keyword argument, '
'e.g., truncate(days=2)')
if args:
assert not kwargs, ('only positional arguments allowed if any are '
'passed in')
measure, unit = args
if kwargs:
assert not args, 'only keyword arguments allowed if any are passed in'
[(unit, measure)] = kwargs.items()
return DateTimeTruncate(expr, measure, normalize_time_unit(unit))
from .expressions import schema_method_list, method_properties
from datashape.predicates import isdatelike, isnumeric
schema_method_list.extend([
(isdatelike, set([year, month, day, hour, minute, date, time, second,
millisecond, microsecond, truncate])),
(isnumeric, set([utcfromtimestamp]))
])
method_properties |= set([year, month, day, hour, minute, second, millisecond,
microsecond, date, time, utcfromtimestamp])
| {
"repo_name": "LiaoPan/blaze",
"path": "blaze/expr/datetime.py",
"copies": "3",
"size": "5037",
"license": "bsd-3-clause",
"hash": 4041801985907352600,
"line_mean": 20.343220339,
"line_max": 79,
"alpha_frac": 0.5975779234,
"autogenerated": false,
"ratio": 3.5698086463501064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5667386569750107,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .expressions import Expr, ElemWise
from datashape import dshape, Record, DataShape, Unit, Option, date_, datetime_
import datashape
__all__ = ['DateTime', 'Date', 'date', 'Year', 'year', 'Month', 'month', 'Day',
'day', 'Hour', 'hour', 'Second', 'second', 'Millisecond',
           'millisecond', 'Microsecond', 'microsecond', 'Time',
'time']
class DateTime(ElemWise):
""" Superclass for datetime accessors """
__slots__ = '_child',
def __str__(self):
return '%s.%s' % (str(self._child), type(self).__name__.lower())
@property
def schema(self):
return dshape(self._dtype)
@property
def _name(self):
return '%s_%s' % (self._child._name, self.attr)
@property
def attr(self):
return type(self).__name__.lower()
class Date(DateTime):
_dtype = datashape.date_
def date(expr):
return Date(expr)
class Year(DateTime):
_dtype = datashape.int32
def year(expr):
return Year(expr)
class Month(DateTime):
_dtype = datashape.int32
def month(expr):
return Month(expr)
class Day(DateTime):
_dtype = datashape.int32
def day(expr):
return Day(expr)
class Time(DateTime):
_dtype = datashape.time_
def time(expr):
    return Time(expr)
class Hour(DateTime):
_dtype = datashape.int32
def hour(expr):
return Hour(expr)
class Minute(DateTime):
_dtype = datashape.int32
def minute(expr):
return Minute(expr)
class Second(DateTime):
_dtype = datashape.int32
def second(expr):
return Second(expr)
class Millisecond(DateTime):
_dtype = datashape.int64
def millisecond(expr):
return Millisecond(expr)
class Microsecond(DateTime):
_dtype = datashape.int64
def microsecond(expr):
return Microsecond(expr)
from .expressions import schema_method_list, method_properties
from datashape.predicates import isdatelike
schema_method_list.extend([
(isdatelike, set([year, month, day, hour, minute, date, time, second,
millisecond, microsecond]))
])
method_properties |= set([year, month, day, hour, minute, second, millisecond,
microsecond, date, time])
| {
"repo_name": "vitan/blaze",
"path": "blaze/expr/datetime.py",
"copies": "1",
"size": "2247",
"license": "bsd-3-clause",
"hash": -1624166945605099000,
"line_mean": 21.0294117647,
"line_max": 79,
"alpha_frac": 0.6444147753,
"autogenerated": false,
"ratio": 3.4045454545454548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45489602298454546,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .expressions import Expr, ndim, symbol
from datashape import DataShape
from collections import Iterable
__all__ = 'Transpose', 'TensorDot', 'dot', 'transpose', 'tensordot'
class Transpose(Expr):
""" Transpose dimensions in an N-Dimensional array
Examples
--------
>>> x = symbol('x', '10 * 20 * int32')
>>> x.T
transpose(x)
>>> x.T.shape
(20, 10)
Specify axis ordering with axes keyword argument
>>> x = symbol('x', '10 * 20 * 30 * int32')
>>> x.transpose([2, 0, 1])
transpose(x, axes=[2, 0, 1])
>>> x.transpose([2, 0, 1]).shape
(30, 10, 20)
"""
__slots__ = '_hash', '_child', 'axes'
@property
def dshape(self):
s = self._child.shape
return DataShape(*(tuple([s[i] for i in self.axes]) +
(self._child.dshape.measure,)))
def __str__(self):
if self.axes == tuple(range(ndim(self)))[::-1]:
return 'transpose(%s)' % self._child
else:
return 'transpose(%s, axes=%s)' % (self._child,
list(self.axes))
def transpose(expr, axes=None):
if axes is None:
axes = tuple(range(ndim(expr)))[::-1]
if isinstance(axes, list):
axes = tuple(axes)
return Transpose(expr, axes)
transpose.__doc__ = Transpose.__doc__
def T(expr):
return transpose(expr)
class TensorDot(Expr):
""" Dot Product: Contract and sum dimensions of two arrays
>>> x = symbol('x', '20 * 20 * int32')
>>> y = symbol('y', '20 * 30 * int32')
>>> x.dot(y)
tensordot(x, y)
>>> tensordot(x, y, axes=[0, 0])
tensordot(x, y, axes=[0, 0])
"""
__slots__ = '_hash', 'lhs', 'rhs', '_left_axes', '_right_axes'
__inputs__ = 'lhs', 'rhs'
@property
def dshape(self):
# Compute shape
shape = tuple([d for i, d in enumerate(self.lhs.shape)
if i not in self._left_axes] +
[d for i, d in enumerate(self.rhs.shape)
if i not in self._right_axes])
# Compute measure by mimicking a mul and add
l = symbol('l', self.lhs.dshape.measure)
r = symbol('r', self.rhs.dshape.measure)
measure = ((l * r) + (l * r)).dshape.measure
return DataShape(*(shape + (measure,)))
def __str__(self):
if self.isidentical(tensordot(self.lhs, self.rhs)):
return 'tensordot(%s, %s)' % (self.lhs, self.rhs)
else:
la = self._left_axes
if len(la) == 1:
la = la[0]
ra = self._right_axes
if len(ra) == 1:
ra = ra[0]
return 'tensordot(%s, %s, axes=[%s, %s])' % (
self.lhs, self.rhs, str(la), str(ra))
def tensordot(lhs, rhs, axes=None):
if axes is None:
left = ndim(lhs) - 1
right = 0
elif isinstance(axes, Iterable):
left, right = axes
else:
left, right = axes, axes
if isinstance(left, int):
left = (left,)
if isinstance(right, int):
right = (right,)
if isinstance(left, list):
left = tuple(left)
if isinstance(right, list):
right = tuple(right)
return TensorDot(lhs, rhs, left, right)
tensordot.__doc__ = TensorDot.__doc__
def dot(lhs, rhs):
return tensordot(lhs, rhs)
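# Illustrative sketch, not part of the original module: with the default axes,
# tensordot contracts the last dimension of `lhs` against the first dimension
# of `rhs`, so both contracted dimensions drop out of the result shape. The
# helper name is hypothetical.
def _example_tensordot_shape():
    x = symbol('x', '10 * 20 * int32')
    y = symbol('y', '20 * 30 * int32')
    return tensordot(x, y).shape  # expected: (10, 30)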
from datashape.predicates import isnumeric, isboolean
from .expressions import dshape_method_list, method_properties
dshape_method_list.extend([
(lambda ds: ndim(ds) > 1, set([transpose])),
(lambda ds: ndim(ds) == 2, set([T])),
(lambda ds: ndim(ds) >= 1 and (isnumeric(ds) or isboolean(ds)), set([dot]))
])
method_properties.add(T)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/arrays.py",
"copies": "2",
"size": "3752",
"license": "bsd-3-clause",
"hash": 7205476646865406000,
"line_mean": 26.1884057971,
"line_max": 79,
"alpha_frac": 0.5386460554,
"autogenerated": false,
"ratio": 3.4327538883806037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4971399943780604,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..expr import (Expr, Symbol, Field, Arithmetic, Math,
Date, Time, DateTime, Millisecond, Microsecond, broadcast,
sin, cos, Map, UTCFromTimestamp, DateTimeTruncate, symbol,
USub, Not)
from ..expr import math as expr_math
from ..expr.expressions import valid_identifier
from ..dispatch import dispatch
from . import pydatetime
import datetime
import math
import toolz
import itertools
funcnames = ('func_%d' % i for i in itertools.count())
def parenthesize(s):
if ' ' in s:
return '(%s)' % s
else:
return s
def print_python(leaves, expr):
""" Print expression to be evaluated in Python
>>> from blaze.expr import ceil, sin
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> print_python([t], t.x + t.y)
('t[0] + t[1]', {})
Supports mathematical and datetime access
>>> print_python([t], sin(t.x) > ceil(t.y)) # doctest: +SKIP
('math.sin(t[0]) > math.ceil(t[1])', {'math':<module 'math'>})
>>> print_python([t], t.when.day + 1)
('t[3].day + 1', {})
Specify leaves of the expression to control level of printing
>>> print_python([t.x, t.y], t.x + t.y)
('x + y', {})
Returns
-------
s: string
        An evaluable string
    scope: dict
        A namespace to be given to eval
"""
if isinstance(expr, Expr) and any(expr.isidentical(lf) for lf in leaves):
return valid_identifier(expr._name), {}
return _print_python(expr, leaves=leaves)
@dispatch(object)
def _print_python(expr, leaves=None):
return repr(expr), {}
@dispatch((datetime.datetime, datetime.date))
def _print_python(expr, leaves=None):
return repr(expr), {'datetime': datetime}
@dispatch(Symbol)
def _print_python(expr, leaves=None):
return valid_identifier(expr._name), {}
@dispatch(Field)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
index = expr._child.fields.index(expr._name)
return '%s[%d]' % (parenthesize(child), index), scope
@dispatch(Arithmetic)
def _print_python(expr, leaves=None):
lhs, left_scope = print_python(leaves, expr.lhs)
rhs, right_scope = print_python(leaves, expr.rhs)
return ('%s %s %s' % (parenthesize(lhs),
expr.symbol,
parenthesize(rhs)),
toolz.merge(left_scope, right_scope))
@dispatch(USub)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return '%s%s' % (expr.symbol, parenthesize(child)), scope
@dispatch(Not)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return 'not %s' % parenthesize(child), scope
@dispatch(Math)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('math.%s(%s)' % (type(expr).__name__, child),
toolz.merge(scope, {'math': math}))
@dispatch(expr_math.abs)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('abs(%s)' % child, scope)
@dispatch(Date)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.date()' % parenthesize(child), scope)
@dispatch(Time)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.time()' % parenthesize(child), scope)
@dispatch(Millisecond)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('%s.microsecond // 1000' % parenthesize(child), scope)
@dispatch(UTCFromTimestamp)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
return ('datetime.datetime.utcfromtimestamp(%s)' % parenthesize(child),
toolz.merge({'datetime': datetime}, scope))
@dispatch(DateTime)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
attr = type(expr).__name__.lower()
return ('%s.%s' % (parenthesize(child), attr), scope)
@dispatch(DateTimeTruncate)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
scope['truncate'] = pydatetime.truncate
return ('truncate(%s, %s, "%s")' % (child, expr.measure, expr.unit),
scope)
@dispatch(Map)
def _print_python(expr, leaves=None):
child, scope = print_python(leaves, expr._child)
funcname = next(funcnames)
return ('%s(%s)' % (funcname, child),
toolz.assoc(scope, funcname, expr.func))
@dispatch(Expr)
def _print_python(expr, leaves=None):
raise NotImplementedError("Do not know how to write expressions of type %s"
" to Python code" % type(expr).__name__)
def funcstr(leaves, expr):
""" Lambda string for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> funcstr([t], t.x + t.y)
('lambda t: t[0] + t[1]', {})
>>> funcstr([t.x, t.y], t.x + t.y)
('lambda x, y: x + y', {})
Also returns scope for libraries like math or datetime
>>> funcstr([t.x, t.y], sin(t.x) + t.y) # doctest: +SKIP
('lambda x, y: math.sin(x) + y', {'math': <module 'math'>})
>>> from datetime import date
>>> funcstr([t.x, t.y, t.when], t.when.date > date(2001, 12, 25)) #doctest: +SKIP
('lambda x, y, when: when.day > datetime.date(2001, 12, 25)', {'datetime': <module 'datetime'>})
"""
result, scope = print_python(leaves, expr)
leaf_names = [print_python([leaf], leaf)[0] for leaf in leaves]
return 'lambda %s: %s' % (', '.join(leaf_names),
result), scope
def lambdify(leaves, expr):
""" Lambda for an expresion
>>> t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
>>> f = lambdify([t], t.x + t.y)
>>> f((1, 10, 100, ''))
11
>>> f = lambdify([t.x, t.y, t.z, t.when], t.x + cos(t.y))
>>> f(1, 0, 100, '')
2.0
"""
s, scope = funcstr(leaves, expr)
return eval(s, scope)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/pyfunc.py",
"copies": "1",
"size": "6101",
"license": "bsd-3-clause",
"hash": 9211441601999977000,
"line_mean": 29.8131313131,
"line_max": 100,
"alpha_frac": 0.6095722013,
"autogenerated": false,
"ratio": 3.2573411639081686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4366913365208168,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..expr import (Expr, Symbol, Field, Arithmetic, RealMath, Map, Not,
USub, Date, Time, DateTime, Millisecond, Microsecond, broadcast, sin,
cos, isnan, UnaryOp, symbol)
import datetime
from datashape import iscollection
import math
from toolz import curry
import itertools
from ..expr.broadcast import broadcast_collect
funcnames = ('func_%d' % i for i in itertools.count())
def parenthesize(s):
if ' ' in s:
return '(%s)' % s
else:
return s
def print_numexpr(leaves, expr):
""" Print expression to be evaluated in Python
>>> from blaze.expr import ceil, sin
>>> t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
>>> print_numexpr([t], t.x + t.y)
'x + y'
Supports mathematical functions
>>> print_numexpr([t], sin(t.x) > cos(t.y))
'sin(x) > cos(y)'
Returns
-------
s: string
        An evaluable string
"""
if not isinstance(expr, Expr):
return repr(expr)
if any(expr.isidentical(leaf) for leaf in leaves):
return expr._name
if isinstance(expr, Symbol):
return expr._name
if isinstance(expr, Field):
return expr._name
if isinstance(expr, Arithmetic):
lhs = print_numexpr(leaves, expr.lhs)
rhs = print_numexpr(leaves, expr.rhs)
return '%s %s %s' % (parenthesize(lhs),
expr.symbol,
parenthesize(rhs))
if isinstance(expr, RealMath):
child = print_numexpr(leaves, expr._child)
return '%s(%s)' % (type(expr).__name__, child)
if isinstance(expr, UnaryOp) and hasattr(expr, 'symbol'):
child = print_numexpr(leaves, expr._child)
return '%s%s' % (expr.symbol, parenthesize(child))
if isinstance(expr, isnan):
child = print_numexpr(leaves, expr._child)
        return '%s != %s' % (parenthesize(child), parenthesize(child))
raise NotImplementedError("Operation %s not supported by numexpr" %
type(expr).__name__)
WantToBroadcast = (Arithmetic, RealMath, Not, USub)
Broadcastable = (Arithmetic, RealMath, Not, USub)
broadcast_numexpr_collect = curry(broadcast_collect,
Broadcastable=Broadcastable,
WantToBroadcast=WantToBroadcast)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/numexpr.py",
"copies": "1",
"size": "2321",
"license": "bsd-3-clause",
"hash": 907984567619242100,
"line_mean": 29.9466666667,
"line_max": 77,
"alpha_frac": 0.6182679879,
"autogenerated": false,
"ratio": 3.5928792569659445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47111472448659447,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..expr import (Expr, Symbol, Field, Arithmetic, RealMath, Not,
USub, Date, Time, DateTime, Millisecond, Microsecond,
broadcast, sin, cos, isnan, UnaryOp, symbol)
from toolz import curry
import itertools
from ..expr.broadcast import broadcast_collect
funcnames = ('func_%d' % i for i in itertools.count())
def parenthesize(s):
if ' ' in s:
return '(%s)' % s
else:
return s
def print_numexpr(leaves, expr):
""" Print expression to be evaluated in Python
>>> from blaze.expr import ceil, sin
>>> t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
>>> print_numexpr([t], t.x + t.y)
'x + y'
Supports mathematical functions
>>> print_numexpr([t], sin(t.x) > cos(t.y))
'sin(x) > cos(y)'
Returns
-------
s: string
        An evaluable string
"""
if not isinstance(expr, Expr):
return repr(expr)
if any(expr.isidentical(leaf) for leaf in leaves):
return expr._name
if isinstance(expr, Symbol):
return expr._name
if isinstance(expr, Field):
return expr._name
if isinstance(expr, Arithmetic):
lhs = print_numexpr(leaves, expr.lhs)
rhs = print_numexpr(leaves, expr.rhs)
return '%s %s %s' % (parenthesize(lhs),
expr.symbol,
parenthesize(rhs))
if isinstance(expr, RealMath):
child = print_numexpr(leaves, expr._child)
return '%s(%s)' % (type(expr).__name__, child)
if isinstance(expr, UnaryOp) and hasattr(expr, 'symbol'):
child = print_numexpr(leaves, expr._child)
return '%s%s' % (expr.symbol, parenthesize(child))
if isinstance(expr, isnan):
child = print_numexpr(leaves, expr._child)
        return '%s != %s' % (parenthesize(child), parenthesize(child))
raise NotImplementedError("Operation %s not supported by numexpr" %
type(expr).__name__)
WantToBroadcast = (Arithmetic, RealMath, Not, USub)
Broadcastable = (Arithmetic, RealMath, Not, USub)
broadcast_numexpr_collect = curry(broadcast_collect,
Broadcastable=Broadcastable,
WantToBroadcast=WantToBroadcast)
| {
"repo_name": "caseyclements/blaze",
"path": "blaze/compute/numexpr.py",
"copies": "10",
"size": "2278",
"license": "bsd-3-clause",
"hash": -1143501348464861800,
"line_mean": 30.2054794521,
"line_max": 73,
"alpha_frac": 0.6044776119,
"autogenerated": false,
"ratio": 3.615873015873016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011194561812670732,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
from ..expr import (Expr, Symbol, Field, Arithmetic, UnaryMath, Not, USub,
isnan, UnaryOp, BinOp)
from toolz import curry
import itertools
from ..expr.broadcast import broadcast_collect
funcnames = ('func_%d' % i for i in itertools.count())
def parenthesize(s):
if ' ' in s:
return '(%s)' % s
else:
return s
def print_numexpr(leaves, expr):
""" Print expression to be evaluated in Python
>>> from blaze import symbol, ceil, sin, cos
>>> t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
>>> print_numexpr([t], t.x + t.y)
'x + y'
Supports mathematical functions
>>> print_numexpr([t], sin(t.x) > cos(t.y))
'sin(x) > cos(y)'
Returns
-------
s: string
        An evaluable string
"""
if not isinstance(expr, Expr):
return repr(expr)
if any(expr.isidentical(leaf) for leaf in leaves):
return expr._name
if isinstance(expr, Symbol):
return expr._name
if isinstance(expr, Field):
return expr._name
if isinstance(expr, Arithmetic):
lhs = print_numexpr(leaves, expr.lhs)
rhs = print_numexpr(leaves, expr.rhs)
return '%s %s %s' % (parenthesize(lhs),
expr.symbol,
parenthesize(rhs))
if isinstance(expr, UnaryMath):
child = print_numexpr(leaves, expr._child)
return '%s(%s)' % (type(expr).__name__, child)
if isinstance(expr, UnaryOp) and hasattr(expr, 'symbol'):
child = print_numexpr(leaves, expr._child)
return '%s%s' % (expr.symbol, parenthesize(child))
if isinstance(expr, isnan):
child = print_numexpr(leaves, expr._child)
return '%s != %s' % (parenthesize(child), parenthesize(child))
raise NotImplementedError("Operation %s not supported by numexpr" %
type(expr).__name__)
Broadcastable = WantToBroadcast = BinOp, UnaryOp
broadcast_numexpr_collect = curry(
broadcast_collect,
broadcastable=Broadcastable,
want_to_broadcast=WantToBroadcast
)
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/compute/numexpr.py",
"copies": "6",
"size": "2164",
"license": "bsd-3-clause",
"hash": 729832068026365300,
"line_mean": 28.6438356164,
"line_max": 74,
"alpha_frac": 0.5975046211,
"autogenerated": false,
"ratio": 3.6187290969899664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
from ...external.qt import QtGui
from ...external.qt.QtCore import Qt
from ... import core
from ...clients.scatter_client import ScatterClient
from ..glue_toolbar import GlueToolbar
from ..mouse_mode import (RectangleMode, CircleMode,
PolyMode, HRangeMode, VRangeMode)
from .data_viewer import DataViewer
from .mpl_widget import MplWidget, defer_draw
from ..widget_properties import (ButtonProperty, FloatLineProperty,
CurrentComboProperty,
connect_bool_button, connect_float_edit)
from ..qtutil import load_ui, cache_axes, nonpartial
__all__ = ['ScatterWidget']
WARN_SLOW = 1000000 # max number of points which render quickly
class ScatterWidget(DataViewer):
"""
An interactive scatter plot.
"""
LABEL = "Scatter Plot"
_property_set = DataViewer._property_set + \
'xlog ylog xflip yflip hidden xatt yatt xmin xmax ymin ymax'.split()
xlog = ButtonProperty('ui.xLogCheckBox', 'log scaling on x axis?')
ylog = ButtonProperty('ui.yLogCheckBox', 'log scaling on y axis?')
xflip = ButtonProperty('ui.xFlipCheckBox', 'invert the x axis?')
yflip = ButtonProperty('ui.yFlipCheckBox', 'invert the y axis?')
xmin = FloatLineProperty('ui.xmin', 'Lower x limit of plot')
xmax = FloatLineProperty('ui.xmax', 'Upper x limit of plot')
ymin = FloatLineProperty('ui.ymin', 'Lower y limit of plot')
ymax = FloatLineProperty('ui.ymax', 'Upper y limit of plot')
hidden = ButtonProperty('ui.hidden_attributes', 'Show hidden attributes')
xatt = CurrentComboProperty('ui.xAxisComboBox',
'Attribute to plot on x axis')
yatt = CurrentComboProperty('ui.yAxisComboBox',
'Attribute to plot on y axis')
def __init__(self, session, parent=None):
super(ScatterWidget, self).__init__(session, parent)
self.central_widget = MplWidget()
self.option_widget = QtGui.QWidget()
self.setCentralWidget(self.central_widget)
self.ui = load_ui('scatterwidget', self.option_widget)
self._tweak_geometry()
self.client = ScatterClient(self._data,
self.central_widget.canvas.fig,
artist_container=self._container)
self._connect()
self.unique_fields = set()
tb = self.make_toolbar()
cache_axes(self.client.axes, tb)
self.statusBar().setSizeGripEnabled(False)
self.setFocusPolicy(Qt.StrongFocus)
@staticmethod
def _get_default_tools():
return []
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
def _connect(self):
ui = self.ui
cl = self.client
connect_bool_button(cl, 'xlog', ui.xLogCheckBox)
connect_bool_button(cl, 'ylog', ui.yLogCheckBox)
connect_bool_button(cl, 'xflip', ui.xFlipCheckBox)
connect_bool_button(cl, 'yflip', ui.yFlipCheckBox)
ui.xAxisComboBox.currentIndexChanged.connect(self.update_xatt)
ui.yAxisComboBox.currentIndexChanged.connect(self.update_yatt)
ui.hidden_attributes.toggled.connect(lambda x: self._update_combos())
ui.swapAxes.clicked.connect(nonpartial(self.swap_axes))
ui.snapLimits.clicked.connect(cl.snap)
connect_float_edit(cl, 'xmin', ui.xmin)
connect_float_edit(cl, 'xmax', ui.xmax)
connect_float_edit(cl, 'ymin', ui.ymin)
connect_float_edit(cl, 'ymax', ui.ymax)
def make_toolbar(self):
result = GlueToolbar(self.central_widget.canvas, self,
name='Scatter Plot')
for mode in self._mouse_modes():
result.add_mode(mode)
self.addToolBar(result)
return result
def _mouse_modes(self):
axes = self.client.axes
def apply_mode(mode):
return self.apply_roi(mode.roi())
rect = RectangleMode(axes, roi_callback=apply_mode)
xra = HRangeMode(axes, roi_callback=apply_mode)
yra = VRangeMode(axes, roi_callback=apply_mode)
circ = CircleMode(axes, roi_callback=apply_mode)
poly = PolyMode(axes, roi_callback=apply_mode)
return [rect, xra, yra, circ, poly]
@defer_draw
def _update_combos(self):
""" Update contents of combo boxes """
# have to be careful here, since client and/or widget
# are potentially out of sync
layer_ids = []
# show hidden attributes if needed
if ((self.client.xatt and self.client.xatt.hidden) or
(self.client.yatt and self.client.yatt.hidden)):
self.hidden = True
# determine which components to put in combos
for l in self.client.data:
if not self.client.is_layer_present(l):
continue
for lid in self.client.plottable_attributes(
l, show_hidden=self.hidden):
if lid not in layer_ids:
layer_ids.append(lid)
oldx = self.xatt
oldy = self.yatt
newx = self.client.xatt or oldx
newy = self.client.yatt or oldy
for combo, target in zip([self.ui.xAxisComboBox, self.ui.yAxisComboBox],
[newx, newy]):
combo.blockSignals(True)
combo.clear()
if not layer_ids: # empty component list
continue
# populate
for lid in layer_ids:
combo.addItem(lid.label, userData=lid)
idx = layer_ids.index(target) if target in layer_ids else 0
combo.setCurrentIndex(idx)
combo.blockSignals(False)
# ensure client and widget synced
self.client.xatt = self.xatt
        self.client.yatt = self.yatt
@defer_draw
def add_data(self, data):
"""Add a new data set to the widget
:returns: True if the addition was expected, False otherwise
"""
if self.client.is_layer_present(data):
return
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
first_layer = self.client.layer_count == 0
self.client.add_data(data)
self._update_combos()
if first_layer: # forces both x and y axes to be rescaled
self.update_xatt(None)
self.update_yatt(None)
self.ui.xAxisComboBox.setCurrentIndex(0)
if len(data.visible_components) > 1:
self.ui.yAxisComboBox.setCurrentIndex(1)
else:
self.ui.yAxisComboBox.setCurrentIndex(0)
self.update_window_title()
return True
@defer_draw
def add_subset(self, subset):
"""Add a subset to the widget
:returns: True if the addition was accepted, False otherwise
"""
if self.client.is_layer_present(subset):
return
data = subset.data
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
first_layer = self.client.layer_count == 0
self.client.add_layer(subset)
self._update_combos()
if first_layer: # forces both x and y axes to be rescaled
self.update_xatt(None)
self.update_yatt(None)
self.ui.xAxisComboBox.setCurrentIndex(0)
if len(data.visible_components) > 1:
self.ui.yAxisComboBox.setCurrentIndex(1)
else:
self.ui.yAxisComboBox.setCurrentIndex(0)
self.update_window_title()
return True
def register_to_hub(self, hub):
super(ScatterWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self, core.message.DataUpdateMessage,
nonpartial(self._sync_labels))
hub.subscribe(self, core.message.ComponentsChangedMessage,
nonpartial(self._update_combos))
hub.subscribe(self, core.message.ComponentReplacedMessage,
self._on_component_replace)
def _on_component_replace(self, msg):
# let client update its state first
self.client._on_component_replace(msg)
self._update_combos()
def unregister(self, hub):
super(ScatterWidget, self).unregister(hub)
hub.unsubscribe_all(self.client)
hub.unsubscribe_all(self)
@defer_draw
def swap_axes(self):
xid = self.ui.xAxisComboBox.currentIndex()
yid = self.ui.yAxisComboBox.currentIndex()
xlog = self.ui.xLogCheckBox.isChecked()
ylog = self.ui.yLogCheckBox.isChecked()
xflip = self.ui.xFlipCheckBox.isChecked()
yflip = self.ui.yFlipCheckBox.isChecked()
self.ui.xAxisComboBox.setCurrentIndex(yid)
self.ui.yAxisComboBox.setCurrentIndex(xid)
self.ui.xLogCheckBox.setChecked(ylog)
self.ui.yLogCheckBox.setChecked(xlog)
self.ui.xFlipCheckBox.setChecked(yflip)
self.ui.yFlipCheckBox.setChecked(xflip)
@defer_draw
def update_xatt(self, index):
component_id = self.xatt
self.client.xatt = component_id
@defer_draw
def update_yatt(self, index):
component_id = self.yatt
self.client.yatt = component_id
@property
def window_title(self):
data = self.client.data
label = ', '.join([d.label for d in data if
self.client.is_visible(d)])
return label
def _sync_labels(self):
self.update_window_title()
def options_widget(self):
return self.option_widget
@defer_draw
def restore_layers(self, rec, context):
self.client.restore_layers(rec, context)
self._update_combos()
# manually force client attributes to sync
self.update_xatt(None)
self.update_yatt(None)
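# A minimal, hypothetical helper isolating the block-signals / clear /
# repopulate pattern used by _update_combos above. It is a sketch, not part
# of the original widget: `combo` is any QComboBox and `items` is assumed to
# be an iterable of (label, data) pairs.
def _repopulate_combo_sketch(combo, items, current_label=None):
    combo.blockSignals(True)        # suppress currentIndexChanged while rebuilding
    try:
        combo.clear()
        for label, data in items:
            combo.addItem(label, userData=data)
        if current_label is not None:
            idx = combo.findText(current_label)
            combo.setCurrentIndex(idx if idx >= 0 else 0)
    finally:
        combo.blockSignals(False)   # always re-enable signals, even on error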
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/scatter_widget.py",
"copies": "1",
"size": "10031",
"license": "bsd-3-clause",
"hash": 7163589458533899000,
"line_mean": 33.119047619,
"line_max": 80,
"alpha_frac": 0.6091117536,
"autogenerated": false,
"ratio": 3.8849728892331528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49940846428331526,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ...external.qt import QtGui
from ...external.qt.QtCore import Qt, Signal
from ... import core
from ..mime import LAYER_MIME_TYPE, LAYERS_MIME_TYPE
class GlueMdiArea(QtGui.QMdiArea):
"""Glue's MdiArea implementation.
Drop events with :class:`~glue.core.data.Data` objects in
:class:`~glue.qt.mime.PyMimeData` load these objects into new
data viewers
"""
def __init__(self, application, parent=None):
"""
:param application: The Glue application to which this is attached
:type application: :class:`~glue.qt.glue_application.GlueApplication`
"""
super(GlueMdiArea, self).__init__(parent)
self._application = application
self.setAcceptDrops(True)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setBackground(QtGui.QBrush(QtGui.QColor(250, 250, 250)))
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
def addSubWindow(self, sub):
super(GlueMdiArea, self).addSubWindow(sub)
self.repaint()
def dragEnterEvent(self, event):
""" Accept the event if it has an application/py_instance format """
if event.mimeData().hasFormat(LAYERS_MIME_TYPE):
event.accept()
elif event.mimeData().hasFormat(LAYER_MIME_TYPE):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
""" Load a new data viewer if the event has a glue Data object """
md = event.mimeData()
def new_layer(layer):
if isinstance(layer, core.data.Data):
self._application.choose_new_data_viewer(layer)
else:
assert isinstance(layer, core.subset.Subset)
self._application.choose_new_data_viewer(layer.data)
if md.hasFormat(LAYER_MIME_TYPE):
new_layer(md.data(LAYER_MIME_TYPE))
assert md.hasFormat(LAYERS_MIME_TYPE)
for layer in md.data(LAYERS_MIME_TYPE):
new_layer(layer)
event.accept()
def mousePressEvent(self, event):
"""Right mouse press in the MDI area opens a new data viewer"""
if event.button() != Qt.RightButton:
return
self._application.choose_new_data_viewer()
def close(self):
self.closeAllSubWindows()
super(GlueMdiArea, self).close()
def paintEvent(self, event):
super(GlueMdiArea, self).paintEvent(event)
painter = QtGui.QPainter(self.viewport())
painter.setPen(QtGui.QColor(210, 210, 210))
font = painter.font()
font.setPointSize(48)
font.setWeight(font.Black)
painter.setFont(font)
rect = self.contentsRect()
painter.drawText(rect, Qt.AlignHCenter | Qt.AlignVCenter,
"Drag Data To Plot")
class GlueMdiSubWindow(QtGui.QMdiSubWindow):
closed = Signal()
def closeEvent(self, event):
super(GlueMdiSubWindow, self).closeEvent(event)
self.closed.emit()
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/glue_mdi_area.py",
"copies": "1",
"size": "3117",
"license": "bsd-3-clause",
"hash": -4857368453920461000,
"line_mean": 31.8105263158,
"line_max": 77,
"alpha_frac": 0.6326596086,
"autogenerated": false,
"ratio": 3.801219512195122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9933879120795122,
"avg_score": 0,
"num_lines": 95
} |
from __future__ import absolute_import, division, print_function
from ...external.qt import QtGui
from ...core.edit_subset_mode import (EditSubsetMode, OrMode, AndNotMode,
AndMode, XorMode, ReplaceMode)
from ..actions import act
from ..qtutil import nonpartial
def set_mode(mode):
edit_mode = EditSubsetMode()
edit_mode.mode = mode
class EditSubsetModeToolBar(QtGui.QToolBar):
def __init__(self, title="Subset Update Mode", parent=None):
super(EditSubsetModeToolBar, self).__init__(title, parent)
self._group = QtGui.QActionGroup(self)
self._modes = {}
self._add_actions()
self._modes[EditSubsetMode().mode].trigger()
self._backup_mode = None
def _make_mode(self, name, tip, icon, mode):
a = act(name, self, tip, icon)
a.setCheckable(True)
a.triggered.connect(nonpartial(set_mode, mode))
self._group.addAction(a)
self.addAction(a)
self._modes[mode] = a
label = name.split()[0].lower().replace('&', '')
self._modes[label] = mode
def _add_actions(self):
self._make_mode("&Replace Mode", "Replace selection",
'glue_replace', ReplaceMode)
self._make_mode("&Or Mode", "Add to selection",
'glue_or', OrMode)
self._make_mode("&And Mode", "Set selection as intersection",
'glue_and', AndMode)
self._make_mode("&Xor Mode", "Set selection as exclusive intersection",
'glue_xor', XorMode)
self._make_mode("&Not Mode", "Remove from selection",
'glue_andnot', AndNotMode)
def set_mode(self, mode):
"""Temporarily set the edit mode to mode
        :param mode: Name of the mode ('replace', 'or', 'and', 'xor' or 'not')
:type mode: str
"""
try:
mode = self._modes[mode] # label to mode class
except KeyError:
raise KeyError("Unrecognized mode: %s" % mode)
self._backup_mode = self._backup_mode or EditSubsetMode().mode
self._modes[mode].trigger() # mode class to action
def unset_mode(self):
"""Restore the mode to the state before set_mode was called"""
mode = self._backup_mode
self._backup_mode = None
if mode:
self._modes[mode].trigger()
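# A sketch of how the set_mode/unset_mode pair above is meant to be used:
# force OR-combination for the duration of a callable, then restore whatever
# mode the user had chosen. `toolbar` is assumed to be an
# EditSubsetModeToolBar instance; this helper is illustrative only.
def _with_or_mode_sketch(toolbar, do_selection):
    toolbar.set_mode('or')     # label lookup: 'or' -> OrMode -> action.trigger()
    try:
        do_selection()
    finally:
        toolbar.unset_mode()   # back to the mode that was active before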
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/edit_subset_mode_toolbar.py",
"copies": "1",
"size": "2391",
"license": "bsd-3-clause",
"hash": -2621465117561346000,
"line_mean": 35.2272727273,
"line_max": 79,
"alpha_frac": 0.5788373066,
"autogenerated": false,
"ratio": 3.9261083743842367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 66
} |
from __future__ import absolute_import, division, print_function
from ..external.qt.QtCore import QMimeData, QByteArray
class PyMimeData(QMimeData):
"""
A custom MimeData object that stores live python objects
Associate specific objects with a mime type by passing
    mime type / object key/value pairs to the __init__ method
If a single object is passed to the init method, that
object is associated with the PyMimeData.MIME_TYPE mime type
"""
MIME_TYPE = 'application/py_instance'
def __init__(self, instance=None, **kwargs):
"""
:param instance: The python object to store
kwargs: Optional mime type / objects pairs to store as objects
"""
super(PyMimeData, self).__init__()
self._instances = {}
self.setData(self.MIME_TYPE, instance)
for k, v in kwargs.items():
self.setData(k, v)
def formats(self):
return list(set(super(PyMimeData, self).formats() +
list(self._instances.keys())))
def hasFormat(self, fmt):
return fmt in self._instances or super(PyMimeData, self).hasFormat(fmt)
def setData(self, mime, data):
super(PyMimeData, self).setData(mime, QByteArray('1'))
self._instances[mime] = data
def data(self, mime_type):
""" Retrieve the data stored at the specified mime_type
If mime_type is application/py_instance, a python object
is returned. Otherwise, a QByteArray is returned """
if str(mime_type) in self._instances:
return self._instances[mime_type]
return super(PyMimeData, self).data(mime_type)
#some standard glue mime types
LAYER_MIME_TYPE = 'glue/layer'
LAYERS_MIME_TYPE = 'glue/layers'
INSTANCE_MIME_TYPE = PyMimeData.MIME_TYPE
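# A minimal round trip through PyMimeData, kept inside a function so importing
# this module stays side-effect free. The dict stands in for a glue
# Data/Subset; the point is that the same live object comes back out, with no
# serialization involved. Illustrative sketch, not part of the original API.
def _pymimedata_roundtrip_sketch():
    layer = {'label': 'example layer'}
    md = PyMimeData(layer, **{LAYER_MIME_TYPE: layer})
    assert md.hasFormat(INSTANCE_MIME_TYPE)
    assert md.hasFormat(LAYER_MIME_TYPE)
    assert md.data(LAYER_MIME_TYPE) is layer   # the very same object
    return md.formats()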
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/mime.py",
"copies": "1",
"size": "1799",
"license": "bsd-3-clause",
"hash": -1622374647008352000,
"line_mean": 30.5614035088,
"line_max": 79,
"alpha_frac": 0.6498054475,
"autogenerated": false,
"ratio": 3.8605150214592276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5010320468959227,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..external.qt.QtGui import QAction
from .qtutil import get_icon
def act(name, parent, tip='', icon=None, shortcut=None):
""" Factory for making a new action """
a = QAction(name, parent)
a.setToolTip(tip)
if icon:
a.setIcon(get_icon(icon))
if shortcut:
a.setShortcut(shortcut)
return a
"""
tab_new = act('New Tab',
shortcut=QKeySequence.AddTab,
tip='Add a new tab')
tab_tile = act("Tile",
tip="Tile windows in the current tab")
tab_cascade = act("Cascade",
tip = "Cascade windows in the current tab")
window_new = act('New Window',
shortcut=QKeySequence.New,
tip='Add a new visualization window to the current tab')
subset_or = act("Union Combine",
icon='glue_or',
tip = 'Define a new subset as a union of selection')
subset_and = act("Intersection Combine",
icon="glue_and",
tip = 'Define a new subset as intersection of selection')
subset_xor = act("XOR Combine",
icon='glue_xor',
tip= 'Define a new subset as non-intersection of selection')
subset_not = act("Invert",
icon="glue_not",
tip="Invert current subset")
subset_copy = act("Copy subset",
tip="Copy the definition for the selected subset",
shortcut=QKeySequence.Copy)
subset_paste = act("Paste subset",
tip = "Replace the selected subset with clipboard",
shortcut=QKeySequence.Paste)
subset_new = act("New subset",
tip="Create a new subset for the selected data",
shortcut=QKeySequence.New)
subset_clear = act("Clear subset",
tip="Clear current selection")
subset_duplicate = act("Duplicate subset",
tip="Duplicate the current subset",
shortcut="Ctrl+D")
layer_delete = act("Delete layer",
shortcut=QKeySequence.Delete,
tip="Remove the highlighted layer")
"""
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/actions.py",
"copies": "1",
"size": "2189",
"license": "bsd-3-clause",
"hash": 307613344573699140,
"line_mean": 29.8309859155,
"line_max": 77,
"alpha_frac": 0.5710370032,
"autogenerated": false,
"ratio": 4.217726396917149,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5288763400117148,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ...external.qt.QtGui import (QAction, QLabel, QCursor, QMainWindow,
QToolButton, QIcon, QMessageBox
)
from ...external.qt.QtCore import Qt, QRect, Signal
from .data_viewer import DataViewer
from ... import core
from ...clients.image_client import MplImageClient
from ...clients.ds9norm import DS9Normalize
from ...external.modest_image import imshow
from ...clients.layer_artist import Pointer
from ...core.callback_property import add_callback, delay_callback
from .data_slice_widget import DataSlice
from ..mouse_mode import (RectangleMode, CircleMode, PolyMode,
ContrastMode)
from ..glue_toolbar import GlueToolbar
from .mpl_widget import MplWidget, defer_draw
from ..qtutil import cmap2pixmap, load_ui, get_icon, nonpartial, update_combobox
from ..widget_properties import CurrentComboProperty, ButtonProperty, connect_current_combo
from .glue_mdi_area import GlueMdiSubWindow
WARN_THRESH = 10000000 # warn when contouring large images
__all__ = ['ImageWidget']
class ImageWidgetBase(DataViewer):
"""
Widget for ImageClient
This base class avoids any matplotlib-specific logic
"""
LABEL = "Image Viewer"
_property_set = DataViewer._property_set + \
'data attribute rgb_mode rgb_viz ratt gatt batt slice'.split()
attribute = CurrentComboProperty('ui.attributeComboBox',
'Current attribute')
data = CurrentComboProperty('ui.displayDataCombo',
'Current data')
rgb_mode = ButtonProperty('ui.rgb',
'RGB Mode?')
rgb_viz = Pointer('ui.rgb_options.rgb_visible')
def __init__(self, session, parent=None):
super(ImageWidgetBase, self).__init__(session, parent)
self._setup_widgets()
self.client = self.make_client()
self._setup_tools()
tb = self.make_toolbar()
self.addToolBar(tb)
self._connect()
def _setup_widgets(self):
self.central_widget = self.make_central_widget()
self.label_widget = QLabel("", self.central_widget)
self.setCentralWidget(self.central_widget)
self.ui = load_ui('imagewidget', None)
self.option_widget = self.ui
self.ui.slice = DataSlice()
self.ui.slice_layout.addWidget(self.ui.slice)
self._tweak_geometry()
def make_client(self):
""" Instantiate and return an ImageClient subclass """
raise NotImplementedError()
def make_central_widget(self):
""" Create and return the central widget to display the image """
raise NotImplementedError()
def make_toolbar(self):
""" Create and return the toolbar for this widget """
raise NotImplementedError()
@staticmethod
def _get_default_tools():
return []
def _setup_tools(self):
"""
Set up additional tools for this widget
"""
from ... import config
self._tools = []
for tool in config.tool_registry.members[self.__class__]:
self._tools.append(tool(self))
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
self.ui.rgb_options.hide()
self.statusBar().setSizeGripEnabled(False)
self.setFocusPolicy(Qt.StrongFocus)
@defer_draw
def add_data(self, data):
"""
Add a new dataset to the viewer
"""
# overloaded from DataViewer
# need to delay callbacks, otherwise might
# try to set combo boxes to nonexisting items
with delay_callback(self.client, 'display_data', 'display_attribute'):
# If there is not already any image data set, we can't add 1-D
# datasets (tables/catalogs) to the image widget yet.
if data.data.ndim == 1 and self.client.display_data is None:
QMessageBox.information(self.window(), "Note",
"Cannot create image viewer from a 1-D "
"dataset. You will need to first "
"create an image viewer using data "
"with 2 or more dimensions, after "
"which you will be able to overlay 1-D "
"data as a scatter plot.",
buttons=QMessageBox.Ok)
return
r = self.client.add_layer(data)
if r is not None and self.client.display_data is not None:
self.add_data_to_combo(data)
self.set_attribute_combo(self.client.display_data)
return r is not None
@defer_draw
def add_subset(self, subset):
self.client.add_scatter_layer(subset)
assert subset in self.client.artists
def add_data_to_combo(self, data):
""" Add a data object to the combo box, if not already present
"""
if not self.client.can_image_data(data):
return
combo = self.ui.displayDataCombo
label = data.label
pos = combo.findText(label)
if pos == -1:
combo.addItem(label, userData=data)
assert combo.findText(label) >= 0
@property
def ratt(self):
"""ComponentID assigned to R channel in RGB Mode"""
return self.ui.rgb_options.attributes[0]
@ratt.setter
def ratt(self, value):
att = list(self.ui.rgb_options.attributes)
att[0] = value
self.ui.rgb_options.attributes = att
@property
def gatt(self):
"""ComponentID assigned to G channel in RGB Mode"""
return self.ui.rgb_options.attributes[1]
@gatt.setter
def gatt(self, value):
att = list(self.ui.rgb_options.attributes)
att[1] = value
self.ui.rgb_options.attributes = att
@property
def batt(self):
"""ComponentID assigned to B channel in RGB Mode"""
return self.ui.rgb_options.attributes[2]
@batt.setter
def batt(self, value):
att = list(self.ui.rgb_options.attributes)
att[2] = value
self.ui.rgb_options.attributes = att
@property
def slice(self):
return self.client.slice
@slice.setter
def slice(self, value):
self.client.slice = value
def set_attribute_combo(self, data):
""" Update attribute combo box to reflect components in data"""
labeldata = ((f.label, f) for f in data.visible_components)
update_combobox(self.ui.attributeComboBox, labeldata)
def _connect(self):
ui = self.ui
ui.monochrome.toggled.connect(self._update_rgb_console)
ui.rgb_options.colors_changed.connect(self.update_window_title)
# sync client and widget slices
ui.slice.slice_changed.connect(lambda: setattr(self, 'slice', self.ui.slice.slice))
update_ui_slice = lambda val: setattr(ui.slice, 'slice', val)
add_callback(self.client, 'slice', update_ui_slice)
add_callback(self.client, 'display_data', self.ui.slice.set_data)
# sync window title to data/attribute
add_callback(self.client, 'display_data', nonpartial(self.update_window_title))
add_callback(self.client, 'display_attribute', nonpartial(self.update_window_title))
# sync data/attribute combos with client properties
connect_current_combo(self.client, 'display_data', self.ui.displayDataCombo)
connect_current_combo(self.client, 'display_attribute', self.ui.attributeComboBox)
@defer_draw
def _update_rgb_console(self, is_monochrome):
if is_monochrome:
self.ui.rgb_options.hide()
self.ui.mono_att_label.show()
self.ui.attributeComboBox.show()
self.client.rgb_mode(False)
else:
self.ui.mono_att_label.hide()
self.ui.attributeComboBox.hide()
self.ui.rgb_options.show()
rgb = self.client.rgb_mode(True)
if rgb is not None:
self.ui.rgb_options.artist = rgb
def register_to_hub(self, hub):
super(ImageWidgetBase, self).register_to_hub(hub)
self.client.register_to_hub(hub)
dc_filt = lambda x: x.sender is self.client._data
layer_present_filter = lambda x: x.data in self.client.artists
hub.subscribe(self,
core.message.DataCollectionAddMessage,
handler=lambda x: self.add_data_to_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataCollectionDeleteMessage,
handler=lambda x: self.remove_data_from_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataUpdateMessage,
handler=lambda x: self._sync_data_labels()
)
hub.subscribe(self,
core.message.ComponentsChangedMessage,
handler=lambda x: self.set_attribute_combo(x.data),
filter=layer_present_filter)
def unregister(self, hub):
super(ImageWidgetBase, self).unregister(hub)
for obj in [self, self.client]:
hub.unsubscribe_all(obj)
def remove_data_from_combo(self, data):
""" Remove a data object from the combo box, if present """
combo = self.ui.displayDataCombo
pos = combo.findText(data.label)
if pos >= 0:
combo.removeItem(pos)
def _set_norm(self, mode):
""" Use the `ContrastMouseMode` to adjust the transfer function """
# at least one of the clip/vmin pairs will be None
clip_lo, clip_hi = mode.get_clip_percentile()
vmin, vmax = mode.get_vmin_vmax()
stretch = mode.stretch
return self.client.set_norm(clip_lo=clip_lo, clip_hi=clip_hi,
stretch=stretch,
vmin=vmin, vmax=vmax,
bias=mode.bias, contrast=mode.contrast)
@property
def window_title(self):
if self.client.display_data is None or self.client.display_attribute is None:
title = ''
else:
data = self.client.display_data.label
a = self.client.rgb_mode()
if a is None: # monochrome mode
title = "%s - %s" % (self.client.display_data.label,
self.client.display_attribute.label)
else:
r = a.r.label if a.r is not None else ''
g = a.g.label if a.g is not None else ''
b = a.b.label if a.b is not None else ''
title = "%s Red = %s Green = %s Blue = %s" % (data, r, g, b)
return title
def _sync_data_combo_labels(self):
combo = self.ui.displayDataCombo
for i in range(combo.count()):
combo.setItemText(i, combo.itemData(i).label)
def _sync_data_labels(self):
self.update_window_title()
self._sync_data_combo_labels()
def __str__(self):
return "Image Widget"
def _confirm_large_image(self, data):
"""Ask user to confirm expensive operations
:rtype: bool. Whether the user wishes to continue
"""
warn_msg = ("WARNING: Image has %i pixels, and may render slowly."
" Continue?" % data.size)
title = "Contour large image?"
ok = QMessageBox.Ok
cancel = QMessageBox.Cancel
buttons = ok | cancel
result = QMessageBox.question(self, title, warn_msg,
buttons=buttons,
defaultButton=cancel)
return result == ok
def options_widget(self):
return self.option_widget
@defer_draw
def restore_layers(self, rec, context):
with delay_callback(self.client, 'display_data', 'display_attribute'):
self.client.restore_layers(rec, context)
for artist in self.layers:
self.add_data_to_combo(artist.layer.data)
self.set_attribute_combo(self.client.display_data)
self._sync_data_combo_labels()
def closeEvent(self, event):
# close window and all plugins
super(ImageWidgetBase, self).closeEvent(event)
if event.isAccepted():
for t in self._tools:
t.close()
class ImageWidget(ImageWidgetBase):
"""
A matplotlib-based image widget
"""
def make_client(self):
return MplImageClient(self._data,
self.central_widget.canvas.fig,
artist_container=self._container)
def make_central_widget(self):
return MplWidget()
def make_toolbar(self):
result = GlueToolbar(self.central_widget.canvas, self, name='Image')
for mode in self._mouse_modes():
result.add_mode(mode)
cmap = _colormap_mode(self, self.client.set_cmap)
result.addWidget(cmap)
# connect viewport update buttons to client commands to
# allow resampling
cl = self.client
result.buttons['HOME'].triggered.connect(nonpartial(cl.check_update))
result.buttons['FORWARD'].triggered.connect(nonpartial(
cl.check_update))
result.buttons['BACK'].triggered.connect(nonpartial(cl.check_update))
return result
def _mouse_modes(self):
axes = self.client.axes
def apply_mode(mode):
for roi_mode in roi_modes:
if roi_mode != mode:
roi_mode._roi_tool.reset()
self.apply_roi(mode.roi())
rect = RectangleMode(axes, roi_callback=apply_mode)
circ = CircleMode(axes, roi_callback=apply_mode)
poly = PolyMode(axes, roi_callback=apply_mode)
roi_modes = [rect, circ, poly]
contrast = ContrastMode(axes, move_callback=self._set_norm)
self._contrast = contrast
# Get modes from tools
tool_modes = []
for tool in self._tools:
tool_modes += tool._get_modes(axes)
add_callback(self.client, 'display_data', tool._display_data_hook)
return [rect, circ, poly, contrast] + tool_modes
def paintEvent(self, event):
super(ImageWidget, self).paintEvent(event)
pos = self.central_widget.canvas.mapFromGlobal(QCursor.pos())
x, y = pos.x(), self.central_widget.canvas.height() - pos.y()
self._update_intensity_label(x, y)
def _intensity_label(self, x, y):
x, y = self.client.axes.transData.inverted().transform([(x, y)])[0]
value = self.client.point_details(x, y)['value']
lbl = '' if value is None else "data: %s" % value
return lbl
def _update_intensity_label(self, x, y):
lbl = self._intensity_label(x, y)
self.label_widget.setText(lbl)
fm = self.label_widget.fontMetrics()
w, h = fm.width(lbl), fm.height()
g = QRect(20, self.central_widget.geometry().height() - h, w, h)
self.label_widget.setGeometry(g)
def _connect(self):
super(ImageWidget, self)._connect()
self.ui.rgb_options.current_changed.connect(lambda: self._toolbars[0].set_mode(self._contrast))
class ColormapAction(QAction):
def __init__(self, label, cmap, parent):
super(ColormapAction, self).__init__(label, parent)
self.cmap = cmap
pm = cmap2pixmap(cmap)
self.setIcon(QIcon(pm))
def _colormap_mode(parent, on_trigger):
from ... import config
# actions for each colormap
acts = []
for label, cmap in config.colormaps:
a = ColormapAction(label, cmap, parent)
a.triggered.connect(nonpartial(on_trigger, cmap))
acts.append(a)
# Toolbar button
tb = QToolButton()
tb.setWhatsThis("Set color scale")
tb.setToolTip("Set color scale")
icon = get_icon('glue_rainbow')
tb.setIcon(icon)
tb.setPopupMode(QToolButton.InstantPopup)
tb.addActions(acts)
return tb
class StandaloneImageWidget(QMainWindow):
"""
A simplified image viewer, without any brushing or linking,
but with the ability to adjust contrast and resample.
"""
window_closed = Signal()
def __init__(self, image=None, wcs=None, parent=None, **kwargs):
"""
:param image: Image to display (2D numpy array)
:param parent: Parent widget (optional)
:param kwargs: Extra keywords to pass to imshow
"""
super(StandaloneImageWidget, self).__init__(parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self._setup_axes()
self._im = None
self._norm = DS9Normalize()
self.make_toolbar()
if image is not None:
self.set_image(image=image, wcs=wcs, **kwargs)
def _setup_axes(self):
from ...clients.viz_client import init_mpl
_, self._axes = init_mpl(self.central_widget.canvas.fig, axes=None, wcs=True)
self._axes.set_aspect('equal', adjustable='datalim')
def set_image(self, image=None, wcs=None, **kwargs):
"""
Update the image shown in the widget
"""
if self._im is not None:
self._im.remove()
self._im = None
kwargs.setdefault('origin', 'upper')
if wcs is not None:
self._axes.reset_wcs(wcs)
self._im = imshow(self._axes, image, norm=self._norm, cmap='gray', **kwargs)
self._im_array = image
self._wcs = wcs
self._redraw()
@property
def axes(self):
"""
        The Matplotlib axes object for this figure
"""
return self._axes
def show(self):
super(StandaloneImageWidget, self).show()
self._redraw()
def _redraw(self):
self.central_widget.canvas.draw()
def _set_cmap(self, cmap):
self._im.set_cmap(cmap)
self._redraw()
def mdi_wrap(self):
"""
Embed this widget in a GlueMdiSubWindow
"""
sub = GlueMdiSubWindow()
sub.setWidget(self)
self.destroyed.connect(sub.close)
self.window_closed.connect(sub.close)
sub.resize(self.size())
self._mdi_wrapper = sub
return sub
def closeEvent(self, event):
self.window_closed.emit()
return super(StandaloneImageWidget, self).closeEvent(event)
def _set_norm(self, mode):
""" Use the `ContrastMouseMode` to adjust the transfer function """
clip_lo, clip_hi = mode.get_clip_percentile()
vmin, vmax = mode.get_vmin_vmax()
stretch = mode.stretch
self._norm.clip_lo = clip_lo
self._norm.clip_hi = clip_hi
self._norm.stretch = stretch
self._norm.bias = mode.bias
self._norm.contrast = mode.contrast
self._norm.vmin = vmin
self._norm.vmax = vmax
self._im.set_norm(self._norm)
self._redraw()
def make_toolbar(self):
"""
Setup the toolbar
"""
result = GlueToolbar(self.central_widget.canvas, self,
name='Image')
result.add_mode(ContrastMode(self._axes, move_callback=self._set_norm))
cm = _colormap_mode(self, self._set_cmap)
result.addWidget(cm)
self._cmap_actions = cm.actions()
self.addToolBar(result)
return result
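# A sketch of standalone use of the viewer above with a random image.
# `get_qapp` is the helper used elsewhere in glue.qt to obtain a QApplication;
# its import path here is an assumption, and the snippet is illustrative
# rather than part of the original module.
def _standalone_image_demo_sketch():
    import numpy as np
    from .. import get_qapp
    app = get_qapp()
    w = StandaloneImageWidget(image=np.random.random((128, 128)))
    w.show()
    app.exec_()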
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/image_widget.py",
"copies": "1",
"size": "19700",
"license": "bsd-3-clause",
"hash": 4673547099472136000,
"line_mean": 32.7328767123,
"line_max": 103,
"alpha_frac": 0.5879695431,
"autogenerated": false,
"ratio": 3.965378421900161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033200978419246837,
"num_lines": 584
} |
from __future__ import absolute_import, division, print_function
from ...external.qt.QtGui import (QDialog, QDoubleValidator, QIcon)
import numpy as np
from matplotlib import cm
from ..qtutil import pretty_number, cmap2pixmap, load_ui
from ...core.util import colorize_subsets, facet_subsets, Pointer
from ..widget_properties import (ButtonProperty, FloatLineProperty,
ValueProperty)
class SubsetFacet(object):
log = ButtonProperty('ui.log')
vmin = FloatLineProperty('ui.min')
vmax = FloatLineProperty('ui.max')
steps = ValueProperty('ui.num')
data = Pointer('ui.component_selector.data')
component = Pointer('ui.component_selector.component')
def __init__(self, collect, default=None, parent=None):
"""Create a new dialog for subset faceting
:param collect: The :class:`~glue.core.data_collection.DataCollection` to use
:param default: The default dataset in the collection (optional)
"""
self.ui = load_ui('subset_facet', None)
self.ui.setWindowTitle("Subset Facet")
self._collect = collect
self.ui.component_selector.setup(self._collect)
if default is not None:
self.ui.component_selector.data = default
val = QDoubleValidator(-1e100, 1e100, 4, None)
self.ui.component_selector.component_changed.connect(self._set_limits)
combo = self.ui.color_scale
for cmap in [cm.cool, cm.RdYlBu, cm.RdYlGn, cm.RdBu, cm.Purples]:
combo.addItem(QIcon(cmap2pixmap(cmap)), cmap.name, cmap)
def _set_limits(self):
data = self.ui.component_selector.data
cid = self.ui.component_selector.component
vals = data[cid]
wmin = self.ui.min
wmax = self.ui.max
wmin.setText(pretty_number(np.nanmin(vals)))
wmax.setText(pretty_number(np.nanmax(vals)))
@property
def cmap(self):
combo = self.ui.color_scale
index = combo.currentIndex()
return combo.itemData(index)
def _apply(self):
try:
lo, hi = self.vmin, self.vmax
except ValueError:
return # limits not set. Abort
if not np.isfinite(lo) or not np.isfinite(hi):
return
subsets = facet_subsets(self._collect, self.component, lo=lo, hi=hi,
steps=self.steps, log=self.log)
colorize_subsets(subsets, self.cmap)
def exec_(self):
return self.ui.exec_()
@classmethod
def facet(cls, collect, default=None, parent=None):
"""Class method to create facted subsets
The arguments are the same as __init__
"""
self = cls(collect, parent=parent, default=default)
value = self.exec_()
if value == QDialog.Accepted:
self._apply()
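# What _apply above boils down to once the dialog is stripped away: carve one
# component into equal-width subsets between two limits and colour them along
# a colormap. The data collection, component id and limits are assumptions
# supplied by the caller; this is an illustrative sketch only.
def _facet_without_dialog_sketch(data_collection, cid, lo, hi, steps=5):
    subsets = facet_subsets(data_collection, cid, lo=lo, hi=hi,
                            steps=steps, log=False)
    colorize_subsets(subsets, cm.RdYlBu)
    return subsets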
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/subset_facet.py",
"copies": "1",
"size": "2833",
"license": "bsd-3-clause",
"hash": -2600776059852832300,
"line_mean": 32.3294117647,
"line_max": 85,
"alpha_frac": 0.626897282,
"autogenerated": false,
"ratio": 3.8335588633288227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49604561453288226,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..external.qt.QtGui import QDialog, QListWidgetItem, QWidget
from .. import core
from .qtutil import load_ui
class LinkEditor(object):
def __init__(self, collection, functions=None, parent=None):
self._collection = collection
self._ui = load_ui('link_editor', parent)
self._init_widgets()
self._connect()
if len(collection) > 1:
self._ui.right_components.set_data_row(1)
self._size = None
def _init_widgets(self):
self._ui.left_components.setup(self._collection)
self._ui.right_components.setup(self._collection)
self._ui.signature_editor.hide()
for link in self._collection.links:
self._add_link(link)
def _connect(self):
self._ui.add_link.clicked.connect(self._add_new_link)
self._ui.remove_link.clicked.connect(self._remove_link)
self._ui.toggle_editor.clicked.connect(self._toggle_advanced)
self._ui.signature_editor._ui.addButton.clicked.connect(
self._add_new_link)
@property
def advanced(self):
return self._ui.signature_editor.isVisible()
@advanced.setter
def advanced(self, state):
"""Set whether the widget is in advanced state"""
self._ui.signature_editor.setVisible(state)
self._ui.toggle_editor.setText("Basic" if state else "Advanced")
def _toggle_advanced(self):
"""Show or hide the signature editor widget"""
self.advanced = not self.advanced
def _selected_components(self):
result = []
id1 = self._ui.left_components.component
id2 = self._ui.right_components.component
if id1:
result.append(id1)
if id2:
result.append(id2)
return result
def _simple_links(self):
"""Return identity links which connect the highlighted items
in each component selector.
Returns:
A list of :class:`~glue.core.ComponentLink` objects
If items are not selected in the component selectors,
an empty list is returned
"""
comps = self._selected_components()
if len(comps) != 2:
return []
assert isinstance(comps[0], core.data.ComponentID), comps[0]
assert isinstance(comps[1], core.data.ComponentID), comps[1]
link1 = core.component_link.ComponentLink([comps[0]], comps[1])
return [link1]
def _add_link(self, link):
current = self._ui.current_links
item = QListWidgetItem(str(link))
current.addItem(item)
item.setHidden(link.hidden)
current.set_data(item, link)
def _add_new_link(self):
if not self.advanced:
links = self._simple_links()
else:
links = self._ui.signature_editor.links()
self._ui.signature_editor.clear_inputs()
for link in links:
self._add_link(link)
def links(self):
current = self._ui.current_links
return current.data.values()
def _remove_link(self):
current = self._ui.current_links
item = current.currentItem()
row = current.currentRow()
if item is None:
return
current.drop_data(item)
deleted = current.takeItem(row)
assert deleted == item # sanity check
@classmethod
def update_links(cls, collection):
widget = cls(collection)
isok = widget._ui.exec_()
if isok:
links = widget.links()
collection.set_links(links)
def main():
import numpy as np
from glue.core import Data, DataCollection
x = np.array([1, 2, 3])
d = Data(label='data', x=x, y=x * 2)
dc = DataCollection(d)
LinkEditor.update_links(dc)
if __name__ == "__main__":
main()
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/link_editor.py",
"copies": "1",
"size": "3870",
"license": "bsd-3-clause",
"hash": 5575908791014250000,
"line_mean": 29.234375,
"line_max": 72,
"alpha_frac": 0.603875969,
"autogenerated": false,
"ratio": 3.917004048582996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5020880017582996,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ...external.qt.QtGui import QWidget, QDrag
from ...external.qt.QtCore import QMimeData, Qt
from ...external.qt.QtTest import QTest
from .. import mime
import pytest
class TestWidget(QWidget):
def __init__(self, out_mime, parent=None):
super(TestWidget, self).__init__(parent)
self.setAcceptDrops(True)
self.last_mime = None
self.out_mime = out_mime
def dragEnterEvent(self, event):
print('drag enter')
event.accept()
def dropEvent(self, event):
print('drop')
self.last_mime = event.mimeData()
def mousePressEvent(self, event):
print('mouse event')
drag = QDrag(self)
drag.setMimeData(self.out_mime)
drop_action = drag.exec_()
print(drop_action)
event.accept()
class TestMime(object):
def test_formats(self):
d = mime.PyMimeData()
assert set(d.formats()) == set([mime.INSTANCE_MIME_TYPE])
d = mime.PyMimeData(**{'text/plain': 'hello'})
assert set(d.formats()) == set([mime.INSTANCE_MIME_TYPE, 'text/plain'])
def test_empty_has_format(self):
d = mime.PyMimeData()
assert d.hasFormat(mime.INSTANCE_MIME_TYPE)
assert not d.hasFormat(mime.LAYER_MIME_TYPE)
assert not d.hasFormat(mime.LAYERS_MIME_TYPE)
def test_instance_format(self):
d = mime.PyMimeData(5)
assert d.hasFormat(mime.INSTANCE_MIME_TYPE)
assert not d.hasFormat(mime.LAYER_MIME_TYPE)
assert not d.hasFormat(mime.LAYERS_MIME_TYPE)
def test_layer_format(self):
d = mime.PyMimeData(5, **{mime.LAYER_MIME_TYPE: 10})
assert d.hasFormat(mime.INSTANCE_MIME_TYPE)
assert d.hasFormat(mime.LAYER_MIME_TYPE)
assert not d.hasFormat(mime.LAYERS_MIME_TYPE)
def test_layers_format(self):
d = mime.PyMimeData(5, **{mime.LAYERS_MIME_TYPE: 10})
assert d.hasFormat(mime.INSTANCE_MIME_TYPE)
assert d.hasFormat(mime.LAYERS_MIME_TYPE)
assert not d.hasFormat(mime.LAYER_MIME_TYPE)
def test_retrieve_instance(self):
d = mime.PyMimeData(10)
assert d.retrieveData(mime.INSTANCE_MIME_TYPE) == 10
def test_retrieve_layer(self):
d = mime.PyMimeData(**{mime.LAYERS_MIME_TYPE: 12})
assert d.retrieveData(mime.LAYERS_MIME_TYPE) == 12
d = mime.PyMimeData(**{mime.LAYER_MIME_TYPE: 12})
assert d.retrieveData(mime.LAYER_MIME_TYPE) == 12
def test_retrieve_not_present_returns_null(self):
d = mime.PyMimeData()
assert d.retrieveData('not-a-format') is None
@pytest.skip
class TestMimeDragAndDrop(object):
def setup_method(self, method):
m1 = mime.PyMimeData(1, **{'text/plain': 'hi', 'test': 4})
m2 = mime.PyMimeData(1, **{'test': 5})
w1 = TestWidget(m1)
w2 = TestWidget(m2)
self.w1 = w1
self.w2 = w2
self.m1 = m1
self.m2 = m2
def test_drag_drop(self):
QTest.mousePress(self.w1, Qt.LeftButton)
QTest.mouseMove(self.w2)
QTest.mouseRelease(self.w2, Qt.LeftButton)
assert self.w2.last_mime == self.m1
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/tests/test_mime.py",
"copies": "1",
"size": "3203",
"license": "bsd-3-clause",
"hash": 1388360724479105500,
"line_mean": 29.5047619048,
"line_max": 79,
"alpha_frac": 0.6250390259,
"autogenerated": false,
"ratio": 3.251776649746193,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4376815675646193,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..external.qt.QtGui import QWidget, QListWidgetItem
from ..external.qt.QtCore import Signal
from .qtutil import load_ui
class ComponentSelector(QWidget):
""" An interface to view the components and data of a DataCollection
    Components can be dragged and dropped.
    The currently-selected ComponentID is stored in the
    component property. The currently-selected Data is stored in the
    data property.
Usage:
>>> widget = ComponentSelector()
>>> widget.setup(data_collection)
"""
component_changed = Signal()
def __init__(self, parent=None):
super(ComponentSelector, self).__init__(parent)
self._data = None
self._ui = load_ui('component_selector', self)
self._init_widgets()
self._connect()
def _init_widgets(self):
self._ui.component_selector.setDragEnabled(True)
self._ui.setMinimumWidth(300)
def _connect(self):
#attach Qt signals
ds = self._ui.data_selector
ds.currentIndexChanged.connect(self._set_components)
self._ui.component_selector.currentItemChanged.connect(
lambda *args: self.component_changed.emit())
def set_current_row(self, row):
"""Select which component is selected
:param row: Row number
"""
self._ui.component_selector.setCurrentRow(row)
def set_data_row(self, row):
"""Select which data object is selected
:param row: Row number
"""
self._ui.data_selector.setCurrentIndex(row)
def setup(self, data_collection):
""" Set up the widgets.
:param data_collection: Object to browse
        :type data_collection:
:class:`~glue.core.data_collection.DataCollection`
"""
self._data = data_collection
self._set_data()
self._set_components()
def _set_components(self):
""" Set list of component widgets to match current data set """
index = self._ui.data_selector.currentIndex()
if index < 0:
return
data = self._data[index]
cids = data.components
c_list = self._ui.component_selector
c_list.clear()
for c in cids:
item = QListWidgetItem(c.label)
c_list.addItem(item)
c_list.set_data(item, c)
def _set_data(self):
""" Populate the data list with data sets in the collection """
d_list = self._ui.data_selector
for d in self._data:
d_list.addItem(d.label)
@property
def component(self):
"""Returns the currently-selected ComponentID
:rtype: :class:`~glue.core.data.ComponentID`
"""
item = self._ui.component_selector.currentItem()
return self._ui.component_selector.get_data(item)
@component.setter
def component(self, component):
w = self._ui.component_selector
for i in range(w.count()):
item = w.item(i)
if w.get_data(item) is component:
w.setCurrentRow(i)
return
else:
raise ValueError("Component not found: %s" % component)
@property
def data(self):
index = self._ui.data_selector.currentIndex()
if index < 0:
return
return self._data[index]
@data.setter
def data(self, value):
for i, d in enumerate(self._data):
if d is value:
self._ui.data_selector.setCurrentIndex(i)
return
else:
raise ValueError("Data is not part of the DataCollection")
def main(): # pragma: no cover
import glue
import numpy as np
from . import get_qapp
from ..external.qt.QtGui import QApplication
d = glue.core.Data(label="hi")
d2 = glue.core.Data(label="there")
c1 = glue.core.Component(np.array([1, 2, 3]))
c2 = glue.core.Component(np.array([1, 2, 3]))
c3 = glue.core.Component(np.array([1, 2, 3]))
dc = glue.core.DataCollection()
dc.append(d)
dc.append(d2)
d.add_component(c1, "a")
d.add_component(c2, "b")
d2.add_component(c3, "c")
app = get_qapp()
w = ComponentSelector()
w.setup(dc)
w.show()
app.exec_()
if __name__ == "__main__": # pragma: no cover
main()
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/component_selector.py",
"copies": "1",
"size": "4354",
"license": "bsd-3-clause",
"hash": -965535995508611200,
"line_mean": 27.6447368421,
"line_max": 72,
"alpha_frac": 0.5962333486,
"autogenerated": false,
"ratio": 3.8840321141837646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49802654627837645,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..extern.vispy import scene
from ..extern.vispy.geometry import create_cube
from ..extern.vispy.visuals.transforms import STTransform, ChainTransform
class AxesVisual3D(object):
def __init__(self, view=None, transform=None, **kwargs):
self.view = view
# Add a 3D cube to show us the unit cube. The 1.001 factor is to make
# sure that the grid lines are not 'hidden' by volume renderings on the
# front side due to numerical precision.
vertices, filled_indices, outline_indices = create_cube()
self.axis = scene.visuals.Mesh(vertices['position'],
outline_indices, parent=self.view.scene,
color=kwargs['axis_color'], mode='lines')
self.axis.transform = transform
self.xax = scene.visuals.Axis(pos=[[-1.0, 0], [1.0, 0]],
tick_direction=(0, -1),
parent=self.view.scene, axis_label='X',
anchors=['center', 'middle'], **kwargs)
self.yax = scene.visuals.Axis(pos=[[0, -1.0], [0, 1.0]],
tick_direction=(-1, 0),
parent=self.view.scene, axis_label='Y',
anchors=['center', 'middle'], **kwargs)
self.zax = scene.visuals.Axis(pos=[[0, -1.0], [0, 1.0]],
tick_direction=(-1, 0),
parent=self.view.scene, axis_label='Z',
anchors=['center', 'middle'], **kwargs)
self.xtr = STTransform()
self.xtr = self.xtr.as_matrix()
self.xtr.rotate(45, (1, 0, 0))
self.xtr.translate((0, -1., -1.))
self.ytr = STTransform()
self.ytr = self.ytr.as_matrix()
self.ytr.rotate(-45, (0, 1, 0))
self.ytr.translate((-1, 0, -1.))
self.ztr = STTransform()
self.ztr = self.ztr.as_matrix()
self.ztr.rotate(45, (0, 1, 0))
self.ztr.rotate(90, (1, 0, 0))
self.ztr.translate((-1, -1, 0.))
self.xax.transform = ChainTransform(transform, self.xtr)
self.yax.transform = ChainTransform(transform, self.ytr)
self.zax.transform = ChainTransform(transform, self.ztr)
@property
def tick_color(self):
return self.xax.tick_color
@tick_color.setter
def tick_color(self, value):
self.xax.tick_color = value
self.yax.tick_color = value
self.zax.tick_color = value
@property
def label_color(self):
return self._label_color
@label_color.setter
def label_color(self, value):
self.xax.label_color = value
self.yax.label_color = value
self.zax.label_color = value
@property
def axis_color(self):
return self._axis_color
@axis_color.setter
def axis_color(self, value):
self.axis.color = value
@property
def tick_font_size(self):
return self.xax.tick_font_size
@tick_font_size.setter
def tick_font_size(self, value):
self.xax.tick_font_size = value
self.yax.tick_font_size = value
self.zax.tick_font_size = value
@property
def axis_font_size(self):
return self.xax.axis_font_size
@axis_font_size.setter
def axis_font_size(self, value):
self.xax.axis_font_size = value
self.yax.axis_font_size = value
self.zax.axis_font_size = value
@property
def xlabel(self):
return self.xax.axis_label
@xlabel.setter
def xlabel(self, value):
self.xax.axis_label = value
@property
def ylabel(self):
return self.yax.axis_label
@ylabel.setter
def ylabel(self, value):
self.yax.axis_label = value
@property
def zlabel(self):
return self.zax.axis_label
@zlabel.setter
def zlabel(self, value):
self.zax.axis_label = value
@property
def xlim(self):
return self.xax.domain
@xlim.setter
def xlim(self, value):
self.xax.domain = value
@property
def ylim(self):
return self.yax.domain
@ylim.setter
def ylim(self, value):
self.yax.domain = value
@property
def zlim(self):
return self.zax.domain
@zlim.setter
def zlim(self, value):
self.zax.domain = value
@property
def parent(self):
return self.axis.parent
@parent.setter
def parent(self, value):
self.axis.parent = value
self.xax.parent = value
self.yax.parent = value
self.zax.parent = value
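# A rough sketch of wiring this class into a vispy scene. Only `axis_color`
# is known from __init__ above to be a required keyword; the canvas, camera
# and `tick_color` keyword follow common vispy usage and are assumptions here.
def _axes_visual_demo_sketch():
    canvas = scene.SceneCanvas(keys='interactive', show=True)
    view = canvas.central_widget.add_view()
    view.camera = 'turntable'
    axes = AxesVisual3D(view=view, transform=STTransform(),
                        axis_color='white', tick_color='white')
    axes.xlabel, axes.ylabel, axes.zlabel = 'x', 'y', 'z'
    axes.xlim = axes.ylim = axes.zlim = (-1, 1)
    return canvas, axes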
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/common/axes.py",
"copies": "3",
"size": "4760",
"license": "bsd-2-clause",
"hash": -6677401766666398000,
"line_mean": 28.0243902439,
"line_max": 80,
"alpha_frac": 0.556092437,
"autogenerated": false,
"ratio": 3.600605143721634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00007527853056308341,
"num_lines": 164
} |
from __future__ import absolute_import, division, print_function
from flask import Blueprint, current_app, jsonify, request, url_for
import appr
info_app = Blueprint(
'info',
__name__, )
@info_app.before_request
def pre_request_logging():
jsonbody = request.get_json(force=True, silent=True)
values = request.values.to_dict()
if jsonbody:
values.update(jsonbody)
current_app.logger.info("request", extra={
"remote_addr": request.remote_addr,
"http_method": request.method,
"original_url": request.url,
"path": request.path,
"data": values,
"headers": dict(request.headers.to_list())})
@info_app.route("/")
def index_discovery():
host = request.url_root
domain = request.headers['Host']
return """<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="appr-package" content="{domain}/{{name}} {host}/appr/api/v1/packages/{{name}}/pull">
</head>
<body>
</body>
</html>""".format(domain=domain, host=host)
@info_app.route("/version")
def version():
return jsonify({"appr-api": appr.__version__})
@info_app.route("/routes")
def routes():
import urllib
output = []
for rule in current_app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
line = urllib.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url))
output.append(line)
lines = []
for line in sorted(output):
lines.append(line)
return jsonify({"routes": lines})
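# Mounting the blueprint on an application. The appr project has its own app
# factory, so the bare Flask() instance below is only an illustrative sketch.
def _create_demo_app_sketch():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(info_app)
    return app   # GET /version -> {"appr-api": appr.__version__}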
| {
"repo_name": "app-registry/appr",
"path": "appr/api/info.py",
"copies": "2",
"size": "1751",
"license": "apache-2.0",
"hash": -4554707177456860700,
"line_mean": 26.7936507937,
"line_max": 100,
"alpha_frac": 0.610508281,
"autogenerated": false,
"ratio": 3.4400785854616895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5050586866461689,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from functools import wraps
from glob import glob
from math import ceil
from operator import getitem
import os
from threading import Lock
import uuid
from warnings import warn
import pandas as pd
import numpy as np
from toolz import merge
from ..base import tokenize
from ..compatibility import unicode, apply
from .. import array as da
from ..async import get_sync
from .core import _Frame, DataFrame, Series
from .shuffle import set_partition
lock = Lock()
def _dummy_from_array(x, columns=None):
""" Create empty pd.DataFrame or pd.Series which has correct dtype """
if x.ndim > 2:
        raise ValueError('from_array does not support more than 2D arrays, got'
' array with shape %r' % (x.shape,))
if getattr(x.dtype, 'names', None) is not None:
# record array has named columns
cols = tuple(x.dtype.names)
dtypes = [x.dtype.fields[n][0] for n in x.dtype.names]
elif x.ndim == 1 and (np.isscalar(columns) or columns is None):
# Series
return pd.Series([], name=columns, dtype=x.dtype)
else:
cols = list(range(x.shape[1])) if x.ndim == 2 else [0]
dtypes = [x.dtype] * len(cols)
data = {}
for c, dt in zip(cols, dtypes):
data[c] = np.array([], dtype=dt)
data = pd.DataFrame(data, columns=cols)
if columns is not None:
# if invalid, raise error from pandas
data.columns = columns
return data
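# Concrete behaviour of the helper above for its two interesting inputs: a
# record array gives an empty DataFrame whose columns mirror the field names,
# and a 1-D array plus a scalar `columns` gives an empty, named Series. This
# is an illustrative sketch, not part of the original module.
def _dummy_from_array_examples():
    rec = np.empty(0, dtype=[('name', 'O'), ('balance', 'i8')])
    frame_meta = _dummy_from_array(rec)
    series_meta = _dummy_from_array(np.empty(0, dtype='f8'), columns='x')
    assert list(frame_meta.columns) == ['name', 'balance']
    assert series_meta.name == 'x' and len(series_meta) == 0
    return frame_meta, series_meta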
def from_array(x, chunksize=50000, columns=None):
""" Read dask Dataframe from any slicable array
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns)
dummy = _dummy_from_array(x, columns)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = 'from_array-' + token
dsk = {}
for i in range(0, int(ceil(len(x) / chunksize))):
data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
if isinstance(dummy, pd.Series):
dsk[name, i] = (pd.Series, data, None, dummy.dtype, dummy.name)
else:
dsk[name, i] = (pd.DataFrame, data, None, dummy.columns)
return _Frame(dsk, name, dummy, divisions)
def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):
"""Construct a dask object from a pandas object.
If given a ``pandas.Series`` a ``dask.Series`` will be returned. If given a
``pandas.DataFrame`` a ``dask.DataFrame`` will be returned. All other
pandas objects will raise a ``TypeError``.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a dask DataFrame/Series
npartitions : int, optional
The number of partitions of the index to create.
chunksize : int, optional
The size of the partitions of the index.
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
from_bcolz : Construct a dask.DataFrame from a bcolz ctable
read_csv : Construct a dask.DataFrame from a CSV file
"""
if isinstance(getattr(data, 'index', None), pd.MultiIndex):
raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("Input must be a pandas DataFrame or Series")
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
nrows = len(data)
if chunksize is None:
chunksize = int(ceil(nrows / npartitions))
else:
npartitions = int(ceil(nrows / chunksize))
if sort and not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
if sort:
divisions, locations = sorted_division_locations(data.index,
chunksize=chunksize)
else:
divisions = [None] * (npartitions + 1)
locations = list(range(0, nrows, chunksize)) + [len(data)]
name = name or ('from_pandas-' + tokenize(data, chunksize))
dsk = dict(((name, i), data.iloc[start: stop])
for i, (start, stop) in enumerate(zip(locations[:-1],
locations[1:])))
return _Frame(dsk, name, data, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock,
**kwargs):
""" Read dask Dataframe from bcolz.ctable
Parameters
----------
x : bcolz.ctable
Input data
chunksize : int, optional
The size of blocks to pull out from ctable. Ideally as large as can
comfortably fit in memory
categorize : bool, defaults to True
Automatically categorize all string dtypes
index : string, optional
Column to make the index
lock: bool or Lock
Lock to use when reading or False for no lock (not-thread-safe)
See Also
--------
from_array: more generic function not optimized for bcolz
"""
if lock is True:
lock = Lock()
import dask.array as da
import bcolz
if isinstance(x, (str, unicode)):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
if x.rootdir:
token = tokenize((x.rootdir, os.path.getmtime(x.rootdir)), chunksize,
categorize, index, kwargs)
else:
token = tokenize((id(x), x.shape, x.dtype), chunksize, categorize,
index, kwargs)
new_name = 'from_bcolz-' + token
dsk = dict(((new_name, i),
(dataframe_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
columns, categories, lock))
for i in range(0, int(ceil(len(x) / chunksize))))
meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)
result = DataFrame(dsk, new_name, meta, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = da.percentile(a, q).compute()
return set_partition(result, index, divisions, **kwargs)
else:
return result
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> import bcolz
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
1 2 20
2 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
1 20
2 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
1 20
2 30
Name: b, dtype: int...
"""
import bcolz
if columns is None:
columns = x.dtype.names
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
if type(slc) is slice:
start = slc.start
stop = slc.stop if slc.stop < len(x) else len(x)
else:
start = slc[0].start
stop = slc[0].stop if slc[0].stop < len(x) else len(x)
idx = pd.Index(range(start, stop))
if lock:
lock.acquire()
try:
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in columns]
if categories is not None:
chunks = [pd.Categorical.from_codes(
np.searchsorted(categories[name], chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(columns, chunks)]
result = pd.DataFrame(dict(zip(columns, chunks)), columns=columns,
index=idx)
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
result = pd.Series(chunk, name=columns, index=idx)
finally:
if lock:
lock.release()
return result
def from_dask_array(x, columns=None):
""" Convert dask Array to dask DataFrame
Converts a 2d array into a DataFrame and a 1d array into a Series.
Parameters
----------
x: da.Array
columns: list or string
list of column names if DataFrame, single string if Series
Examples
--------
>>> import dask.array as da
>>> import dask.dataframe as dd
>>> x = da.ones((4, 2), chunks=(2, 2))
>>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
>>> df.compute()
a b
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
3 1.0 1.0
"""
dummy = _dummy_from_array(x, columns)
name = 'from-dask-array' + tokenize(x, columns)
divisions = [0]
for c in x.chunks[0]:
divisions.append(divisions[-1] + c)
index = [(np.arange, a, b, 1, 'i8') for a, b in
zip(divisions[:-1], divisions[1:])]
divisions[-1] -= 1
if x.ndim == 2:
if len(x.chunks[1]) > 1:
x = x.rechunk({1: x.shape[1]})
dsk = {}
for i, (chunk, ind) in enumerate(zip(x._keys(), index)):
if x.ndim == 2:
chunk = chunk[0]
if isinstance(dummy, pd.Series):
dsk[name, i] = (pd.Series, chunk, ind, x.dtype, dummy.name)
else:
dsk[name, i] = (pd.DataFrame, chunk, ind, dummy.columns)
return _Frame(merge(x.dask, dsk), name, dummy, divisions)
def from_castra(x, columns=None):
"""Load a dask DataFrame from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
return x.to_dask(columns)
def _link(token, result):
""" A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
@wraps(pd.DataFrame.to_hdf)
def to_hdf(df, path_or_buf, key, mode='a', append=False, complevel=0,
complib=None, fletcher32=False, get=get_sync, dask_kwargs=None,
name_function=str, **kwargs):
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
# if path_or_buf is string, format using i and name
if isinstance(path_or_buf, str):
if path_or_buf.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file path and dataset key")
fmt_obj = lambda path_or_buf, i_name: path_or_buf.replace('*', i_name)
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in dataset key")
fmt_obj = lambda path_or_buf, _: path_or_buf
dsk = dict()
i_name = name_function(0)
dsk[(name, 0)] = (_link, None,
(apply, pd_to_hdf,
(tuple, [(df._name, 0), fmt_obj(path_or_buf, i_name),
key.replace('*', i_name)]),
merge(kwargs,
{'mode': mode, 'format': 'table', 'append': append,
'complevel': complevel, 'complib': complib,
'fletcher32': fletcher32})))
for i in range(1, df.npartitions):
i_name = name_function(i)
dsk[(name, i)] = (_link, (name, i - 1),
(apply, pd_to_hdf,
(tuple, [(df._name, i), fmt_obj(path_or_buf, i_name),
key.replace('*', i_name)]),
merge(kwargs,
{'mode': 'a', 'format': 'table', 'append': True,
'complevel': complevel, 'complib': complib,
'fletcher32': fletcher32})))
dask_kwargs = dask_kwargs or {}
DataFrame._get(merge(df.dask, dsk), (name, df.npartitions - 1),
get=get, **dask_kwargs)
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future, when creating HDFStores, use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), lock=None):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_and_stops(path, key, stop):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
else:
stops.append(stop)
return keys, stops
def one_path_one_key(path, key, start, stop, columns, chunksize, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize))
name = 'read-hdf-' + token
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
{'start': s,
'stop': s + chunksize,
'columns': empty.columns}))
for i, s in enumerate(range(start, stop, chunksize)))
divisions = [None] * (len(dsk) + 1)
return DataFrame(dsk, name, empty, divisions)
keys, stops = get_keys_and_stops(path, key, stop)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from .multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, lock)
for k, s in zip(keys, stops)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
@wraps(pd.read_hdf)
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, lock=True):
"""
    Read hdf files into a dask dataframe. Like pandas.read_hdf, except it
can read multiple files, and read multiple keys from the same file by using
pattern matching.
Parameters
----------
pattern : pattern (string), or buffer to read from. Can contain wildcards
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : optional, a list of columns that if not None, will limit the
return columns
    chunksize : optional, positive integer, the maximum number of rows per partition
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = Lock()
key = key if key.startswith('/') else '/' + key
paths = sorted(glob(pattern))
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
from .multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
lock=lock)
for path in paths])
def to_castra(df, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from castra import Castra
if isinstance(categories, list):
categories = (list, categories)
name = 'to-castra-' + uuid.uuid1().hex
if sorted_index_column:
set_index = lambda x: x.set_index(sorted_index_column)
func = lambda part: (set_index, part)
else:
func = lambda part: part
dsk = dict()
dsk[(name, -1)] = (Castra, fn, func((df._name, 0)), categories)
for i in range(0, df.npartitions):
dsk[(name, i)] = (_link, (name, i - 1),
(Castra.extend, (name, -1), func((df._name, i))))
dsk = merge(dsk, df.dask)
keys = [(name, -1), (name, df.npartitions - 1)]
if compute:
c, _ = DataFrame._get(dsk, keys, get=get)
return c
else:
return dsk, keys
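# Illustrative sketch (not part of the original source): writing a small
# dask DataFrame to a Castra store.  Requires the optional ``castra``
# package; the path 'example.castra' and the column name are assumptions.
def _example_to_castra():
    import pandas as pd
    from dask.dataframe import from_pandas
    ddf = from_pandas(pd.DataFrame({'x': [1, 2, 3, 4]}), npartitions=2)
    # compute=True (the default) writes the partitions and returns the Castra
    return to_castra(ddf, 'example.castra')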
def to_csv(df, filename, compression=None, get=None, **kwargs):
if compression:
raise NotImplementedError("Writing compressed csv files not supported")
name = 'to-csv-' + uuid.uuid1().hex
dsk = dict()
dsk[(name, 0)] = (lambda df, fn, kwargs: df.to_csv(fn, **kwargs),
(df._name, 0), filename, kwargs)
kwargs2 = kwargs.copy()
kwargs2['mode'] = 'a'
kwargs2['header'] = False
for i in range(1, df.npartitions):
dsk[(name, i)] = (_link, (name, i - 1),
(lambda df, fn, kwargs: df.to_csv(fn, **kwargs),
(df._name, i), filename, kwargs2))
DataFrame._get(merge(dsk, df.dask), (name, df.npartitions - 1), get=get)
def to_bag(df, index=False):
from ..bag.core import Bag
if isinstance(df, DataFrame):
func = lambda df: list(df.itertuples(index))
elif isinstance(df, Series):
func = (lambda df: list(df.iteritems())) if index else list
else:
raise TypeError("df must be either DataFrame or Series")
name = 'to_bag-' + tokenize(df, index)
dsk = dict(((name, i), (func, block)) for (i, block) in enumerate(df._keys()))
dsk.update(df._optimize(df.dask, df._keys()))
return Bag(dsk, name, df.npartitions)
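# Illustrative sketch (not part of the original source): converting a dask
# DataFrame into a dask Bag of row tuples.  The column names and values are
# assumptions made for illustration.
def _example_to_bag():
    import pandas as pd
    from dask.dataframe import from_pandas
    ddf = from_pandas(pd.DataFrame({'a': [1, 2], 'b': [10, 20]}),
                      npartitions=1)
    bag = to_bag(ddf)        # index=False drops the index from each tuple
    return bag.compute()     # one tuple per row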
def from_imperative(*args, **kwargs):
warn("Deprecation warning: moved to from_delayed")
return from_delayed(*args, **kwargs)
def from_delayed(dfs, metadata=None, divisions=None, columns=None,
prefix='from-delayed'):
""" Create DataFrame from many dask.delayed objects
Parameters
----------
    dfs: list of Delayed
        An iterable of ``dask.delayed.Delayed`` objects, such as come from
        ``dask.delayed``. These comprise the individual partitions of the
resulting dataframe.
metadata: str, list of column names, or empty dataframe, optional
Metadata for the underlying pandas object. Can be either column name
(if Series), list of column names, or pandas object with the same
columns/dtypes. If not provided, will be computed from the first
partition.
divisions: list, optional
Partition boundaries along the index.
    prefix : str, optional
Prefix to prepend to the keys.
"""
if columns is not None:
warn("Deprecation warning: Use metadata argument, not columns")
metadata = columns
from dask.delayed import Delayed
if isinstance(dfs, Delayed):
dfs = [dfs]
dsk = merge(df.dask for df in dfs)
name = prefix + '-' + tokenize(*dfs)
names = [(name, i) for i in range(len(dfs))]
values = [df.key for df in dfs]
dsk2 = dict(zip(names, values))
if divisions is None:
divisions = [None] * (len(dfs) + 1)
if metadata is None:
metadata = dfs[0].compute()
if isinstance(metadata, (str, pd.Series)):
return Series(merge(dsk, dsk2), name, metadata, divisions)
else:
return DataFrame(merge(dsk, dsk2), name, metadata, divisions)
def sorted_division_locations(seq, npartitions=None, chunksize=None):
""" Find division locations and values in sorted list
Examples
--------
>>> L = ['A', 'B', 'C', 'D', 'E', 'F']
>>> sorted_division_locations(L, chunksize=2)
(['A', 'C', 'E', 'F'], [0, 2, 4, 6])
>>> sorted_division_locations(L, chunksize=3)
(['A', 'D', 'F'], [0, 3, 6])
>>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C']
>>> sorted_division_locations(L, chunksize=3)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(L, chunksize=2)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(['A'], chunksize=2)
(['A', 'A'], [0, 1])
"""
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
if npartitions:
chunksize = ceil(len(seq) / npartitions)
positions = [0]
values = [seq[0]]
for pos in list(range(0, len(seq), chunksize)):
if pos <= positions[-1]:
continue
while pos + 1 < len(seq) and seq[pos - 1] == seq[pos]:
pos += 1
values.append(seq[pos])
if pos == len(seq) - 1:
pos += 1
positions.append(pos)
if positions[-1] != len(seq):
positions.append(len(seq))
values.append(seq[-1])
return values, positions
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/io.py",
"copies": "1",
"size": "24678",
"license": "bsd-3-clause",
"hash": 5854856073040914000,
"line_mean": 32.0361445783,
"line_max": 98,
"alpha_frac": 0.5757354729,
"autogenerated": false,
"ratio": 3.681085918854415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47568213917544155,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from functools import wraps
from glob import glob
import io
from math import ceil
from operator import getitem
import os
from threading import Lock
import multiprocessing
import uuid
from warnings import warn
import sys
import pandas as pd
import numpy as np
import dask
from toolz import merge
from ..base import tokenize
from ..compatibility import unicode, PY2
from .. import array as da
from ..async import get_sync
from ..context import _globals
from ..delayed import Delayed, delayed
import dask.multiprocessing
from .core import DataFrame, Series, new_dd_object
from .shuffle import set_partition
from .utils import insert_meta_param_description
from ..utils import build_name_function, M
from ..bytes.core import write_bytes
lock = Lock()
def _meta_from_array(x, columns=None):
""" Create empty pd.DataFrame or pd.Series which has correct dtype """
if x.ndim > 2:
        raise ValueError('from_array does not accept arrays with more than '
                         '2 dimensions, got array with shape %r' % (x.shape,))
if getattr(x.dtype, 'names', None) is not None:
# record array has named columns
if columns is None:
columns = list(x.dtype.names)
elif np.isscalar(columns):
raise ValueError("For a struct dtype, columns must be a list.")
elif not all(i in x.dtype.names for i in columns):
extra = sorted(set(columns).difference(x.dtype.names))
raise ValueError("dtype {0} doesn't have fields "
"{1}".format(x.dtype, extra))
fields = x.dtype.fields
dtypes = [fields[n][0] if n in fields else 'f8' for n in columns]
elif x.ndim == 1:
if np.isscalar(columns) or columns is None:
return pd.Series([], name=columns, dtype=x.dtype)
elif len(columns) == 1:
return pd.DataFrame(np.array([], dtype=x.dtype), columns=columns)
raise ValueError("For a 1d array, columns must be a scalar or single "
"element list")
else:
if columns is None:
columns = list(range(x.shape[1])) if x.ndim == 2 else [0]
elif len(columns) != x.shape[1]:
raise ValueError("Number of column names must match width of the "
"array. Got {0} names for {1} "
"columns".format(len(columns), x.shape[1]))
dtypes = [x.dtype] * len(columns)
data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}
return pd.DataFrame(data, columns=columns)
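# Illustrative sketch (not part of the original source): what the empty
# "meta" object looks like for two common inputs.  The shapes, dtypes and
# column names below are assumptions chosen for illustration.
def _example_meta_from_array():
    meta_df = _meta_from_array(np.ones((10, 2)), columns=['a', 'b'])
    meta_s = _meta_from_array(np.arange(5), columns='x')
    # meta_df is an empty DataFrame with float64 columns 'a' and 'b';
    # meta_s is an empty Series named 'x' with the array's integer dtype.
    return meta_df, meta_s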
def from_array(x, chunksize=50000, columns=None):
""" Read dask Dataframe from any slicable array
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns)
meta = _meta_from_array(x, columns)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = 'from_array-' + token
dsk = {}
for i in range(0, int(ceil(len(x) / chunksize))):
data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
if isinstance(meta, pd.Series):
dsk[name, i] = (pd.Series, data, None, meta.dtype, meta.name)
else:
dsk[name, i] = (pd.DataFrame, data, None, meta.columns)
return new_dd_object(dsk, name, meta, divisions)
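# Illustrative sketch (not part of the original source): building a dask
# DataFrame from a NumPy record array, which carries its own column names.
# The field names mirror the docstring above and are assumptions.
def _example_from_array():
    x = np.array([('Alice', 100), ('Bob', 200)],
                 dtype=[('name', 'O'), ('balance', 'i8')])
    ddf = from_array(x, chunksize=1)
    return ddf.compute()     # pandas DataFrame with columns name, balance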
def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):
"""Construct a dask object from a pandas object.
If given a ``pandas.Series`` a ``dask.Series`` will be returned. If given a
``pandas.DataFrame`` a ``dask.DataFrame`` will be returned. All other
pandas objects will raise a ``TypeError``.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a dask DataFrame/Series
npartitions : int, optional
The number of partitions of the index to create.
chunksize : int, optional
The size of the partitions of the index.
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
from_bcolz : Construct a dask.DataFrame from a bcolz ctable
read_csv : Construct a dask.DataFrame from a CSV file
"""
if isinstance(getattr(data, 'index', None), pd.MultiIndex):
raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("Input must be a pandas DataFrame or Series")
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
nrows = len(data)
if chunksize is None:
chunksize = int(ceil(nrows / npartitions))
else:
npartitions = int(ceil(nrows / chunksize))
name = name or ('from_pandas-' + tokenize(data, chunksize))
if not nrows:
return new_dd_object({(name, 0): data}, name, data, [None, None])
if sort and not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
if sort:
divisions, locations = sorted_division_locations(data.index,
chunksize=chunksize)
else:
locations = list(range(0, nrows, chunksize)) + [len(data)]
divisions = [None] * len(locations)
dsk = dict(((name, i), data.iloc[start: stop])
for i, (start, stop) in enumerate(zip(locations[:-1],
locations[1:])))
return new_dd_object(dsk, name, data, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock,
**kwargs):
""" Read dask Dataframe from bcolz.ctable
Parameters
----------
x : bcolz.ctable
Input data
chunksize : int, optional
The size of blocks to pull out from ctable. Ideally as large as can
comfortably fit in memory
categorize : bool, defaults to True
Automatically categorize all string dtypes
index : string, optional
Column to make the index
lock: bool or Lock
Lock to use when reading or False for no lock (not-thread-safe)
See Also
--------
from_array: more generic function not optimized for bcolz
"""
if lock is True:
lock = Lock()
import dask.array as da
import bcolz
if isinstance(x, (str, unicode)):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
if x.rootdir:
token = tokenize((x.rootdir, os.path.getmtime(x.rootdir)), chunksize,
categorize, index, kwargs)
else:
token = tokenize((id(x), x.shape, x.dtype), chunksize, categorize,
index, kwargs)
new_name = 'from_bcolz-' + token
dsk = dict(((new_name, i),
(dataframe_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
columns, categories, lock))
for i in range(0, int(ceil(len(x) / chunksize))))
meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)
result = DataFrame(dsk, new_name, meta, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = tuple(da.percentile(a, q).compute())
return set_partition(result, index, divisions, **kwargs)
else:
return result
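# Illustrative sketch (not part of the original source): reading an
# in-memory bcolz ctable as a dask DataFrame.  Requires the optional
# ``bcolz`` package; the column names and values are assumptions.
def _example_from_bcolz():
    import bcolz
    x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
    ddf = from_bcolz(x, chunksize=2)
    return ddf.compute()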
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> import bcolz
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
1 2 20
2 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
1 20
2 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
1 20
2 30
Name: b, dtype: int...
"""
import bcolz
if columns is None:
columns = x.dtype.names
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
if type(slc) is slice:
start = slc.start
stop = slc.stop if slc.stop < len(x) else len(x)
else:
start = slc[0].start
stop = slc[0].stop if slc[0].stop < len(x) else len(x)
idx = pd.Index(range(start, stop))
if lock:
lock.acquire()
try:
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in columns]
if categories is not None:
chunks = [pd.Categorical.from_codes(
np.searchsorted(categories[name], chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(columns, chunks)]
result = pd.DataFrame(dict(zip(columns, chunks)), columns=columns,
index=idx)
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
result = pd.Series(chunk, name=columns, index=idx)
finally:
if lock:
lock.release()
return result
def from_dask_array(x, columns=None):
""" Convert dask Array to dask DataFrame
Converts a 2d array into a DataFrame and a 1d array into a Series.
Parameters
----------
x: da.Array
columns: list or string
list of column names if DataFrame, single string if Series
Examples
--------
>>> import dask.array as da
>>> import dask.dataframe as dd
>>> x = da.ones((4, 2), chunks=(2, 2))
>>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
>>> df.compute()
a b
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
3 1.0 1.0
"""
meta = _meta_from_array(x, columns)
name = 'from-dask-array' + tokenize(x, columns)
divisions = [0]
for c in x.chunks[0]:
divisions.append(divisions[-1] + c)
index = [(np.arange, a, b, 1, 'i8') for a, b in
zip(divisions[:-1], divisions[1:])]
divisions[-1] -= 1
if x.ndim == 2:
if len(x.chunks[1]) > 1:
x = x.rechunk({1: x.shape[1]})
dsk = {}
for i, (chunk, ind) in enumerate(zip(x._keys(), index)):
if x.ndim == 2:
chunk = chunk[0]
if isinstance(meta, pd.Series):
dsk[name, i] = (pd.Series, chunk, ind, x.dtype, meta.name)
else:
dsk[name, i] = (pd.DataFrame, chunk, ind, meta.columns)
return new_dd_object(merge(x.dask, dsk), name, meta, divisions)
def from_castra(x, columns=None):
"""Load a dask DataFrame from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
return x.to_dask(columns)
def _link(token, result):
""" A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
@wraps(pd.DataFrame.to_hdf)
def to_hdf(df, path_or_buf, key, mode='a', append=False, get=None,
name_function=None, compute=True, lock=None, dask_kwargs={},
**kwargs):
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
single_file = True
single_node = True
# if path_or_buf is string, format using i_name
if isinstance(path_or_buf, str):
if path_or_buf.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file path and dataset key")
fmt_obj = lambda path_or_buf, i_name: path_or_buf.replace('*', i_name)
if '*' in path_or_buf:
single_file = False
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in dataset key")
fmt_obj = lambda path_or_buf, _: path_or_buf
if '*' in key:
single_node = False
if 'format' in kwargs and kwargs['format'] != 'table':
raise ValueError("Dask only support 'table' format in hdf files.")
if mode not in ('a', 'w', 'r+'):
raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
if name_function is None:
name_function = build_name_function(df.npartitions - 1)
    # we guarantee partition order is preserved when it's saved and read
# so we enforce name_function to maintain the order of its input.
if not (single_file and single_node):
formatted_names = [name_function(i) for i in range(df.npartitions)]
if formatted_names != sorted(formatted_names):
warn("To preserve order between partitions name_function "
"must preserve the order of its input")
    # If the user did not specify a scheduler and the write is sequential,
    # default to the sequential scheduler; otherwise let _get choose one.
if get is None and 'get' not in _globals and single_node and single_file:
get = get_sync
# handle lock default based on whether we're writing to a single entity
_actual_get = get or _globals.get('get') or df._default_get
if lock is None:
if not single_node:
lock = True
elif not single_file and _actual_get is not dask.multiprocessing.get:
# if we're writing to multiple files with the multiprocessing
# scheduler we don't need to lock
lock = True
else:
lock = False
if lock is True:
if _actual_get == dask.multiprocessing.get:
lock = multiprocessing.Manager().Lock()
else:
lock = Lock()
kwargs.update({'format': 'table', 'mode': mode, 'append': append})
dsk = dict()
i_name = name_function(0)
dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, 0), fmt_obj(path_or_buf, i_name),
key.replace('*', i_name)], kwargs)
kwargs2 = kwargs.copy()
if single_file:
kwargs2['mode'] = 'a'
if single_node:
kwargs2['append'] = True
for i in range(1, df.npartitions):
i_name = name_function(i)
task = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, i), fmt_obj(path_or_buf, i_name),
key.replace('*', i_name)], kwargs2)
if single_file:
link_dep = i - 1 if single_node else 0
task = (_link, (name, link_dep), task)
dsk[(name, i)] = task
dsk = merge(df.dask, dsk)
if single_file and single_node:
keys = [(name, df.npartitions - 1)]
else:
keys = [(name, i) for i in range(df.npartitions)]
if compute:
return DataFrame._get(dsk, keys, get=get, **dask_kwargs)
else:
return delayed([Delayed(key, [dsk]) for key in keys])
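# Illustrative sketch (not part of the original source): writing one HDF5
# file per partition by putting a '*' in the path.  The path pattern and
# the DataFrame are assumptions; with the default name_function this
# produces output.0.h5, output.1.h5 and output.2.h5.
def _example_to_hdf_one_file_per_partition():
    import pandas as pd
    from dask.dataframe import from_pandas
    ddf = from_pandas(pd.DataFrame({'x': range(6)}), npartitions=3)
    to_hdf(ddf, 'output.*.h5', '/data')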
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future, when creating HDFStores, use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), sorted_index=False, lock=None, mode=None):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_stops_divisions(path, key, stop, sorted_index):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path, mode=mode) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
divisions = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
elif stop > storer.nrows:
raise ValueError("Stop keyword exceeds dataset number "
"of rows ({})".format(storer.nrows))
else:
stops.append(stop)
if sorted_index:
division_start = storer.read_column('index', start=0, stop=1)[0]
division_end = storer.read_column('index', start=storer.nrows-1, stop=storer.nrows)[0]
divisions.append([division_start, division_end])
else:
divisions.append(None)
return keys, stops, divisions
def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, mode=mode, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize, division))
name = 'read-hdf-' + token
if start >= stop:
raise ValueError("Start row number ({}) is above or equal to stop "
"row number ({})".format(start, stop))
if division:
dsk = {(name, 0): (_pd_read_hdf, path, key, lock,
{'mode': mode,
'columns': empty.columns})}
divisions = division
else:
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
{'mode': mode,
'start': s,
'stop': s + chunksize,
'columns': empty.columns}))
for i, s in enumerate(range(start, stop, chunksize)))
divisions = [None] * (len(dsk) + 1)
return DataFrame(dsk, name, empty, divisions)
keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from .multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
@wraps(pd.read_hdf)
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, sorted_index=False, lock=True, mode=None):
"""
    Read hdf files into a dask dataframe. Like pandas.read_hdf, except it
can read multiple files, and read multiple keys from the same file by using
pattern matching.
Parameters
----------
pattern : pattern (string), or buffer to read from. Can contain wildcards
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : optional, a list of columns that if not None, will limit the
return columns
chunksize : optional, positive integer
maximal number of rows per partition
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = Lock()
key = key if key.startswith('/') else '/' + key
paths = sorted(glob(pattern))
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError("When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes")
from .multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
sorted_index=sorted_index,
lock=lock, mode=mode)
for path in paths])
def to_castra(df, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from castra import Castra
if isinstance(categories, list):
categories = (list, categories)
name = 'to-castra-' + uuid.uuid1().hex
if sorted_index_column:
func = lambda part: (M.set_index, part, sorted_index_column)
else:
func = lambda part: part
dsk = dict()
dsk[(name, -1)] = (Castra, fn, func((df._name, 0)), categories)
for i in range(0, df.npartitions):
dsk[(name, i)] = (_link, (name, i - 1),
(Castra.extend, (name, -1), func((df._name, i))))
dsk = merge(dsk, df.dask)
keys = [(name, -1), (name, df.npartitions - 1)]
if compute:
return DataFrame._get(dsk, keys, get=get)[0]
else:
return delayed([Delayed(key, [dsk]) for key in keys])[0]
@delayed
def _to_csv_chunk(df, **kwargs):
if PY2:
out = io.BytesIO()
else:
out = io.StringIO()
df.to_csv(out, **kwargs)
out.seek(0)
if PY2:
return out.getvalue()
encoding = kwargs.get('encoding', sys.getdefaultencoding())
return out.getvalue().encode(encoding)
def to_csv(df, filename, name_function=None, compression=None, compute=True,
**kwargs):
values = [_to_csv_chunk(d, **kwargs) for d in df.to_delayed()]
values = write_bytes(values, filename, name_function, compression,
encoding=None)
if compute:
from dask import compute
compute(*values)
else:
return values
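# Illustrative sketch (not part of the original source): writing one CSV
# file per partition.  The '*' in the file name is replaced with a
# partition label (e.g. out.0.csv, out.1.csv with the default naming);
# the path and DataFrame are assumptions made for illustration.
def _example_to_csv():
    import pandas as pd
    from dask.dataframe import from_pandas
    ddf = from_pandas(pd.DataFrame({'x': [1, 2, 3, 4]}), npartitions=2)
    to_csv(ddf, 'out.*.csv', index=False)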
def _df_to_bag(df, index=False):
if isinstance(df, pd.DataFrame):
return list(map(tuple, df.itertuples(index)))
elif isinstance(df, pd.Series):
return list(df.iteritems()) if index else list(df)
def to_bag(df, index=False):
from ..bag.core import Bag
if not isinstance(df, (DataFrame, Series)):
raise TypeError("df must be either DataFrame or Series")
name = 'to_bag-' + tokenize(df, index)
dsk = dict(((name, i), (_df_to_bag, block, index))
for (i, block) in enumerate(df._keys()))
dsk.update(df._optimize(df.dask, df._keys()))
return Bag(dsk, name, df.npartitions)
@insert_meta_param_description
def from_delayed(dfs, meta=None, divisions=None, prefix='from-delayed',
metadata=None):
""" Create DataFrame from many dask.delayed objects
Parameters
----------
dfs : list of Delayed
An iterable of ``dask.delayed.Delayed`` objects, such as come from
        ``dask.delayed``. These comprise the individual partitions of the
resulting dataframe.
$META
divisions : tuple, str, optional
Partition boundaries along the index.
For tuple, see http://dask.pydata.io/en/latest/dataframe-partitions.html
For string 'sorted' will compute the delayed values to find index
values. Assumes that the indexes are mutually sorted.
If None, then won't use index information
prefix : str, optional
Prefix to prepend to the keys.
"""
if metadata is not None and meta is None:
warn("Deprecation warning: Use meta keyword, not metadata")
meta = metadata
from dask.delayed import Delayed
if isinstance(dfs, Delayed):
dfs = [dfs]
dsk = merge(df.dask for df in dfs)
name = prefix + '-' + tokenize(*dfs)
names = [(name, i) for i in range(len(dfs))]
values = [df.key for df in dfs]
dsk2 = dict(zip(names, values))
dsk3 = merge(dsk, dsk2)
if meta is None:
meta = dfs[0].compute()
if isinstance(meta, (str, pd.Series)):
Frame = Series
else:
Frame = DataFrame
if divisions == 'sorted':
from .core import compute_divisions
divisions = [None] * (len(dfs) + 1)
df = Frame(dsk3, name, meta, divisions)
return compute_divisions(df)
elif divisions is None:
divisions = [None] * (len(dfs) + 1)
return Frame(dsk3, name, meta, divisions)
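# Illustrative sketch (not part of the original source): assembling a dask
# DataFrame from a few delayed pandas partitions.  The partition contents
# and the explicit ``meta`` frame are assumptions made for illustration.
def _example_from_delayed():
    import pandas as pd
    from dask import delayed as _delayed
    parts = [_delayed(pd.DataFrame)({'x': [i, i + 1]}) for i in range(3)]
    # Passing meta avoids computing the first partition just to infer dtypes
    meta = pd.DataFrame({'x': pd.Series([], dtype='int64')})
    ddf = from_delayed(parts, meta=meta)
    return ddf.compute()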
def sorted_division_locations(seq, npartitions=None, chunksize=None):
""" Find division locations and values in sorted list
Examples
--------
>>> L = ['A', 'B', 'C', 'D', 'E', 'F']
>>> sorted_division_locations(L, chunksize=2)
(['A', 'C', 'E', 'F'], [0, 2, 4, 6])
>>> sorted_division_locations(L, chunksize=3)
(['A', 'D', 'F'], [0, 3, 6])
>>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C']
>>> sorted_division_locations(L, chunksize=3)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(L, chunksize=2)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(['A'], chunksize=2)
(['A', 'A'], [0, 1])
"""
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
if npartitions:
chunksize = ceil(len(seq) / npartitions)
positions = [0]
values = [seq[0]]
for pos in list(range(0, len(seq), chunksize)):
if pos <= positions[-1]:
continue
while pos + 1 < len(seq) and seq[pos - 1] == seq[pos]:
pos += 1
values.append(seq[pos])
if pos == len(seq) - 1:
pos += 1
positions.append(pos)
if positions[-1] != len(seq):
positions.append(len(seq))
values.append(seq[-1])
return values, positions
| {
"repo_name": "cowlicks/dask",
"path": "dask/dataframe/io.py",
"copies": "1",
"size": "29652",
"license": "bsd-3-clause",
"hash": 7687687358721241000,
"line_mean": 32.5809739524,
"line_max": 106,
"alpha_frac": 0.5806353703,
"autogenerated": false,
"ratio": 3.7420494699646643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48226848402646644,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from functools import wraps
from glob import glob
import os
from threading import Lock
import multiprocessing
import uuid
from warnings import warn
import pandas as pd
import dask
from toolz import merge
from ...async import get_sync
from ...base import tokenize
from ...context import _globals
from ...delayed import Delayed, delayed
import dask.multiprocessing
from ..core import DataFrame, new_dd_object
from ...utils import build_name_function
from .io import _link
lock = Lock()
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
@wraps(pd.DataFrame.to_hdf)
def to_hdf(df, path_or_buf, key, mode='a', append=False, get=None,
name_function=None, compute=True, lock=None, dask_kwargs={},
**kwargs):
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
single_file = True
single_node = True
# if path_or_buf is string, format using i_name
if isinstance(path_or_buf, str):
if path_or_buf.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file path and dataset key")
fmt_obj = lambda path_or_buf, i_name: path_or_buf.replace('*', i_name)
if '*' in path_or_buf:
single_file = False
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in dataset key")
fmt_obj = lambda path_or_buf, _: path_or_buf
if '*' in key:
single_node = False
if 'format' in kwargs and kwargs['format'] != 'table':
raise ValueError("Dask only support 'table' format in hdf files.")
if mode not in ('a', 'w', 'r+'):
raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
if name_function is None:
name_function = build_name_function(df.npartitions - 1)
    # we guarantee partition order is preserved when it's saved and read
# so we enforce name_function to maintain the order of its input.
if not (single_file and single_node):
formatted_names = [name_function(i) for i in range(df.npartitions)]
if formatted_names != sorted(formatted_names):
warn("To preserve order between partitions name_function "
"must preserve the order of its input")
    # If the user did not specify a scheduler and the write is sequential,
    # default to the sequential scheduler; otherwise let _get choose one.
if get is None and 'get' not in _globals and single_node and single_file:
get = get_sync
# handle lock default based on whether we're writing to a single entity
_actual_get = get or _globals.get('get') or df._default_get
if lock is None:
if not single_node:
lock = True
elif not single_file and _actual_get is not dask.multiprocessing.get:
# if we're writing to multiple files with the multiprocessing
# scheduler we don't need to lock
lock = True
else:
lock = False
if lock is True:
if _actual_get == dask.multiprocessing.get:
lock = multiprocessing.Manager().Lock()
else:
lock = Lock()
kwargs.update({'format': 'table', 'mode': mode, 'append': append})
dsk = dict()
i_name = name_function(0)
dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, 0), fmt_obj(path_or_buf, i_name),
key.replace('*', i_name)], kwargs)
kwargs2 = kwargs.copy()
if single_file:
kwargs2['mode'] = 'a'
if single_node:
kwargs2['append'] = True
for i in range(1, df.npartitions):
i_name = name_function(i)
task = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, i), fmt_obj(path_or_buf, i_name),
key.replace('*', i_name)], kwargs2)
if single_file:
link_dep = i - 1 if single_node else 0
task = (_link, (name, link_dep), task)
dsk[(name, i)] = task
dsk = merge(df.dask, dsk)
if single_file and single_node:
keys = [(name, df.npartitions - 1)]
else:
keys = [(name, i) for i in range(df.npartitions)]
if compute:
return DataFrame._get(dsk, keys, get=get, **dask_kwargs)
else:
return delayed([Delayed(k, [dsk]) for k in keys])
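# Illustrative sketch (not part of the original source): writing each
# partition to its own dataset inside a single HDF5 file by putting a '*'
# in the key.  The path, key pattern and DataFrame are assumptions; with
# the default name_function the keys become /data0 and /data1.
def _example_to_hdf_multiple_datasets():
    import pandas as pd
    from dask.dataframe import from_pandas
    ddf = from_pandas(pd.DataFrame({'x': range(4)}), npartitions=2)
    to_hdf(ddf, 'output.h5', '/data*')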
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future, when creating HDFStores, use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), sorted_index=False, lock=None,
mode='a'):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_stops_divisions(path, key, stop, sorted_index):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path, mode=mode) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
divisions = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
elif stop > storer.nrows:
raise ValueError("Stop keyword exceeds dataset number "
"of rows ({})".format(storer.nrows))
else:
stops.append(stop)
if sorted_index:
division_start = storer.read_column('index', start=0, stop=1)[0]
division_end = storer.read_column('index', start=storer.nrows - 1,
stop=storer.nrows)[0]
divisions.append([division_start, division_end])
else:
divisions.append(None)
return keys, stops, divisions
def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, mode=mode, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize, division))
name = 'read-hdf-' + token
if empty.ndim == 1:
base = {'name': empty.name, 'mode': mode}
else:
base = {'columns': empty.columns, 'mode': mode}
if start >= stop:
raise ValueError("Start row number ({}) is above or equal to stop "
"row number ({})".format(start, stop))
if division:
dsk = {(name, 0): (_pd_read_hdf, path, key, lock,
base)}
divisions = division
else:
def update(s):
new = base.copy()
new.update({'start': s, 'stop': s + chunksize})
return new
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
update(s)))
for i, s in enumerate(range(start, stop, chunksize)))
divisions = [None] * (len(dsk) + 1)
return new_dd_object(dsk, name, empty, divisions)
keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from ..multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
@wraps(pd.read_hdf)
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, sorted_index=False, lock=True, mode='a'):
"""
    Read hdf files into a dask dataframe. Like pandas.read_hdf, except it
can read multiple files, and read multiple keys from the same file by using
pattern matching.
Parameters
----------
pattern : pattern (string), or buffer to read from. Can contain wildcards
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : optional, a list of columns that if not None, will limit the
return columns
chunksize : optional, positive integer
maximal number of rows per partition
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = Lock()
key = key if key.startswith('/') else '/' + key
paths = sorted(glob(pattern))
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError("When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes")
from ..multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
sorted_index=sorted_index,
lock=lock, mode=mode)
for path in paths])
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/io/hdf.py",
"copies": "1",
"size": "11095",
"license": "mit",
"hash": -608692107418220800,
"line_mean": 33.8899371069,
"line_max": 98,
"alpha_frac": 0.580261379,
"autogenerated": false,
"ratio": 3.9695885509838997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50498499299839,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from functools import wraps, partial
from glob import glob
from math import ceil
from operator import getitem
import os
import re
from threading import Lock
import uuid
import pandas as pd
import numpy as np
from toolz import merge, assoc, dissoc
from ..compatibility import BytesIO, unicode, range, apply
from ..utils import textblock, file_size, get_bom
from ..base import tokenize
from .. import array as da
from ..async import get_sync
from . import core
from .core import DataFrame, Series
from .shuffle import set_partition
lock = Lock()
csv_defaults = {'compression': None}
def _read_csv(fn, i, chunkbytes, compression, kwargs, bom):
block = textblock(fn, i*chunkbytes, (i+1) * chunkbytes, compression,
encoding=kwargs.get('encoding'))
block = BytesIO(bom + block)
try:
return pd.read_csv(block, **kwargs)
except ValueError as e:
msg = """
Dask dataframe inspected the first 1,000 rows of your csv file to guess the
data types of your columns. These first 1,000 rows led us to an incorrect
guess.
For example a column may have had integers in the first 1000
rows followed by a float or missing value in the 1,001-st row.
You will need to specify some dtype information explicitly using the
``dtype=`` keyword argument for the right column names and dtypes.
df = dd.read_csv(..., dtype={'my-column': float})
Pandas has given us the following error when trying to parse the file:
"%s"
""" % e.args[0]
match = re.match('cannot safely convert passed user dtype of (?P<old_dtype>\S+) for (?P<new_dtype>\S+) dtyped data in column (?P<column_number>\d+)', e.args[0])
if match:
d = match.groupdict()
d['column'] = kwargs['names'][int(d['column_number'])]
msg += """
From this we think that you should probably add the following column/dtype
pair to your dtype= dictionary
'%(column)s': '%(new_dtype)s'
""" % d
# TODO: add more regexes and msg logic here for other pandas errors
        # as appropriate
raise ValueError(msg)
def clean_kwargs(kwargs):
""" Do some sanity checks on kwargs
>>> clean_kwargs({'parse_dates': ['a', 'b'], 'usecols': ['b', 'c']})
{'parse_dates': ['b'], 'usecols': ['b', 'c']}
>>> clean_kwargs({'names': ['a', 'b'], 'usecols': [1]})
{'parse_dates': [], 'names': ['a', 'b'], 'usecols': ['b']}
"""
kwargs = kwargs.copy()
if 'usecols' in kwargs and 'names' in kwargs:
kwargs['usecols'] = [kwargs['names'][c]
if isinstance(c, int) and c not in kwargs['names']
else c
for c in kwargs['usecols']]
kwargs['parse_dates'] = [col for col in kwargs.get('parse_dates', ())
if kwargs.get('usecols') is None
or isinstance(col, (tuple, list)) and all(c in kwargs['usecols']
for c in col)
or col in kwargs['usecols']]
return kwargs
def fill_kwargs(fn, **kwargs):
""" Read a csv file and fill up kwargs
This normalizes kwargs against a sample file. It does the following:
1. If given a globstring, just use one file
2. Get names from csv file if not given
3. Identify the presence of a header
4. Identify dtypes
5. Establish column names
6. Switch around dtypes and column names if parse_dates is active
Normally ``pd.read_csv`` does this for us. However for ``dd.read_csv`` we
need to be consistent across multiple files and don't want to do these
heuristics each time so we use the pandas solution once, record the
results, and then send back a fully explicit kwargs dict to send to future
calls to ``pd.read_csv``.
Returns
-------
kwargs: dict
keyword arguments to give to pd.read_csv
"""
if 'index_col' in kwargs:
msg = """
The index column cannot be set at dataframe creation time. Instead use
the `set_index` method on the dataframe after it is created.
"""
raise ValueError(msg)
kwargs = merge(csv_defaults, kwargs)
sample_nrows = kwargs.pop('sample_nrows', 1000)
essentials = ['columns', 'names', 'header', 'parse_dates', 'dtype']
if set(essentials).issubset(kwargs):
return kwargs
    # Let pandas infer dtypes on the first `sample_nrows` rows (1,000 by default)
if '*' in fn:
filenames = sorted(glob(fn))
if not filenames:
raise ValueError("No files found matching name %s" % fn)
fn = filenames[0]
if 'names' not in kwargs:
kwargs['names'] = csv_names(fn, **kwargs)
if 'header' not in kwargs:
kwargs['header'] = 0 if infer_header(fn, **kwargs) else None
kwargs = clean_kwargs(kwargs)
try:
head = pd.read_csv(fn, **assoc(kwargs, 'nrows', sample_nrows))
except StopIteration:
head = pd.read_csv(fn, **kwargs)
if 'parse_dates' not in kwargs:
kwargs['parse_dates'] = [col for col in head.dtypes.index
if np.issubdtype(head.dtypes[col], np.datetime64)]
new_dtype = dict(head.dtypes)
dtype = kwargs.get('dtype', dict())
for k, v in dict(head.dtypes).items():
if k not in dtype:
dtype[k] = v
if kwargs.get('parse_dates'):
for col in kwargs['parse_dates']:
del dtype[col]
kwargs['dtype'] = dtype
return head.columns, kwargs
@wraps(pd.read_csv)
def read_csv(fn, **kwargs):
if 'nrows' in kwargs: # Just create single partition
df = read_csv(fn, **dissoc(kwargs, 'nrows'))
return df.head(kwargs['nrows'], compute=False)
chunkbytes = kwargs.pop('chunkbytes', 2**25) # 50 MB
index = kwargs.pop('index', None)
kwargs = kwargs.copy()
columns, kwargs = fill_kwargs(fn, **kwargs)
# Handle glob strings
if '*' in fn:
from .multi import concat
return concat([read_csv(f, **kwargs) for f in sorted(glob(fn))])
token = tokenize(os.path.getmtime(fn), kwargs)
name = 'read-csv-%s-%s' % (fn, token)
bom = get_bom(fn)
# Chunk sizes and numbers
total_bytes = file_size(fn, kwargs['compression'])
nchunks = int(ceil(total_bytes / chunkbytes))
divisions = [None] * (nchunks + 1)
first_kwargs = merge(kwargs, dict(compression=None))
rest_kwargs = merge(kwargs, dict(header=None, compression=None))
# Create dask graph
dsk = dict(((name, i), (_read_csv, fn, i, chunkbytes,
kwargs['compression'], rest_kwargs,
bom))
for i in range(1, nchunks))
dsk[(name, 0)] = (_read_csv, fn, 0, chunkbytes, kwargs['compression'],
first_kwargs, b'')
result = DataFrame(dsk, name, columns, divisions)
if index:
result = result.set_index(index)
return result
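# Illustrative sketch (not part of the original source): using this older
# ``read_csv`` wrapper, which splits the file into roughly chunkbytes-sized
# blocks.  The file name, block size and dtype override are assumptions.
def _example_read_csv():
    ddf = read_csv('accounts.csv', chunkbytes=2 ** 20,
                   dtype={'balance': 'float64'})
    return ddf.head()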
def infer_header(fn, **kwargs):
""" Guess if csv file has a header or not
This uses Pandas to read a sample of the file, then looks at the column
names to see if they are all phrase-like (words, potentially with spaces
in between.)
Returns True or False
"""
# See read_csv docs for header for reasoning
kwargs.update(dict(nrows=5, names=None, parse_dates=None))
try:
df = pd.read_csv(fn, **kwargs)
except StopIteration:
kwargs['nrows'] = None
df = pd.read_csv(fn, **kwargs)
return (len(df) > 0 and
all(re.match('^\s*\D[\w ]*\s*$', n) for n in df.columns) and
not all(dt == 'O' for dt in df.dtypes))
def csv_names(fn, encoding='utf-8', compression=None, names=None,
parse_dates=None, usecols=None, dtype=None, **kwargs):
try:
kwargs['nrows'] = 5
df = pd.read_csv(fn, encoding=encoding, compression=compression,
names=names, parse_dates=parse_dates, **kwargs)
except StopIteration:
kwargs['nrows'] = None
df = pd.read_csv(fn, encoding=encoding, compression=compression,
names=names, parse_dates=parse_dates, **kwargs)
return list(df.columns)
def from_array(x, chunksize=50000, columns=None):
""" Read dask Dataframe from any slicable array
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
has_record_dtype = getattr(x.dtype, 'names', None) is not None
if x.ndim > 2:
        raise ValueError('from_array does not accept arrays with more than '
                         '2 dimensions, got array with shape %r' % (x.shape,))
if columns is None:
if has_record_dtype:
columns = tuple(x.dtype.names) # record array has named columns
elif x.ndim == 2:
columns = [str(i) for i in range(x.shape[1])]
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns)
divisions = tuple(range(0, len(x), chunksize))
if divisions[-1] != len(x) - 1:
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = 'from_array-' + token
dsk = dict(((name, i), (pd.DataFrame,
(getitem, x,
slice(i * chunksize, (i + 1) * chunksize))))
for i in range(0, int(ceil(len(x) / chunksize))))
return DataFrame(dsk, name, columns, divisions)
def from_pandas(data, npartitions, sort=True):
"""Construct a dask object from a pandas object.
If given a ``pandas.Series`` a ``dask.Series`` will be returned. If given a
``pandas.DataFrame`` a ``dask.DataFrame`` will be returned. All other
pandas objects will raise a ``TypeError``.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a dask DataFrame/Series
npartitions : int
The number of partitions of the index to create
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
from_bcolz : Construct a dask.DataFrame from a bcolz ctable
read_csv : Construct a dask.DataFrame from a CSV file
"""
columns = getattr(data, 'columns', getattr(data, 'name', None))
if columns is None and not isinstance(data, pd.Series):
raise TypeError("Input must be a pandas DataFrame or Series")
nrows = len(data)
chunksize = int(ceil(nrows / npartitions))
if sort and not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
if sort:
divisions = tuple(data.index[i]
for i in range(0, nrows, chunksize))
divisions = divisions + (data.index[-1],)
else:
divisions = [None] * (npartitions + 1)
name = 'from_pandas-' + tokenize(data, chunksize)
dsk = dict(((name, i), data.iloc[i * chunksize:(i + 1) * chunksize])
for i in range(npartitions - 1))
dsk[(name, npartitions - 1)] = data.iloc[chunksize*(npartitions - 1):]
return getattr(core, type(data).__name__)(dsk, name, columns, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, **kwargs):
""" Read dask Dataframe from bcolz.ctable
Parameters
----------
x : bcolz.ctable
Input data
chunksize : int (optional)
The size of blocks to pull out from ctable. Ideally as large as can
comfortably fit in memory
categorize : bool (defaults to True)
Automatically categorize all string dtypes
index : string (optional)
Column to make the index
See Also
--------
from_array: more generic function not optimized for bcolz
"""
import dask.array as da
import bcolz
if isinstance(x, (str, unicode)):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = (0,) + tuple(range(-1, len(x), chunksize))[1:]
if divisions[-1] != len(x) - 1:
divisions = divisions + (len(x) - 1,)
if x.rootdir:
token = tokenize((x.rootdir, os.path.getmtime(x.rootdir)), chunksize,
categorize, index, kwargs)
else:
token = tokenize((id(x), x.shape, x.dtype), chunksize, categorize,
index, kwargs)
new_name = 'from_bcolz-' + token
dsk = dict(((new_name, i),
(locked_df_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
columns, categories))
for i in range(0, int(ceil(len(x) / chunksize))))
result = DataFrame(dsk, new_name, columns, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = da.percentile(a, q).compute()
return set_partition(result, index, divisions, **kwargs)
else:
return result
def dataframe_from_ctable(x, slc, columns=None, categories=None):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> import bcolz
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
0 2 20
1 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
0 20
1 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
0 20
1 30
Name: b, dtype: int...
"""
import bcolz
if columns is None:
columns = x.dtype.names
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in columns]
if categories is not None:
chunks = [pd.Categorical.from_codes(np.searchsorted(categories[name],
chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(columns, chunks)]
return pd.DataFrame(dict(zip(columns, chunks)), columns=columns)
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
return pd.Series(chunk, name=columns)
def locked_df_from_ctable(*args, **kwargs):
with lock:
result = dataframe_from_ctable(*args, **kwargs)
return result
def from_dask_array(x, columns=None):
""" Convert dask Array to dask DataFrame
Converts a 2d array into a DataFrame and a 1d array into a Series.
Parameters
----------
x: da.Array
columns: list or string
list of column names if DataFrame, single string if Series
Examples
--------
>>> import dask.array as da
>>> import dask.dataframe as dd
>>> x = da.ones((4, 2), chunks=(2, 2))
>>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
>>> df.compute()
a b
0 1 1
1 1 1
2 1 1
3 1 1
"""
name = 'from-dask-array' + tokenize(x, columns)
divisions = [0]
for c in x.chunks[0]:
divisions.append(divisions[-1] + c)
index = [(range, a, b) for a, b in zip(divisions[:-1], divisions[1:])]
divisions[-1] -= 1
if x.ndim == 1:
if x.dtype.names is None:
dsk = dict(((name, i), (pd.Series, chunk, ind, x.dtype, columns))
for i, (chunk, ind) in enumerate(zip(x._keys(), index)))
return Series(merge(x.dask, dsk), name, columns, divisions)
else:
if columns is None:
columns = x.dtype.names
dsk = dict(((name, i), (pd.DataFrame, chunk, ind, columns))
for i, (chunk, ind) in enumerate(zip(x._keys(), index)))
return DataFrame(merge(x.dask, dsk), name, columns, divisions)
elif x.ndim == 2:
if columns is None:
raise ValueError("Must provide columns for DataFrame")
if len(columns) != x.shape[1]:
raise ValueError("Columns must be the same length as array width\n"
" columns: %s\n width: %d" % (str(columns), x.shape[1]))
if len(x.chunks[1]) > 1:
x = x.rechunk({1: x.shape[1]})
dsk = dict(((name, i), (pd.DataFrame, chunk[0], ind, columns))
for i, (chunk, ind) in enumerate(zip(x._keys(), index)))
return DataFrame(merge(x.dask, dsk), name, columns, divisions)
else:
raise ValueError("Array must have one or two dimensions. Had %d" %
x.ndim)
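# Illustrative sketch (added): a 1-d array becomes a Series, and ``columns``
# is then a single name rather than a list.
#
#   >>> import dask.array as da                                 # doctest: +SKIP
#   >>> s = from_dask_array(da.ones(4, chunks=2), columns='x')  # doctest: +SKIP
#   >>> s.compute()                                             # doctest: +SKIP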
def from_castra(x, columns=None):
"""Load a dask DataFrame from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
return x.to_dask(columns)
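# Illustrative usage sketch (added); the filename is hypothetical.
#
#   >>> df = from_castra('/data/accounts.castra')                 # doctest: +SKIP
#   >>> df = from_castra('/data/accounts.castra', columns=['x'])  # doctest: +SKIP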
def _link(token, result):
""" A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
@wraps(pd.DataFrame.to_hdf)
def to_hdf(df, path_or_buf, key, mode='a', append=False, complevel=0,
complib=None, fletcher32=False, get=get_sync, **kwargs):
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
dsk = dict()
dsk[(name, 0)] = (_link, None,
(apply, pd_to_hdf,
(tuple, [(df._name, 0), path_or_buf, key]),
{'mode': mode, 'format': 'table', 'append': append,
'complevel': complevel, 'complib': complib,
'fletcher32': fletcher32}))
for i in range(1, df.npartitions):
dsk[(name, i)] = (_link, (name, i - 1),
(apply, pd_to_hdf,
(tuple, [(df._name, i), path_or_buf, key]),
{'mode': 'a', 'format': 'table', 'append': True,
'complevel': complevel, 'complib': complib,
'fletcher32': fletcher32}))
DataFrame._get(merge(df.dask, dsk), (name, df.npartitions - 1),
get=get, **kwargs)
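# Illustrative sketch (added): every partition is appended to the same table in
# order; the ``_link`` tasks above enforce the sequential writes. The filename
# and key are hypothetical.
#
#   >>> to_hdf(df, 'out.h5', '/data')    # doctest: +SKIP
#   >>> pd.read_hdf('out.h5', '/data')   # doctest: +SKIP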
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), lock=None):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_and_stops(path, key, stop):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
else:
stops.append(stop)
return keys, stops
def one_path_one_key(path, key, start, stop, columns, chunksize, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
if columns is None:
columns = list(pd.read_hdf(path, key, stop=0).columns)
token = tokenize((path, os.path.getmtime(path), key, start,
stop, columns, chunksize))
name = 'read-hdf-' + token
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
{'start': s,
'stop': s + chunksize,
'columns': columns}))
for i, s in enumerate(range(start, stop, chunksize)))
divisions = [None] * (len(dsk) + 1)
return DataFrame(dsk, name, columns, divisions)
if lock is True:
lock = Lock()
keys, stops = get_keys_and_stops(path, key, stop)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from .multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, lock)
for k, s in zip(keys, stops)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
@wraps(pd.read_hdf)
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, lock=True):
"""
Read hdf files into a dask dataframe. Like pandas.read_hdf, except we
can read multiple files, and read multiple keys from the same file by using
pattern matching.
Parameters
----------
pattern : pattern (string), or buffer to read from. Can contain wildcards
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : optional, a list of columns that if not None, will limit the
return columns
chunksize : optional, nrows to include in iteration, return an iterator
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
paths = sorted(glob(pattern))
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
from .multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
lock=lock)
for path in paths])
def to_castra(df, fn=None, categories=None, sorted_index_column=None,
compute=True):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from castra import Castra
if isinstance(categories, list):
categories = (list, categories)
name = 'to-castra-' + uuid.uuid1().hex
if sorted_index_column:
set_index = lambda x: x.set_index(sorted_index_column)
func = lambda part: (set_index, part)
else:
func = lambda part: part
dsk = dict()
dsk[(name, -1)] = (Castra, fn, func((df._name, 0)), categories)
for i in range(0, df.npartitions):
dsk[(name, i)] = (_link, (name, i - 1),
(Castra.extend, (name, -1), func((df._name, i))))
dsk = merge(dsk, df.dask)
keys = [(name, -1), (name, df.npartitions - 1)]
if compute:
c, _ = DataFrame._get(dsk, keys, get=get_sync)
return c
else:
return dsk, keys
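# Illustrative usage sketch (added); the path and category column are
# hypothetical.
#
#   >>> c = to_castra(df, '/data/out.castra', categories=['name'])  # doctest: +SKIP
#   >>> df2 = from_castra(c)                                        # doctest: +SKIP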
def to_csv(df, filename, compression=None, **kwargs):
if compression:
raise NotImplementedError("Writing compressed csv files not supported")
myget = kwargs.pop('get', None)
name = 'to-csv-' + uuid.uuid1().hex
dsk = dict()
dsk[(name, 0)] = (lambda df, fn, kwargs: df.to_csv(fn, **kwargs),
(df._name, 0), filename, kwargs)
kwargs2 = kwargs.copy()
kwargs2['mode'] = 'a'
kwargs2['header'] = False
for i in range(1, df.npartitions):
dsk[(name, i)] = (_link, (name, i - 1),
(lambda df, fn, kwargs: df.to_csv(fn, **kwargs),
(df._name, i), filename, kwargs2))
DataFrame._get(merge(dsk, df.dask), (name, df.npartitions - 1), get=myget)
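# Illustrative sketch (added): partitions are written sequentially; the first
# call writes the header and later calls append with ``mode='a', header=False``.
#
#   >>> to_csv(df, 'out.csv', index=False)  # doctest: +SKIP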
def to_bag(df, index=False):
from ..bag.core import Bag
if isinstance(df, DataFrame):
func = lambda df: list(df.itertuples(index))
elif isinstance(df, Series):
func = (lambda df: list(df.iteritems())) if index else list
else:
raise TypeError("df must be either DataFrame or Series")
name = 'to_bag-' + tokenize(df, index)
dsk = dict(((name, i), (func, block)) for (i, block) in enumerate(df._keys()))
dsk.update(df._optimize(df.dask, df._keys()))
return Bag(dsk, name, df.npartitions)
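# Illustrative sketch (added): each partition becomes a list of tuples (or of
# plain values for a Series); pass ``index=True`` to keep the index.
#
#   >>> b = to_bag(df)        # doctest: +SKIP
#   >>> b.take(2)             # doctest: +SKIP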
| {
"repo_name": "pombredanne/dask",
"path": "dask/dataframe/io.py",
"copies": "1",
"size": "27492",
"license": "bsd-3-clause",
"hash": 7474663693914020000,
"line_mean": 32.9826946848,
"line_max": 168,
"alpha_frac": 0.5793321694,
"autogenerated": false,
"ratio": 3.7246985503319334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4804030719731933,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from glob import glob
import os
import uuid
from warnings import warn
import pandas as pd
from toolz import merge
from ...async import get_sync
from ...base import tokenize
from ...compatibility import PY3
from ...context import _globals
from ...delayed import Delayed, delayed
from ... import multiprocessing
from ..core import DataFrame, new_dd_object
from ...utils import build_name_function, effective_get, get_scheduler_lock
from .io import _link
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
def to_hdf(df, path, key, mode='a', append=False, get=None,
name_function=None, compute=True, lock=None, dask_kwargs={},
**kwargs):
""" Store Dask Dataframe to Hierarchical Data Format (HDF) files
This is a parallel version of the Pandas function of the same name. Please
see the Pandas docstring for more detailed information about shared keyword
arguments.
This function differs from the Pandas version by saving the many partitions
of a Dask DataFrame in parallel, either to many files, or to many datasets
within the same file. You may specify this parallelism with an asterisk
``*`` within the filename or datapath, and an optional ``name_function``.
The asterisk will be replaced with an increasing sequence of integers
starting from ``0`` or with the result of calling ``name_function`` on each
of those integers.
This function only supports the Pandas ``'table'`` format, not the more
specialized ``'fixed'`` format.
Parameters
----------
path: string
Path to a target filename. May contain a ``*`` to denote many filenames
key: string
Datapath within the files. May contain a ``*`` to denote many locations
name_function: function
A function to convert the ``*`` in the above options to a string.
Should take in a number from 0 to the number of partitions and return a
string. (see examples below)
compute: bool
Whether or not to execute immediately. If False then this returns a
``dask.Delayed`` value.
lock: Lock, optional
Lock to use to prevent concurrency issues. By default a
``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``
will be used depending on your scheduler if a lock is required. See
dask.utils.get_scheduler_lock for more information about lock
selection.
**other:
See pandas.to_hdf for more information
Examples
--------
Save Data to a single file
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Save data to multiple datapaths within the same file:
>>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP
Save data to multiple files:
>>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP
Save data to multiple files, using the multiprocessing scheduler:
>>> df.to_hdf('output-*.hdf', '/data', get=dask.multiprocessing.get) # doctest: +SKIP
Specify custom naming scheme. This writes files as
'2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..
>>> from datetime import date, timedelta
>>> base = date(year=2000, month=1, day=1)
>>> def name_function(i):
... ''' Convert integer 0 to n to a string '''
...     return str(base + timedelta(days=i))
>>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP
Returns
-------
None: if compute == True
delayed value: if compute == False
See Also
--------
read_hdf:
to_parquet:
"""
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
single_file = True
single_node = True
# if path is string, format using i_name
if isinstance(path, str):
if path.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file "
"path and dataset key")
fmt_obj = lambda path, i_name: path.replace('*', i_name)
if '*' in path:
single_file = False
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in "
"dataset key")
fmt_obj = lambda path, _: path
if '*' in key:
single_node = False
if 'format' in kwargs and kwargs['format'] != 'table':
raise ValueError("Dask only support 'table' format in hdf files.")
if mode not in ('a', 'w', 'r+'):
raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
if name_function is None:
name_function = build_name_function(df.npartitions - 1)
# we guarantee partition order is preserved when it's saved and read
# so we enforce name_function to maintain the order of its input.
if not (single_file and single_node):
formatted_names = [name_function(i) for i in range(df.npartitions)]
if formatted_names != sorted(formatted_names):
warn("To preserve order between partitions name_function "
"must preserve the order of its input")
# If the user did not specify a scheduler and the write is sequential, default
# to the sequential scheduler. Otherwise let the _get method choose the scheduler
if get is None and 'get' not in _globals and single_node and single_file:
get = get_sync
# handle lock default based on whether we're writing to a single entity
_actual_get = effective_get(get, df)
if lock is None:
if not single_node:
lock = True
elif not single_file and _actual_get is not multiprocessing.get:
# if we're writing to multiple files with the multiprocessing
# scheduler we don't need to lock
lock = True
else:
lock = False
if lock:
lock = get_scheduler_lock(get, df)
kwargs.update({'format': 'table', 'mode': mode, 'append': append})
dsk = dict()
i_name = name_function(0)
dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, 0), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs)
kwargs2 = kwargs.copy()
if single_file:
kwargs2['mode'] = 'a'
if single_node:
kwargs2['append'] = True
for i in range(1, df.npartitions):
i_name = name_function(i)
task = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, i), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs2)
if single_file:
link_dep = i - 1 if single_node else 0
task = (_link, (name, link_dep), task)
dsk[(name, i)] = task
dsk = merge(df.dask, dsk)
if single_file and single_node:
keys = [(name, df.npartitions - 1)]
else:
keys = [(name, i) for i in range(df.npartitions)]
if compute:
return DataFrame._get(dsk, keys, get=get, **dask_kwargs)
else:
return delayed([Delayed(k, [dsk]) for k in keys])
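# Illustrative sketch (not in the original file): with a ``*`` in the path each
# partition is written to its own file and ``name_function`` chooses the
# suffix. The default ``build_name_function`` zero-pads the integers so that
# lexicographic order matches partition order.
#
#   >>> df.to_hdf('out-*.h5', '/data',                       # doctest: +SKIP
#   ...           name_function=lambda i: 'part-%03d' % i)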
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), sorted_index=False, lock=None,
mode='a'):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_stops_divisions(path, key, stop, sorted_index):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path, mode=mode) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
divisions = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
elif stop > storer.nrows:
raise ValueError("Stop keyword exceeds dataset number "
"of rows ({})".format(storer.nrows))
else:
stops.append(stop)
if sorted_index:
division_start = storer.read_column('index', start=0, stop=1)[0]
division_end = storer.read_column('index', start=storer.nrows - 1,
stop=storer.nrows)[0]
divisions.append([division_start, division_end])
else:
divisions.append(None)
return keys, stops, divisions
def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, mode=mode, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize, division))
name = 'read-hdf-' + token
if empty.ndim == 1:
base = {'name': empty.name, 'mode': mode}
else:
base = {'columns': empty.columns, 'mode': mode}
if start >= stop:
raise ValueError("Start row number ({}) is above or equal to stop "
"row number ({})".format(start, stop))
if division:
dsk = {(name, 0): (_pd_read_hdf, path, key, lock,
base)}
divisions = division
else:
def update(s):
new = base.copy()
new.update({'start': s, 'stop': s + chunksize})
return new
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
update(s)))
for i, s in enumerate(range(start, stop, chunksize)))
divisions = [None] * (len(dsk) + 1)
return new_dd_object(dsk, name, empty, divisions)
keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from ..multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, sorted_index=False, lock=True, mode='a'):
"""
Read HDF files into a Dask DataFrame
Read hdf files into a dask dataframe. This function is like
``pandas.read_hdf``, except it can read from a single large file, or from
multiple files, or from multiple keys from the same file.
Parameters
----------
pattern : pattern (string), or buffer to read from. Can contain wildcards
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : optional, a list of columns that if not None, will limit the
return columns
chunksize : optional, positive integer
maximal number of rows per partition
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = get_scheduler_lock()
key = key if key.startswith('/') else '/' + key
paths = sorted(glob(pattern))
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError("When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes")
from ..multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
sorted_index=sorted_index,
lock=lock, mode=mode)
for path in paths])
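# Illustrative sketch (added): with ``sorted_index=True`` each matched key is
# loaded as a single partition and its first and last index values are used as
# divisions, instead of chunking by ``chunksize``.
#
#   >>> ddf = read_hdf('myfile.1.hdf5', '/x', sorted_index=True)  # doctest: +SKIP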
if PY3:
from ..core import _Frame
_Frame.to_hdf.__doc__ = to_hdf.__doc__
| {
"repo_name": "gameduell/dask",
"path": "dask/dataframe/io/hdf.py",
"copies": "2",
"size": "13946",
"license": "bsd-3-clause",
"hash": 7106480953043793000,
"line_mean": 34.3959390863,
"line_max": 89,
"alpha_frac": 0.5890577943,
"autogenerated": false,
"ratio": 4.05406976744186,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00045916979432936936,
"num_lines": 394
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
def filenames(hdfs, path):
"""
Filenames in Hadoop File System under specific path
Parameters
----------
hdfs: pywebhdfs.webhdfs.PyWebHdfsClient instance
path: string
Directory on HDFS
Path can be either
1. A directory -- 'user/data/myfiles/'
2. A globstring -- 'user/data/myfiles/*/*.json'
Returns all filenames within this directory and all subdirectories
"""
if '*' in path:
directory = path[:path.find('*')].rsplit('/', 1)[0]
return [fn for fn in filenames(hdfs, directory)
if fnmatch(fn, path + '*')]
path = path.strip('/')
listdir = hdfs.list_dir(path)['FileStatuses']['FileStatus']
files = ['%s/%s' % (path, d['pathSuffix'])
for d in listdir if d['type'] == 'FILE']
directories = ['%s/%s' % (path, d['pathSuffix'])
for d in listdir if d['type'] == 'DIRECTORY']
return files + sum([filenames(hdfs, d) for d in directories], [])
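# Illustrative usage sketch (added); the namenode address and paths are
# hypothetical.
#
#   >>> from pywebhdfs.webhdfs import PyWebHdfsClient           # doctest: +SKIP
#   >>> hdfs = PyWebHdfsClient(host='namenode', port='50070')   # doctest: +SKIP
#   >>> filenames(hdfs, 'user/data/myfiles/*.json')             # doctest: +SKIP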
| {
"repo_name": "vikhyat/dask",
"path": "dask/hdfs_utils.py",
"copies": "2",
"size": "1079",
"license": "bsd-3-clause",
"hash": 3005724060357743600,
"line_mean": 28.9722222222,
"line_max": 70,
"alpha_frac": 0.594068582,
"autogenerated": false,
"ratio": 3.9094202898550723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5503488871855072,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fractions import Fraction
import sys
import subprocess
sys.path.insert(0, '../')
from compiler import *
from constructs import *
def test_tree_graph():
R = Parameter(Int, "R")
C = Parameter(Int, "C")
x = Variable(Int, "x")
y = Variable(Int, "y")
row = Interval(Int, 0, R-1)
col = Interval(Int, 0, C-1)
img = Image(Float, "img", [R, C])
alpha = 0.6
F = {}
L = 3
for l in range(0, (2**L)-1):
F[l] = Function(([x, y], [row, col]), Float, "F"+str(l))
for l in range(0, (2**(L-1))-1):
F[l].defn = [ (alpha) * F[2*l+1](x, y) + (1-alpha) * F[2*l+2](x, y) ]
for l in range((2**(L-1))-1, (2**L)-1):
F[l].defn = [ l * img(x, y) ]
p_est = [ (R, 1024), (C, 1024) ]
# build the pipeline
pipeline = buildPipeline([F[0]],
param_estimates = p_est,
group_size = 100,
pipe_name = "tree")
filename = "tree_graph"
dot_file = filename+".dot"
png_file = filename+".png"
g = pipeline.pipeline_graph
g.write(filename+".dot")
dotty_str = "dot -Tpng "+dot_file+" -o "+png_file
subprocess.check_output(dotty_str, shell=True)
filename = 'tree.cpp'
c_file = open(filename, 'w')
c_file.write(pipeline.generate_code().__str__())
c_file.close()
return
| {
"repo_name": "bollu/polymage",
"path": "sandbox/tests/test_grouping.py",
"copies": "1",
"size": "1428",
"license": "apache-2.0",
"hash": -8775009632833237000,
"line_mean": 24.0526315789,
"line_max": 77,
"alpha_frac": 0.5196078431,
"autogenerated": false,
"ratio": 2.9812108559498958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40008186990498956,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fractions import Fraction
import sys
import subprocess
sys.path.insert(0, '../')
from compiler import *
from constructs import *
def test_blur():
R = Parameter(Int, "R")
C = Parameter(Int, "C")
x = Variable(Int, "x")
y = Variable(Int, "y")
c = Variable(Int, "c")
cr = Interval(Int, 0, 2)
xrow = Interval(Int, 1, R)
xcol = Interval(Int, 1, C)
yrow = Interval(Int, 2, R-1)
ycol = Interval(Int, 2, C-1)
xcond = Condition(x, '>=', 1) & Condition(x, '<=', R) & \
Condition(y, '>=', 1) & Condition(y, '<=', C)
ycond = Condition(x, '>=', 2) & Condition(x, '<=', R-1) & \
Condition(y, '>=', 2) & Condition(y, '<=', C-1)
img = Image(Float, "input", [3, R+2, C+2])
blurx = Function(([c, x, y], [cr, xrow, xcol]), Float, "blurx")
blurx.defn = [ Case(xcond, (img(c, x-1, y) + \
img(c, x , y) + \
img(c, x+1, y)) / 3.0) ]
blury = Function(([c, x, y], [cr, yrow, ycol]), Float, "blury")
blury.defn = [ Case(ycond, (blurx(c, x, y-1) + \
blurx(c, x, y  ) + \
blurx(c, x, y+1)) / 3.0) ]
groups = [[blurx, blury]]
p_est = [ (R, 1024), (C, 1024) ]
# build the pipeline
pipeline = buildPipeline([blury],
grouping = groups,
param_estimates = p_est,
pipe_name = "blur")
filename = "blur_graph"
dot_file = filename+".dot"
png_file = filename+".png"
g = pipeline.pipeline_graph
g.write(filename+".dot")
dotty_str = "dot -Tpng "+dot_file+" -o "+png_file
subprocess.check_output(dotty_str, shell=True)
filename = 'blur_naive.cpp'
c_file = open(filename, 'w')
c_file.write(pipeline.generate_code().__str__())
c_file.close()
| {
"repo_name": "bollu/polymage",
"path": "sandbox/tests/test_blur.py",
"copies": "1",
"size": "1954",
"license": "apache-2.0",
"hash": 7811141571456449000,
"line_mean": 27.7352941176,
"line_max": 67,
"alpha_frac": 0.4892528147,
"autogenerated": false,
"ratio": 3.043613707165109,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8919765051026841,
"avg_score": 0.022620294167653446,
"num_lines": 68
} |
from __future__ import absolute_import, division, print_function
from fractions import Fraction
import sys
sys.path.insert(0, '../')
from compiler import *
from constructs import *
from expression import *
R = Parameter(Int, "R")
C = Parameter(Int, "C")
P = Parameter(Int, "P")
B = Parameter(Int, "B")
x = Variable(Int, "x")
y = Variable(Int, "y")
z = Variable(Int, "z")
w = Variable(Int, "w")
c = Variable(Int, "c")
row = Interval(Int, 0, R+1)
col = Interval(Int, 0, C+1)
plane = Interval(Int, 0, P+1)
box = Interval(Int, 0, B+1)
cr = Interval(Int, 0, 2)
cond = Condition(x, '>=', 1) & Condition(x, '<=', R) & \
Condition(y, '>=', 1) & Condition(y, '<=', C)
cond3D = Condition(x, '>=', 1) & Condition(x, '<=', R) & \
Condition(y, '>=', 1) & Condition(y, '<=', C) & \
Condition(z, '>=', 1) & Condition(z, '<=', P)
cond4D = Condition(x, '>=', 1) & Condition(x, '<=', R) & \
Condition(y, '>=', 1) & Condition(y, '<=', C) & \
Condition(z, '>=', 1) & Condition(z, '<=', P) & \
Condition(w, '>=', 1) & Condition(w, '<=', B)
def test_gray():
img = Image(Float, "img", [R+2, C+2, 3])
gray = Function(([x, y], [row, col]), Float, "gray")
gray.defn = [ Case(cond, img(x, y, 0) * 0.299 \
+ img(x, y, 1) * 0.587 \
+ img(x, y, 2) * 0.114) ]
vector = Function(([x], [row]), Float, "vector")
vector.defn = [ Case(cond, gray(x, 0)) ]
pipeline = buildPipeline([vector], grouping = [[gray, vector]])
return
def test_flip():
img = Image(Int, "img", [R+2, C+2])
flip1 = Function(([y, x], [col, row]), Int, "flip1")
flip1.defn = [ Case(cond, img(x+1, y) + img(x, y+1)) ]
flip2 = Function(([x, y], [row, col]), Int, "flip2")
flip2.defn = [ Case(cond, flip1(y-1, x) + flip1(y, x-1)) ]
pipeline = buildPipeline([flip2], grouping = [[flip1, flip2]])
return
def test_robin():
img = Image(Short, "img", [R+2, C+2, 3])
robin1 = Function(([c, x, y], [cr, row, col]), Short, "robin1")
robin1.defn = [ Case(cond, img(x, y, c) + 1) ]
robin2 = Function(([y, c, x], [col, cr, row]), Short, "robin2")
robin2.defn = [ Case(cond, robin1(c, x, y) - 1) ]
robin3 = Function(([x, y, c], [row, col, cr]), Short, "robin3")
robin3.defn = [ Case(cond, robin2(y, c, x) + 1) ]
pipeline = buildPipeline([robin3], \
grouping = [[robin1, robin2, robin3]])
return
# *** Extreme Case *** not so practical, can be used as a stress test
#def test_high_dim():
def high_dim():
img = Image(Short, "img", [B+2, P+2, R+2, C+2])
random0 = Function(([w, z, y, x], [box, plane, row, col]), \
Double, "random0")
random0.defn = [ Case(cond4D, img(w, z, y, x) - 1) ]
random1 = Function(([w, y, z, x], [box, row, plane, col]), \
Double, "random1")
random1.defn = [ Case(cond4D, random0(w, z, y, x) + 1) ]
random2 = Function(([z, x, y, w], [plane, col, row, box]), \
Double, "random2")
random2.defn = [ Case(cond4D, random1(w, y, z, x) - 1) ]
random3 = Function(([x, w, y, z], [col, box, row, plane]), \
Double, "random3")
random3.defn = [ Case(cond4D, random2(z, x, y, w) + 1) ]
random4 = Function(([y, z, w, x], [row, plane, box, col]), \
Double, "random4")
random4.defn = [ Case(cond4D, random3(x, w, y, z) - 1) ]
random5 = Function(([z, w, x, y], [plane, box, col, row]), \
Double, "random5")
random5.defn = [ Case(cond4D, random4(y, z, w, x) + 1) ]
random6 = Function(([z, y, x], [plane, row, col]), \
Double, "random6")
random6.defn = [ Case(cond3D, ( \
random5(z, 0, x, y) + \
random5(z, 1, x, y) + \
random5(z, 2, x, y) - 3) / 3.0
) ]
random7 = Function(([y, x, w, z], [row, col, box, plane]), \
Double, "random7")
random7.defn = [ Case(cond4D, random6(z, y, x) * 2) ]
groups = [[random0, random1, random2, random3, \
random4, random5, random6, random7]]
#groups = [[random1, random2, random3], \
# [random4, random5, random6, random7]]
pipeline = buildPipeline([random7], grouping = groups)
return
| {
"repo_name": "bollu/polymage",
"path": "sandbox/tests/test_alignment.py",
"copies": "1",
"size": "4404",
"license": "apache-2.0",
"hash": 1805524718191090400,
"line_mean": 32.1127819549,
"line_max": 69,
"alpha_frac": 0.4943233424,
"autogenerated": false,
"ratio": 2.8541801685029164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38485035109029164,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fractions import Fraction
import sys
sys.path.insert(0, '../')
from compiler import *
from constructs import *
def test_histogram():
R = Parameter(Int, "R")
C = Parameter(Int, "C")
x = Variable(Int, "x")
y = Variable(Int, "y")
c = Variable(Int, "c")
row = Interval(Int, 0, R+1)
col = Interval(Int, 0, C+1)
cr = Interval(Int, 0, 2)
cond = Condition(x, '>=', 0) & Condition(x, '<=', R-1) & \
Condition(y, '<=', C-1) & Condition(y, '>=', 0)
img = Image(Float, "img", [R+2, C+2])
cond0 = Condition(c, '==', 0)
cond1 = Condition(c, '==', 1)
cond2 = Condition(c, '==', 2)
# Iterates over [c, x, y] and reduces img(x, y) to hist(c, x).
# Over each row of img(x, y), op(c) is applied and the result is stored
# at hist(c, x). Here we use op(0) = Min, op(1) = Max, op(2) = Sum.
hist = Reduction(([c, x], [cr, row]), \
([c, x, y], [cr, row, col]), \
Float, "hist")
hist.defn = [ Case(cond0, Reduce(hist(c, x),
img(x, y),
Op.Min)),
Case(cond1, Reduce(hist(c, x),
img(x, y),
Op.Max)),
Case(cond2, Reduce(hist(c, x),
img(x, y),
Op.Sum)) ]
pipeline = buildPipeline([hist], \
pipe_name="hist")
'''
filename = 'hist_graph.dot'
pipeline.originalGraph.write(filename)
filename = 'hist_graph_grouped.dot'
g = pipeline.drawPipelineGraph()
g.write(filename)
'''
filename = 'hist_naive.cpp'
c_file = open(filename, 'w')
c_file.write(pipeline.generate_code().__str__())
c_file.close()
| {
"repo_name": "bollu/polymage",
"path": "sandbox/tests/test_hist.py",
"copies": "1",
"size": "1912",
"license": "apache-2.0",
"hash": -4185547047279912000,
"line_mean": 28.4153846154,
"line_max": 75,
"alpha_frac": 0.4665271967,
"autogenerated": false,
"ratio": 3.408199643493761,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4374726840193761,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fractions import Fraction
import sys
sys.path.insert(0, '../')
from constructs import *
from expression import *
def test_affine():
N = Parameter(UInt, "N")
x = Variable(UInt, "x")
y = Variable(UInt, "y")
assert(isAffine(x + y) == True)
assert(isAffine(3) == True)
assert(isAffine(x*y) == False)
assert(isAffine(-x + N + 3*y) == True)
assert(isAffine(2*x + N/2 + 3*y) == True)
c1 = Condition(x, '<', 2*y)
c2 = Condition(x, '>', 2-y)
c3 = Condition(x, '>=', x*y)
c4 = Condition(x + 2*N, '<=', y + N)
c5 = Condition(x*N, '!=', y)
assert(isAffine(c1) == True)
assert(isAffine(c2) == True)
assert(isAffine(c3) == False)
assert(isAffine(c4) == True)
assert(isAffine(c5) == False)
def test_coeff():
N = Parameter(UInt, "N")
x = Variable(UInt, "x")
y = Variable(UInt, "y")
coeff = get_affine_var_and_param_coeff(1+x)
assert(coeff[x] == 1)
coeff = get_affine_var_and_param_coeff(1+x +y)
assert(coeff[x] == 1 and coeff[y] == 1)
coeff = get_affine_var_and_param_coeff(3)
assert(coeff == {})
coeff = get_affine_var_and_param_coeff(N*x + y)
assert(coeff == {})
coeff = get_affine_var_and_param_coeff(x*y)
assert(coeff == {})
coeff = get_affine_var_and_param_coeff(2*(x*3+y +N +x + y -5)
+ 3*(-x) + 4*(-y) + N)
assert(coeff[x] == 5 and coeff[y] == 0 and coeff[N] == 3)
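# Worked expansion (added comment) for the last assertion above:
#   2*(3x + y + N + x + y - 5) = 8x + 4y + 2N - 10
#   adding 3*(-x) + 4*(-y) + N gives 5x + 0y + 3N - 10,
# hence coeff[x] == 5, coeff[y] == 0 and coeff[N] == 3.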
| {
"repo_name": "bollu/polymage",
"path": "sandbox/tests/test_expression.py",
"copies": "1",
"size": "1502",
"license": "apache-2.0",
"hash": 7544326229378586000,
"line_mean": 30.9574468085,
"line_max": 66,
"alpha_frac": 0.5579227696,
"autogenerated": false,
"ratio": 2.7014388489208634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37593616185208634,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ._funcs import (
asdict,
assoc,
astuple,
evolve,
has,
)
from ._make import (
Attribute,
Factory,
NOTHING,
attr,
attributes,
fields,
make_class,
validate,
)
from ._config import (
get_run_validators,
set_run_validators,
)
from . import exceptions
from . import filters
from . import converters
from . import validators
__version__ = "17.2.0"
__title__ = "attrs"
__description__ = "Classes Without Boilerplate"
__uri__ = "http://www.attrs.org/"
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
s = attrs = attributes
ib = attrib = attr
__all__ = [
"Attribute",
"Factory",
"NOTHING",
"asdict",
"assoc",
"astuple",
"attr",
"attrib",
"attributes",
"attrs",
"converters",
"evolve",
"exceptions",
"fields",
"filters",
"get_run_validators",
"has",
"ib",
"make_class",
"s",
"set_run_validators",
"validate",
"validators",
]
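# Illustrative usage sketch (not part of the package source): the aliases above
# make ``attr.s``/``attr.ib`` and ``attr.attrs``/``attr.attrib`` equivalent.
# The class below is purely hypothetical.
#
#   >>> import attr                     # doctest: +SKIP
#   >>> @attr.s
#   ... class Point(object):
#   ...     x = attr.ib(default=0)
#   ...     y = attr.ib(default=0)
#   >>> Point(1, 2)                     # doctest: +SKIP
#   Point(x=1, y=2)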
| {
"repo_name": "Alecto3-D/testable-greeter",
"path": "bb-master/sandbox/lib/python3.5/site-packages/attr/__init__.py",
"copies": "2",
"size": "1173",
"license": "mit",
"hash": 4878455908172199000,
"line_mean": 15.5211267606,
"line_max": 64,
"alpha_frac": 0.5652173913,
"autogenerated": false,
"ratio": 3.196185286103542,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.47614026774035423,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from .csv import *
from .json import *
from .hdf5 import *
from .filesystem import *
from .sql import *
from glob import glob
import gzip
from ..compatibility import urlopen
from ..py2help import _strtypes
__all__ = ['resource', 'copy']
filetypes = {'csv': CSV,
'tsv': CSV,
'json': JSON,
'h5': HDF5,
'hdf5': HDF5}
opens = {'http': urlopen,
'https': urlopen,
#'ssh': paramiko.open?
}
def resource(uri, **kwargs):
""" Get data resource from universal resource indicator
Supports the following logic:
* Infer data format based on the file extension (.csv, .json, .hdf5)
* Use ``gzip.open`` if files end in ``.gz`` extension (csv, json only)
* Use ``urlopen`` if web protocols detected (http, https)
* Use SQL if text ``sql`` found in protocol string
URI may be in any of the following forms
>>> uri = '/path/to/data.csv' # csv, json, etc...
>>> uri = '/path/to/data.json.gz' # handles gzip
>>> uri = '/path/to/*/many*/data.*.json' # glob string - many files
>>> uri = '/path/to/data.hdf5::/path/within/hdf5' # HDF5 path :: datapath
>>> uri = 'postgresql://sqlalchemy.uri::tablename' # SQLAlchemy :: tablename
>>> uri = 'http://api.domain.com/data.json' # Web requests
Note that this follows standard ``protocol://path`` syntax. In cases where
more information is needed, such as an HDF5 datapath or a SQL table name
the additional information follows two colons `::` as in the following
/path/to/data.hdf5::/datapath
"""
descriptor = None
args = []
if '::' in uri:
uri, datapath = uri.rsplit('::')
args.insert(0, datapath)
extensions = uri.split('.')
if extensions[-1] == 'gz':
kwargs['open'] = kwargs.get('open', gzip.open)
extensions.pop()
descriptor = filetypes.get(extensions[-1], None)
if '://' in uri:
protocol, _ = uri.split('://')
if protocol in opens:
kwargs['open'] = kwargs.get('open', opens[protocol])
if 'sql' in protocol:
descriptor = SQL
try:
filenames = glob(uri)
except:
filenames = []
if len(filenames) > 1:
args = [partial(descriptor, *args)] # pack sub descriptor into args
descriptor = Files
if descriptor:
return descriptor(uri, *args, **kwargs)
raise ValueError('Unknown resource type\n\t%s' % uri)
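# Illustrative usage sketch (added); the paths are hypothetical.
#
#   >>> csv = resource('/data/accounts.csv')            # doctest: +SKIP
#   >>> h5 = resource('/data/points.hdf5::/points')     # doctest: +SKIP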
def copy(src, dest, **kwargs):
""" Copy content from one data descriptor to another """
dest.extend_chunks(src.chunks(**kwargs))
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/usability.py",
"copies": "1",
"size": "2752",
"license": "bsd-3-clause",
"hash": 5925662245249328000,
"line_mean": 30.2727272727,
"line_max": 80,
"alpha_frac": 0.5904796512,
"autogenerated": false,
"ratio": 3.8870056497175143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4977485300917514,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from hashlib import md5
from operator import attrgetter
import os
import sys
import uuid
import warnings
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import bind_method, unicode
from .context import _globals
from .utils import Dispatch, ignoring
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def _visualize(self, filename='mydask', format=None, optimize_graph=False):
warn = DeprecationWarning("``_visualize`` is deprecated, use "
"``visualize`` instead.")
warnings.warn(warn)
return self.visualize(filename=filename, format=format,
optimize_graph=optimize_graph)
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
groups = groupby(attrgetter('_optimize'), variables)
get = kwargs.pop('get', None) or _globals['get']
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = merge([opt(merge([v.dask for v in val]),
[v._keys() for v in val], **kwargs)
for opt, val in groups.items()])
keys = [var._keys() for var in variables]
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(a, next(results_iter))
for a in args)
def visualize(*args, **kwargs):
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([arg._optimize(arg.dask, arg._keys()) for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
return str(func)
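# Illustrative sketch (added): equal ``partial`` objects normalize to the same
# token because the wrapped function, positional args and sorted keywords are
# all captured.
#
#   >>> from functools import partial                                       # doctest: +SKIP
#   >>> tokenize(partial(max, key=len)) == tokenize(partial(max, key=len))  # doctest: +SKIP
#   True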
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice),
identity)
@partial(normalize_token.register, dict)
def normalize_dict(d):
return normalize_token(sorted(d.items()))
@partial(normalize_token.register, (tuple, list, set))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@partial(normalize_token.register, object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@partial(normalize_token.register, Base)
def normalize_base(b):
return type(b).__name__, b.key
with ignoring(ImportError):
import pandas as pd
@partial(normalize_token.register, pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@partial(normalize_token.register, pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@partial(normalize_token.register, pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@partial(normalize_token.register, pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
with ignoring(ImportError):
import numpy as np
@partial(normalize_token.register, np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and hasattr(x, 'filename'):
return x.filename, os.path.getmtime(x.filename), x.dtype, x.shape
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat)).hexdigest()
except TypeError:
data = md5(b'-'.join([str(item).encode() for item in x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
with ignoring(ImportError):
from collections import OrderedDict
@partial(normalize_token.register, OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
| {
"repo_name": "vikhyat/dask",
"path": "dask/base.py",
"copies": "1",
"size": "7836",
"license": "bsd-3-clause",
"hash": 5843821734337380000,
"line_mean": 31.65,
"line_max": 90,
"alpha_frac": 0.6010719755,
"autogenerated": false,
"ratio": 3.8506142506142504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.495168622611425,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from hashlib import md5
from operator import attrgetter
import pickle
import os
import uuid
import warnings
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import bind_method, unicode
from .context import _globals
from .utils import Dispatch, ignoring
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""
Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def _visualize(self, filename='mydask', format=None, optimize_graph=False):
warn = DeprecationWarning("``_visualize`` is deprecated, use "
"``visualize`` instead.")
warnings.warn(warn)
return self.visualize(filename=filename, format=format,
optimize_graph=optimize_graph)
def compute(self, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
"""
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If the object is a dask collection, it's
computed and the result is returned. Otherwise it's passed through
unchanged.
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults for
the collections.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
get = kwargs.pop('get', None) or _globals['get']
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
if kwargs.get('optimize_graph', True):
groups = groupby(attrgetter('_optimize'), variables)
dsk = merge([opt(merge([v.dask for v in val]),
[v._keys() for v in val], **kwargs)
for opt, val in groups.items()])
else:
dsk = merge(var.dask for var in variables)
keys = [var._keys() for var in variables]
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(next(results_iter))
for a in args)
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([arg._optimize(arg.dask, arg._keys()) for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
try:
return pickle.dumps(func, protocol=0)
except:
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except:
return str(func)
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice),
identity)
@partial(normalize_token.register, dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@partial(normalize_token.register, (tuple, list, set))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@partial(normalize_token.register, object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@partial(normalize_token.register, Base)
def normalize_base(b):
return type(b).__name__, b.key
with ignoring(ImportError):
import pandas as pd
@partial(normalize_token.register, pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@partial(normalize_token.register, pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@partial(normalize_token.register, pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@partial(normalize_token.register, pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
with ignoring(ImportError):
import numpy as np
@partial(normalize_token.register, np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and hasattr(x, 'filename'):
return x.filename, os.path.getmtime(x.filename), x.dtype, x.shape
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat).encode('utf-8')).hexdigest()
except TypeError:
data = md5(b'-'.join([str(item).encode() for item in x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
with ignoring(ImportError):
from collections import OrderedDict
@partial(normalize_token.register, OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
| {
"repo_name": "mikegraham/dask",
"path": "dask/base.py",
"copies": "2",
"size": "11960",
"license": "bsd-3-clause",
"hash": -5314435182935780000,
"line_mean": 32.7853107345,
"line_max": 90,
"alpha_frac": 0.6008361204,
"autogenerated": false,
"ratio": 4.1015089163237315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5702345036723732,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from hashlib import md5
from operator import attrgetter
import pickle
import os
import uuid
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import bind_method, unicode
from .context import _globals
from .utils import Dispatch, ignoring
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""
Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def compute(self, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
"""
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If the object is a dask collection, it's
computed and the result is returned. Otherwise it's passed through
unchanged.
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults for
the collections.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
get = kwargs.pop('get', None) or _globals['get']
optimizations = (kwargs.pop('optimizations', None) or
_globals.get('optimizations', []))
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
if kwargs.get('optimize_graph', True):
groups = groupby(attrgetter('_optimize'), variables)
groups = {opt: [merge([v.dask for v in val]),
[v._keys() for v in val]]
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: [opt(dsk, keys), keys]
for k, (dsk, keys) in groups.items()}
dsk = merge([opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()])
else:
dsk = merge(var.dask for var in variables)
keys = [var._keys() for var in variables]
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(next(results_iter))
for a in args)
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([arg._optimize(arg.dask, arg._keys()) for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except:
return str(func)
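# Illustrative sketch: normalize_function aims to give a deterministic,
# comparable description of a callable.  operator.add is used here only
# because it pickles to a stable, non-__main__ reference; the helper below
# is illustrative and never called at import time.
def _example_normalize_function():
    from operator import add
    # Equal partials normalize to equal tuples, so tokens built on top of
    # them stay stable across calls.
    return normalize_function(partial(add, 1)) == normalize_function(partial(add, 1))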
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice),
identity)
@partial(normalize_token.register, dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@partial(normalize_token.register, (tuple, list, set))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@partial(normalize_token.register, object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@partial(normalize_token.register, Base)
def normalize_base(b):
return type(b).__name__, b.key
with ignoring(ImportError):
import pandas as pd
@partial(normalize_token.register, pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@partial(normalize_token.register, pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@partial(normalize_token.register, pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@partial(normalize_token.register, pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
with ignoring(ImportError):
import numpy as np
@partial(normalize_token.register, np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
offset = 0 # root memmap's have mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat).encode('utf-8')).hexdigest()
except TypeError:
data = md5(b'-'.join([str(item).encode() for item in x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
with ignoring(ImportError):
from collections import OrderedDict
@partial(normalize_token.register, OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/base.py",
"copies": "1",
"size": "12334",
"license": "mit",
"hash": -2786354958232452000,
"line_mean": 32.5163043478,
"line_max": 90,
"alpha_frac": 0.5946165072,
"autogenerated": false,
"ratio": 4.086812458581842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5181428965781842,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from itertools import chain
import datashape
from datashape import (
DataShape, Option, Record, Unit, dshape, var, Fixed, Var, promote, object_,
)
from datashape.predicates import isscalar, iscollection, isrecord
from toolz import (
isdistinct, frequencies, concat as tconcat, unique, get, first, compose,
keymap,
)
import toolz.curried.operator as op
from odo.utils import copydoc
from .core import common_subexpression
from .expressions import Expr, ElemWise, label, Field
from .expressions import dshape_method_list
from ..compatibility import zip_longest, _strtypes
from ..utils import listpack
__all__ = ['Sort', 'Distinct', 'Head', 'Merge', 'IsIn', 'isin', 'distinct',
'merge', 'head', 'sort', 'Join', 'join', 'transform', 'Concat',
'concat', 'Tail', 'tail']
class Sort(Expr):
""" Table in sorted order
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.sort('amount', ascending=False).schema
dshape("{name: string, amount: int32}")
Some backends support sorting by arbitrary rowwise tables, e.g.
>>> accounts.sort(-accounts.amount) # doctest: +SKIP
"""
__slots__ = '_hash', '_child', '_key', 'ascending'
@property
def dshape(self):
return self._child.dshape
@property
def key(self):
        if self._key == () or self._key is None:
return self._child.fields[0]
if isinstance(self._key, tuple):
return list(self._key)
else:
return self._key
def _len(self):
return self._child._len()
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s.sort(%s, ascending=%s)" % (self._child, repr(self._key),
self.ascending)
def sort(child, key=None, ascending=True):
""" Sort a collection
Parameters
----------
key : str, list of str, or Expr
        Defines what you want to sort by.
* A single column string: ``t.sort('amount')``
* A list of column strings: ``t.sort(['name', 'amount'])``
* An expression: ``t.sort(-t.amount)``
ascending : bool, optional
Determines order of the sort
"""
if not isrecord(child.dshape.measure):
key = None
if isinstance(key, list):
key = tuple(key)
return Sort(child, key, ascending)
class Distinct(Expr):
""" Remove duplicate elements from an expression
Parameters
----------
on : tuple of :class:`~blaze.expr.expressions.Field`
The subset of fields or names of fields to be distinct on.
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = distinct(t)
>>> data = [('Alice', 100, 1),
... ('Bob', 200, 2),
... ('Alice', 100, 1)]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 100, 1), ('Bob', 200, 2)]
Use a subset by passing `on`:
>>> import pandas as pd
>>> e = distinct(t, 'name')
>>> data = pd.DataFrame([['Alice', 100, 1],
... ['Alice', 200, 2],
... ['Bob', 100, 1],
... ['Bob', 200, 2]],
... columns=['name', 'amount', 'id'])
>>> compute(e, data)
name amount id
0 Alice 100 1
1 Bob 100 1
"""
__slots__ = '_hash', '_child', 'on'
@property
def dshape(self):
return datashape.var * self._child.dshape.measure
@property
def fields(self):
return self._child.fields
@property
def _name(self):
return self._child._name
def __str__(self):
return 'distinct({child}{on})'.format(
child=self._child,
on=(', ' if self.on else '') + ', '.join(map(str, self.on))
)
@copydoc(Distinct)
def distinct(expr, *on):
fields = frozenset(expr.fields)
_on = []
append = _on.append
for n in on:
if isinstance(n, Field):
if n._child.isidentical(expr):
n = n._name
else:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
if not isinstance(n, _strtypes):
raise TypeError('on must be a name or field, not: {0}'.format(n))
elif n not in fields:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
append(n)
return Distinct(expr, tuple(_on))
class _HeadOrTail(Expr):
__slots__ = '_hash', '_child', 'n'
@property
def dshape(self):
return self.n * self._child.dshape.subshape[0]
def _len(self):
return min(self._child._len(), self.n)
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.%s(%d)' % (self._child, type(self).__name__.lower(), self.n)
class Head(_HeadOrTail):
""" First `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.head(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Tail
"""
pass
@copydoc(Head)
def head(child, n=10):
return Head(child, n)
class Tail(_HeadOrTail):
""" Last `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.tail(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Head
"""
pass
@copydoc(Tail)
def tail(child, n=10):
return Tail(child, n)
def transform(t, replace=True, **kwargs):
""" Add named columns to table
>>> from blaze import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> transform(t, z=t.x + t.y).fields
['x', 'y', 'z']
"""
if replace and set(t.fields).intersection(set(kwargs)):
t = t[[c for c in t.fields if c not in kwargs]]
args = [t] + [v.label(k) for k, v in sorted(kwargs.items(), key=first)]
return merge(*args)
def schema_concat(exprs):
""" Concatenate schemas together. Supporting both Records and Units
In the case of Units, the name is taken from expr.name
"""
names, values = [], []
for c in exprs:
schema = c.schema[0]
if isinstance(schema, Option):
schema = schema.ty
if isinstance(schema, Record):
names.extend(schema.names)
values.extend(schema.types)
elif isinstance(schema, Unit):
names.append(c._name)
values.append(schema)
else:
raise TypeError("All schemas must have Record or Unit shape."
"\nGot %s" % c.schema[0])
return dshape(Record(list(zip(names, values))))
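# Illustrative sketch of schema_concat: Unit-shaped fields contribute their
# expression names, while Record-shaped children contribute their own field
# names.  The symbol 't' and its dshape are arbitrary illustration values and
# the helper is never called at import time.
def _example_schema_concat():
    from blaze import symbol
    t = symbol('t', 'var * {x: int32, y: float64}')
    # The result should compare equal to dshape('{x: int32, y: float64}').
    return schema_concat([t.x, t.y])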
class Merge(ElemWise):
""" Merge many fields together
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, x: int, y: real}')
>>> merge(accounts.name, z=accounts.x + accounts.y).fields
['name', 'z']
"""
__slots__ = '_hash', '_child', 'children'
@property
def schema(self):
return schema_concat(self.children)
@property
def fields(self):
return list(tconcat(child.fields for child in self.children))
def _subterms(self):
yield self
for i in self.children:
for node in i._subterms():
yield node
def _get_field(self, key):
for child in self.children:
if key in child.fields:
if isscalar(child.dshape.measure):
return child
else:
return child[key]
def _project(self, key):
if not isinstance(key, (tuple, list)):
raise TypeError("Expected tuple or list, got %s" % key)
return merge(*[self[c] for c in key])
def _leaves(self):
return list(unique(tconcat(i._leaves() for i in self.children)))
@copydoc(Merge)
def merge(*exprs, **kwargs):
if len(exprs) + len(kwargs) == 1:
if exprs:
return exprs[0]
if kwargs:
[(k, v)] = kwargs.items()
return v.label(k)
# Get common sub expression
exprs += tuple(label(v, k) for k, v in sorted(kwargs.items(), key=first))
try:
child = common_subexpression(*exprs)
except Exception:
raise ValueError("No common subexpression found for input expressions")
result = Merge(child, exprs)
if not isdistinct(result.fields):
raise ValueError(
"Repeated columns found: " + ', '.join(
k for k, v in frequencies(result.fields).items() if v > 1
),
)
return result
def unpack(l):
""" Unpack items from collections of nelements 1
>>> unpack('hello')
'hello'
>>> unpack(['hello'])
'hello'
"""
if isinstance(l, (tuple, list, set)) and len(l) == 1:
return next(iter(l))
else:
return l
class Join(Expr):
""" Join two tables on common columns
Parameters
----------
lhs, rhs : Expr
Expressions to join
on_left : str, optional
The fields from the left side to join on.
If no ``on_right`` is passed, then these are the fields for both
sides.
on_right : str, optional
The fields from the right side to join on.
how : {'inner', 'outer', 'left', 'right'}
What type of join to perform.
suffixes: pair of str
The suffixes to be applied to the left and right sides
in order to resolve duplicate field names.
Examples
--------
>>> from blaze import symbol
>>> names = symbol('names', 'var * {name: string, id: int}')
>>> amounts = symbol('amounts', 'var * {amount: int, id: int}')
Join tables based on shared column name
>>> joined = join(names, amounts, 'id')
Join based on different column names
>>> amounts = symbol('amounts', 'var * {amount: int, acctNumber: int}')
>>> joined = join(names, amounts, 'id', 'acctNumber')
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = (
'_hash', 'lhs', 'rhs', '_on_left', '_on_right', 'how', 'suffixes',
)
__inputs__ = 'lhs', 'rhs'
@property
def on_left(self):
on_left = self._on_left
if isinstance(on_left, tuple):
return list(on_left)
return on_left
@property
def on_right(self):
on_right = self._on_right
if isinstance(on_right, tuple):
return list(on_right)
return on_right
@property
def schema(self):
"""
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> s = symbol('t', 'var * {name: string, id: int}')
>>> join(t, s).schema
dshape("{name: string, amount: int32, id: int32}")
>>> join(t, s, how='left').schema
dshape("{name: string, amount: int32, id: ?int32}")
Overlapping but non-joined fields append _left, _right
>>> a = symbol('a', 'var * {x: int, y: int}')
>>> b = symbol('b', 'var * {x: int, y: int}')
>>> join(a, b, 'x').fields
['x', 'y_left', 'y_right']
"""
option = lambda dt: dt if isinstance(dt, Option) else Option(dt)
on_left = self.on_left
if not isinstance(on_left, list):
on_left = on_left,
on_right = self.on_right
if not isinstance(on_right, list):
on_right = on_right,
right_types = keymap(
dict(zip(on_right, on_left)).get,
self.rhs.dshape.measure.dict,
)
joined = (
(name, promote(dt, right_types[name], promote_option=False))
for n, (name, dt) in enumerate(filter(
compose(op.contains(on_left), first),
self.lhs.dshape.measure.fields,
))
)
left = [
(name, dt) for name, dt in zip(
self.lhs.fields,
types_of_fields(self.lhs.fields, self.lhs)
) if name not in on_left
]
right = [
(name, dt) for name, dt in zip(
self.rhs.fields,
types_of_fields(self.rhs.fields, self.rhs)
) if name not in on_right
]
        # Handle overlapping but non-joined fields by applying the suffixes below
left_other = set(name for name, dt in left if name not in on_left)
right_other = set(name for name, dt in right if name not in on_right)
overlap = left_other & right_other
left_suffix, right_suffix = self.suffixes
left = ((name + left_suffix if name in overlap else name, dt)
for name, dt in left)
right = ((name + right_suffix if name in overlap else name, dt)
for name, dt in right)
if self.how in ('right', 'outer'):
left = ((name, option(dt)) for name, dt in left)
if self.how in ('left', 'outer'):
right = ((name, option(dt)) for name, dt in right)
return dshape(Record(chain(joined, left, right)))
@property
def dshape(self):
# TODO: think if this can be generalized
return var * self.schema
def types_of_fields(fields, expr):
""" Get the types of fields in an expression
Examples
--------
>>> from blaze import symbol
>>> expr = symbol('e', 'var * {x: int64, y: float32}')
>>> types_of_fields('y', expr)
ctype("float32")
>>> types_of_fields(['y', 'x'], expr)
(ctype("float32"), ctype("int64"))
>>> types_of_fields('x', expr.x)
ctype("int64")
"""
if isinstance(expr.dshape.measure, Record):
return get(fields, expr.dshape.measure)
else:
if isinstance(fields, (tuple, list, set)):
assert len(fields) == 1
fields, = fields
assert fields == expr._name
return expr.dshape.measure
@copydoc(Join)
def join(lhs, rhs, on_left=None, on_right=None,
how='inner', suffixes=('_left', '_right')):
if not on_left and not on_right:
on_left = on_right = unpack(list(sorted(
set(lhs.fields) & set(rhs.fields),
key=lhs.fields.index)))
if not on_right:
on_right = on_left
if isinstance(on_left, tuple):
on_left = list(on_left)
if isinstance(on_right, tuple):
on_right = list(on_right)
if not on_left or not on_right:
raise ValueError(
"Can not Join. No shared columns between %s and %s" % (lhs, rhs),
)
left_types = listpack(types_of_fields(on_left, lhs))
right_types = listpack(types_of_fields(on_right, rhs))
if len(left_types) != len(right_types):
raise ValueError(
'Length of on_left=%d not equal to length of on_right=%d' % (
len(left_types), len(right_types),
),
)
for n, promotion in enumerate(map(partial(promote, promote_option=False),
left_types,
right_types)):
if promotion == object_:
raise TypeError(
"Schema's of joining columns do not match,"
' no promotion found for %s=%s and %s=%s' % (
on_left[n], left_types[n], on_right[n], right_types[n],
),
)
_on_left = tuple(on_left) if isinstance(on_left, list) else on_left
_on_right = (tuple(on_right) if isinstance(on_right, list)
else on_right)
how = how.lower()
if how not in ('inner', 'outer', 'left', 'right'):
raise ValueError("How parameter should be one of "
"\n\tinner, outer, left, right."
"\nGot: %s" % how)
return Join(lhs, rhs, _on_left, _on_right, how, suffixes)
class Concat(Expr):
""" Stack tables on common columns
Parameters
----------
lhs, rhs : Expr
Collections to concatenate
axis : int, optional
The axis to concatenate on.
Examples
--------
>>> from blaze import symbol
Vertically stack tables:
>>> names = symbol('names', '5 * {name: string, id: int32}')
>>> more_names = symbol('more_names', '7 * {name: string, id: int32}')
>>> stacked = concat(names, more_names)
>>> stacked.dshape
dshape("12 * {name: string, id: int32}")
Vertically stack matrices:
>>> mat_a = symbol('a', '3 * 5 * int32')
>>> mat_b = symbol('b', '3 * 5 * int32')
>>> vstacked = concat(mat_a, mat_b, axis=0)
>>> vstacked.dshape
dshape("6 * 5 * int32")
Horizontally stack matrices:
>>> hstacked = concat(mat_a, mat_b, axis=1)
>>> hstacked.dshape
dshape("3 * 10 * int32")
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = '_hash', 'lhs', 'rhs', 'axis'
__inputs__ = 'lhs', 'rhs'
@property
def dshape(self):
axis = self.axis
ldshape = self.lhs.dshape
lshape = ldshape.shape
return DataShape(
*(lshape[:axis] + (
_shape_add(lshape[axis], self.rhs.dshape.shape[axis]),
) + lshape[axis + 1:] + (ldshape.measure,))
)
def _shape_add(a, b):
if isinstance(a, Var) or isinstance(b, Var):
return var
return Fixed(a.val + b.val)
@copydoc(Concat)
def concat(lhs, rhs, axis=0):
ldshape = lhs.dshape
rdshape = rhs.dshape
if ldshape.measure != rdshape.measure:
raise TypeError(
'Mismatched measures: {l} != {r}'.format(
l=ldshape.measure, r=rdshape.measure
),
)
lshape = ldshape.shape
rshape = rdshape.shape
for n, (a, b) in enumerate(zip_longest(lshape, rshape, fillvalue=None)):
if n != axis and a != b:
raise TypeError(
'Shapes are not equal along axis {n}: {a} != {b}'.format(
n=n, a=a, b=b,
),
)
if axis < 0 or 0 < len(lshape) <= axis:
raise ValueError(
"Invalid axis '{a}', must be in range: [0, {n})".format(
a=axis, n=len(lshape)
),
)
return Concat(lhs, rhs, axis)
class IsIn(ElemWise):
"""Check if an expression contains values from a set.
Return a boolean expression indicating whether another expression
contains values that are members of a collection.
Parameters
----------
expr : Expr
Expression whose elements to check for membership in `keys`
keys : Sequence
Elements to test against. Blaze stores this as a ``frozenset``.
Examples
--------
Check if a vector contains any of 1, 2 or 3:
>>> from blaze import symbol
>>> t = symbol('t', '10 * int64')
>>> expr = t.isin([1, 2, 3])
>>> expr.dshape
dshape("10 * bool")
"""
__slots__ = '_hash', '_child', '_keys'
@property
def schema(self):
return datashape.bool_
def __str__(self):
return '%s.%s(%s)' % (self._child, type(self).__name__.lower(),
self._keys)
@copydoc(IsIn)
def isin(expr, keys):
if isinstance(keys, Expr):
raise TypeError('keys argument cannot be an expression, '
'it must be an iterable object such as a list, '
'tuple or set')
return IsIn(expr, frozenset(keys))
dshape_method_list.extend([
(iscollection, set([sort, head, tail])),
(lambda ds: len(ds.shape) == 1, set([distinct])),
(lambda ds: len(ds.shape) == 1 and isscalar(ds.measure), set([isin])),
])
| {
"repo_name": "alexmojaki/blaze",
"path": "blaze/expr/collections.py",
"copies": "1",
"size": "20040",
"license": "bsd-3-clause",
"hash": 9182114499936217000,
"line_mean": 26.8720445063,
"line_max": 79,
"alpha_frac": 0.5426147705,
"autogenerated": false,
"ratio": 3.7193763919821827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9761432293936956,
"avg_score": 0.00011177370904541441,
"num_lines": 719
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from itertools import product
import numpy as np
from toolz import curry
from ..base import tokenize
from .core import Array, normalize_chunks
from .numpy_compat import full
def dims_from_size(size, blocksize):
"""
>>> list(dims_from_size(30, 8))
[8, 8, 8, 6]
"""
result = (blocksize,) * (size // blocksize)
if size % blocksize:
result = result + (size % blocksize,)
return result
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
if 'shape' not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop('shape')
if not isinstance(shape, (tuple, list)):
shape = (shape,)
chunks = kwargs.pop('chunks', None)
chunks = normalize_chunks(chunks, shape)
name = kwargs.pop('name', None)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
name = name or 'wrapped-' + tokenize(func, shape, chunks, dtype, args, kwargs)
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
func = partial(func, dtype=dtype, **kwargs)
vals = ((func,) + (s,) + args for s in shapes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
@curry
def wrap(wrap_func, func, **kwargs):
f = partial(wrap_func, func, **kwargs)
template = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also requires a
keyword argument chunks=(...)
Original signature follows below.
"""
if func.__doc__ is not None:
f.__doc__ = template % {'name': func.__name__} + func.__doc__
f.__name__ = 'blocked_' + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones, dtype='f8')
zeros = w(np.zeros, dtype='f8')
empty = w(np.empty, dtype='f8')
full = w(full)
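# Illustrative sketch of the wrapped creation functions defined above; the
# shape and chunk sizes are arbitrary illustration values and the helper is
# never called at import time.
def _example_blocked_ones():
    x = ones((10, 6), chunks=(5, 3))
    # Each dimension is split into blocks of the requested chunk size,
    # so x.chunks should be ((5, 5), (3, 3)).
    return x.chunks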
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/array/wrap.py",
"copies": "3",
"size": "2043",
"license": "mit",
"hash": -478737828738086340,
"line_mean": 24.8607594937,
"line_max": 82,
"alpha_frac": 0.6172295644,
"autogenerated": false,
"ratio": 3.4804088586030666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015250876925423213,
"num_lines": 79
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from itertools import product
import numpy as np
try:
from cytoolz import curry
except ImportError:
from toolz import curry
from ..base import tokenize
from .core import Array, normalize_chunks
from .numpy_compat import full
def dims_from_size(size, blocksize):
"""
>>> list(dims_from_size(30, 8))
[8, 8, 8, 6]
"""
result = (blocksize,) * (size // blocksize)
if size % blocksize:
result = result + (size % blocksize,)
return result
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
if 'shape' not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop('shape')
if not isinstance(shape, (tuple, list)):
shape = (shape,)
chunks = kwargs.pop('chunks', None)
chunks = normalize_chunks(chunks, shape)
name = kwargs.pop('name', None)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
name = name or 'wrapped-' + tokenize(func, shape, chunks, dtype, args, kwargs)
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
func = partial(func, dtype=dtype, **kwargs)
vals = ((func,) + (s,) + args for s in shapes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
@curry
def wrap(wrap_func, func, **kwargs):
f = partial(wrap_func, func, **kwargs)
template = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also requires a
keyword argument chunks=(...)
Original signature follows below.
"""
if func.__doc__ is not None:
f.__doc__ = template % {'name': func.__name__} + func.__doc__
f.__name__ = 'blocked_' + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones, dtype='f8')
zeros = w(np.zeros, dtype='f8')
empty = w(np.empty, dtype='f8')
full = w(full)
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/wrap.py",
"copies": "1",
"size": "2103",
"license": "bsd-3-clause",
"hash": 4928302954157824000,
"line_mean": 24.3373493976,
"line_max": 82,
"alpha_frac": 0.6195910604,
"autogenerated": false,
"ratio": 3.487562189054726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46071532494547257,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from multipledispatch import Dispatcher
__all__ = 'select_functions',
def match(condition, data):
""" Does the condition match the data
>>> match(1, 1)
True
>>> match(int, 1)
True
>>> match(lambda x: x > 0, 1)
True
Use tuples for many possibilities
>>> match((1, 2, 3), 1)
True
>>> match((int, float), 1)
True
"""
return ((condition == data) or
(isinstance(condition, type)
and isinstance(data, condition)) or
(not isinstance(condition, type)
and callable(condition)
and condition(data)) or
(isinstance(condition, tuple)
and any(match(c, data) for c in condition)))
def name(func):
""" Name of a function, robust to partials """
if isinstance(func, partial):
return name(func.func)
elif isinstance(func, Dispatcher):
return func.name
else:
return func.__name__
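# Illustrative sketch: ``name`` looks through functools.partial wrappers to
# report the underlying function's name.  Never called at import time.
def _example_name_of_partial():
    # Should be True: the partial wrapper is unwrapped down to ``len``.
    return name(partial(len)) == 'len'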
def select_functions(methods, data):
"""
Select appropriate functions given types and predicates
"""
s = set()
for condition, funcs in methods:
if match(condition, data):
s |= funcs
return dict((name(func), func) for func in s)
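# Illustrative sketch of select_functions choosing methods by the data's type;
# the condition/function pairs below are arbitrary illustration values and the
# helper is never called at import time.
def _example_select_functions():
    methods = [(int, set([abs])), (str, set([len]))]
    # An integer matches only the ``int`` condition, so just ``abs`` is kept,
    # keyed by its function name.
    return select_functions(methods, 1)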
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/method_dispatch.py",
"copies": "18",
"size": "1334",
"license": "bsd-3-clause",
"hash": -164464064857052640,
"line_mean": 24.1698113208,
"line_max": 64,
"alpha_frac": 0.5869565217,
"autogenerated": false,
"ratio": 4.19496855345912,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import itertools
import numpy as np
from .core import Array, normalize_chunks, stack
from .wrap import empty
from . import chunk
from ..base import tokenize
def linspace(start, stop, num=50, chunks=None, dtype=None):
"""
Return `num` evenly spaced values over the closed interval [`start`,
`stop`].
    TODO: implement the `endpoint`, `retstep`, and `dtype` keyword args
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The last value of the sequence.
num : int, optional
Number of samples to include in the returned dask array, including the
endpoints.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if `num % blocksize != 0`
Returns
-------
samples : dask array
See Also
--------
dask.array.arange
"""
num = int(num)
if chunks is None:
raise ValueError("Must supply a chunks= keyword argument")
chunks = normalize_chunks(chunks, (num,))
range_ = stop - start
space = float(range_) / (num - 1)
if dtype is None:
dtype = np.linspace(0, 1, 1).dtype
name = 'linspace-' + tokenize((start, stop, num, chunks, dtype))
dsk = {}
blockstart = start
for i, bs in enumerate(chunks[0]):
blockstop = blockstart + ((bs - 1) * space)
task = (partial(np.linspace, dtype=dtype), blockstart, blockstop, bs)
blockstart = blockstart + (space * bs)
dsk[(name, i)] = task
return Array(dsk, name, chunks, dtype=dtype)
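# Illustrative sketch of the blocked linspace defined above; the endpoints,
# sample count and chunk size are arbitrary illustration values and the helper
# is never called at import time.
def _example_linspace_chunks():
    x = linspace(0, 1, num=5, chunks=2)
    # Five samples in blocks of two should give chunks of ((2, 2, 1),).
    return x.chunks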
def arange(*args, **kwargs):
"""
Return evenly spaced values from `start` to `stop` with step size `step`.
The values are half-open [start, stop), so including start and excluding
stop. This is basically the same as python's range function but for dask
arrays.
When using a non-integer step, such as 0.1, the results will often not be
consistent. It is better to use linspace for these cases.
Parameters
----------
start : int, optional
The starting value of the sequence. The default is 0.
stop : int
The end of the interval, this value is excluded from the interval.
step : int, optional
        The spacing between the values. The default is 1 when not specified.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
Returns
-------
samples : dask array
See Also
--------
dask.array.linspace
"""
if len(args) == 1:
start = 0
stop = args[0]
step = 1
elif len(args) == 2:
start = args[0]
stop = args[1]
step = 1
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError('''
arange takes 3 positional arguments: arange([start], stop, [step])
''')
if 'chunks' not in kwargs:
raise ValueError("Must supply a chunks= keyword argument")
chunks = kwargs['chunks']
dtype = kwargs.get('dtype', None)
if dtype is None:
dtype = np.arange(0, 1, step).dtype
num = max(np.ceil((stop - start) / step), 0)
chunks = normalize_chunks(chunks, (num,))
name = 'arange-' + tokenize((start, stop, step, chunks, num))
dsk = {}
elem_count = 0
for i, bs in enumerate(chunks[0]):
blockstart = start + (elem_count * step)
blockstop = start + ((elem_count + bs) * step)
task = (chunk.arange, blockstart, blockstop, step, bs, dtype)
dsk[(name, i)] = task
elem_count += bs
return Array(dsk, name, chunks, dtype=dtype)
def indices(dimensions, dtype=int, chunks=None):
"""
Implements NumPy's ``indices`` for Dask Arrays.
Generates a grid of indices covering the dimensions provided.
The final array has the shape ``(len(dimensions), *dimensions)``. The
chunks are used to specify the chunking for axis 1 up to
``len(dimensions)``. The 0th axis always has chunks of length 1.
Parameters
----------
dimensions : sequence of ints
The shape of the index grid.
dtype : dtype, optional
Type to use for the array. Default is ``int``.
chunks : sequence of ints
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
Returns
-------
grid : dask array
"""
if chunks is None:
raise ValueError("Must supply a chunks= keyword argument")
dimensions = tuple(dimensions)
dtype = np.dtype(dtype)
chunks = tuple(chunks)
if len(dimensions) != len(chunks):
raise ValueError("Need one more chunk than dimensions.")
grid = []
if np.prod(dimensions):
for i in range(len(dimensions)):
s = len(dimensions) * [None]
s[i] = slice(None)
s = tuple(s)
r = arange(dimensions[i], dtype=dtype, chunks=chunks[i])
r = r[s]
for j in itertools.chain(range(i), range(i + 1, len(dimensions))):
r = r.repeat(dimensions[j], axis=j)
grid.append(r)
if grid:
grid = stack(grid)
else:
grid = empty(
(len(dimensions),) + dimensions, dtype=dtype, chunks=(1,) + chunks
)
return grid
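# Illustrative sketch of indices: the result carries one leading axis per
# dimension, followed by the dimensions themselves.  The dimensions and chunks
# are arbitrary illustration values and the helper is never called at import
# time.
def _example_indices_shape():
    grid = indices((2, 3), chunks=(1, 3))
    # grid.shape should be (2, 2, 3).
    return grid.shape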
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/creation.py",
"copies": "1",
"size": "5528",
"license": "bsd-3-clause",
"hash": -4266472822170771000,
"line_mean": 26.9191919192,
"line_max": 79,
"alpha_frac": 0.5982272069,
"autogenerated": false,
"ratio": 4.052785923753666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5151013130653666,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import json
import math
import os
import traceback
import PySide2
from pymel.core import Callback, colorEditor, colorIndex, columnLayout, objectType, NurbsCurveCV
from pymel.core import palettePort, PyNode, rotate, select, selected, selectedNodes, setParent, warning
from ....vendor import Qt
from .... import core
from .. import controllerShape
from .. import rig
from .. import cardlister
from .. import util
if 'SHAPE_DEBUG' not in globals():
SHAPE_DEBUG = False
class ShapeEditor(object):
'''
def __init__(self):
with columnLayout() as self.main:
button( l='Select CVs', c=core.alt.Callback(self.selectCVs) )
button( l='Select Pin Head', c=Callback(self.selectPinHead) )
'''
NUM_COLS = 4
CURVE_NAME = 'Fossil_Curve_Colorer'
SURFACE_NAME = 'Fossil_Surface_Colorer'
def __init__(self, source):
self.window = source
self.source = source.ui
# Not sure this is actually needed yet so keep it hidden.
self.controlCardList.hide()
if not SHAPE_DEBUG:
self.shapeDebug.hide()
self.hookupSignals()
self.buildShapeMenu(source.scaleFactor)
#self.surfaceColorLayout.setObjectName(self.SURFACE_NAME)
#setParent( self.SURFACE_NAME )
self.surfaceColorer = SurfaceColorEditor(self.source.surfaceColorGrid)
self.curveColorer = CurveColorEditor(self.source.curveColorGrid)
self.refresh()
"""
self.curveColorLayout.setObjectName( self.CURVE_NAME )
setParent( self.CURVE_NAME )
self.refreshCardList()
"""
def __getattr__(self, name):
return getattr( self.source, name)
def hookupSignals(self):
# Scaling
self.minus_one.clicked.connect( Callback(self.scaleCvs, 0.99) )
self.minus_ten.clicked.connect( Callback(self.scaleCvs, 0.90) )
self.plus_ten.clicked.connect( Callback(self.scaleCvs, 1.10) )
self.plus_one.clicked.connect( Callback(self.scaleCvs, 1.01) )
# Rotating
self.rot_local_x.clicked.connect( Callback(self.rotate, 'x', 45, 'local') )
self.rot_local_y.clicked.connect( Callback(self.rotate, 'y', 45, 'local') )
self.rot_local_z.clicked.connect( Callback(self.rotate, 'z', 45, 'local') )
self.rot_world_x.clicked.connect( Callback(self.rotate, 'x', 45, 'world') )
self.rot_world_y.clicked.connect( Callback(self.rotate, 'y', 45, 'world') )
self.rot_world_z.clicked.connect( Callback(self.rotate, 'z', 45, 'world') )
# Selecting
self.select_cvs.clicked.connect( Callback(self.selectCVs) )
self.select_pin_head.clicked.connect( Callback(self.selectPinHead) )
self.select_band_edge_1.clicked.connect( Callback(self.bandEdge, 1))
self.select_band_edge_2.clicked.connect( Callback(self.bandEdge, 2))
# Shapes
self.copyShapes.clicked.connect( Callback(self.transferShape) )
self.mirrorShapes.clicked.connect( Callback(self.transferShape, mirror=True) )
self.mirrorSide.clicked.connect( Callback(lambda: mirrorAllKinematicShapes(selected())) )
#self.mirrorSide.setContextMenuPolicy(Qt.QtCore.Qt.CustomContextMenu)
#self.mirrorSide.customContextMenuRequested.connect(self.XXXcontextMenuEvent)
self.copyToCBBtn.clicked.connect( Callback(self.copyToClipboad) )
self.pasteLocalBtn.clicked.connect( Callback(self.pasteFromCliboard, 'os') )
self.pasteWorldBtn.clicked.connect( Callback(self.pasteFromCliboard, 'ws') )
def copyToClipboad(self):
try:
info = controllerShape.getShapeInfo(selected()[0])
s = json.dumps(info)
core.text.clipboard.set( s )
except Exception:
warning('Unable to copy shapes')
def pasteFromCliboard(self, space):
'''
Deserialize shape info from the clipboard and apply it, either in space='os', object space, or 'ws', world space.
'''
try:
info = json.loads( core.text.clipboard.get() )
for obj in selected():
controllerShape.applyShapeInfo(obj, info, space)
except Exception:
warning('Unable to paste shapes')
def XXXcontextMenuEvent(self, point):
menu = Qt.QtWidgets.QMenu(self.mirrorSide)
menu.addAction('demo')
menu.addAction('test')
#menu.addAction(self.pasteAct)
menu.exec_( self.mirrorSide.mapToGlobal(point) )
@staticmethod
def scaleCvs(val):
#scaleFactor = [val] * 3 # Scale factor needs to be a 3 vector for all axes.
for obj in selected():
controllerShape.scaleAllCVs(obj, val)
#for shape in core.shape.getShapes(obj):
# scale(shape.cv, scaleFactor, r=True, os=True)
@staticmethod
def selectPinHead():
sel = selected()
if not sel:
return
        # Not the best method, assuming it's made of a tube and another shape but
# it works for now.
tube, outline, head = None, None, None
shapes = core.shape.getShapes(sel[0]) # This culls out switchers/vis shapes
for shape in shapes[:]:
if shape.name().count('tube'):
tube = shape
shapes.remove(tube)
elif shape.name().count('outline'):
outline = shape
shapes.remove(outline)
elif shape.name().count('sharedShape'):
shapes.remove(shape)
if len(shapes) == 1:
head = shapes[0]
if tube and outline and head:
select(head.cv[0:6][0:5], outline.cv[1:14], tube.cv[3][0:3])
@staticmethod
def bandEdge(side):
sel = selected()
if not sel:
return
obj = sel[0]
# If the selection is a cv, it probably means we're selecting the other side so assume the parent is the obj.
if isinstance(obj, NurbsCurveCV):
obj = obj.node().getParent()
if not obj.hasAttr('shapeType') or obj.shapeType.get() != 'band':
return
shapes = core.shape.getShapes(obj)
if shapes[0].type() == 'nurbsSurface':
surface = shapes[0]
outline = shapes[1]
else:
surface = shapes[1]
outline = shapes[0]
if side == 1:
select( outline.cv[0:14], surface.cv[3][0:7] )
else:
select( outline.cv[16:32], surface.cv[0][0:7] )
@staticmethod
def selectCVs():
sel = selected()
select(cl=True)
for obj in sel:
for shape in core.shape.getShapes(obj):
select( shape.cv, add=True )
@staticmethod
def rotate(dir, angle, space):
rot = [0, 0, 0]
rot[ ord(dir) - ord('x') ] = angle
trans = [ PyNode(obj) for obj in selectedNodes() if objectType(obj) == 'transform' ]
trans += [ PyNode(obj).getParent() for obj in selectedNodes() if objectType(obj).startswith('nurbs') ]
for obj in set(trans):
for shape in core.shape.getShapes(obj):
if space == 'world':
rotate( shape.cv, rot, r=True, ws=True )
else:
rotate( shape.cv, rot, r=True, os=True )
@staticmethod
def transferShape(mirror=False):
sel = selected()
if len(sel) > 1:
for dest in sel[1:]:
controllerShape.copyShape(sel[0], dest, mirror=mirror)
select(sel)
@staticmethod
def changeShape(shapeName):
sel = selected()
if sel:
for obj in sel:
#if obj.hasAttr( 'fossilCtrlType' ):
controllerShape.setShape(obj, shapeName)
select(sel)
def buildShapeMenu(self, scale):
shapeFolder = os.path.dirname( __file__ ).replace('\\', '/') + '/shapes'
shapeNames = controllerShape.listShapes()
"""
        The old way had a transparent background, but didn't scale the icons if the monitor had a scale set.
        The new version scales them correctly, at the sacrifice of the transparent background.
temp_style = []
template = "QPushButton#%s { background-image: url('%s'); border: none; width: 90; height: 90;}" # padding: 0; margin: 0;
for name in shapeNames:
temp_style.append( template % (name, shapeFolder + '/' + name + '.png') )
self.window.setStyleSheet( '\n'.join(temp_style) )
"""
row = 0
col = 0
for f in os.listdir(shapeFolder):
if f.lower().endswith('.png'):
shapeName = f[:-4]
if shapeName in shapeNames:
button = Qt.QtWidgets.QPushButton()
button.setFixedSize(64 * scale, 64 * scale)
#button.setObjectName(f[:-4])
img = PySide2.QtGui.QPixmap(shapeFolder + '/' + f)
img = img.scaled( PySide2.QtCore.QSize( 64 * scale, 64 * scale ),
PySide2.QtCore.Qt.AspectRatioMode.IgnoreAspectRatio,
PySide2.QtCore.Qt.TransformationMode.SmoothTransformation )
button.setFlat(True)
button.setAutoFillBackground(True)
button.setIcon( img )
button.setIconSize( img.size() )
button.clicked.connect( Callback(self.changeShape, shapeName) )
self.shape_chooser.addWidget(button, row, col)
col += 1
if col >= self.NUM_COLS:
col = 0
row += 1
def refreshCardList(self):
'''
        From cardLister, use .cardHierarchy() just like it does to refresh its own list
# cardOrder = cardHierarchy()
'''
return
cards = cardlister.cardHierarchy()
cardToItem = {None: self.controlCardList.invisibleRootItem()}
for parent, children in cards:
for child in children:
item = Qt.QtWidgets.QTreeWidgetItem([child.name()])
cardToItem[parent].addChild(item)
cardToItem[child] = item
item.setExpanded(True)
def refresh(self):
self.curveColorer.update()
if SHAPE_DEBUG:
try: # &&& Don't have time for this now..
temp = selected(type='transform')
if temp:
card = temp[0]
else:
return
if not card.c.__class__.__name__ == 'Card': # &&& Not a great verification this is a card node.
main = rig.getMainController(card)
if main:
card = main.card
text = ''
try:
for attr in ['outputShape' + side + kin for side in ['Left', 'Right', 'Center'] for kin in ['ik', 'fk']]:
if card.hasAttr(attr):
text += '---- ' + attr + '\n\n'
text += core.text.asciiDecompress( card.attr(attr).get() ) + '\n\n'
except Exception:
print( traceback.format_exc() )
self.shapeDebug.setPlainText(text)
except:
pass
class SurfaceColorEditor(object):
WIDTH = 8.0
def __init__(self, grid):
self.customColor = [1.0, 1.0, 1.0]
"""
columnLayout(w=100, h=100)
self.surfacePalette = palettePort(
dim=(7, 4),
w=(7 * 20),
h=(4 * 20),
td=True,
colorEditable=False)
"""
#self.surfacePalette.changeCommand( core.alt.Callback(self.changeSurfaceColor) )
#self.surfacePalette.setRgbValue( [0] + self.customColor )
#for i, (name, c) in enumerate(core.shader.namedColors.items()):
# self.surfacePalette.setRgbValue( [i + 1] + list(c) )
for i, (name, c) in enumerate(core.shader.namedColors.items()):
col = i % self.WIDTH
row = math.floor(i / self.WIDTH)
b = PySide2.QtWidgets.QPushButton(' ')
pal = b.palette()
pal.setColor(PySide2.QtGui.QPalette.Button, PySide2.QtGui.QColor( c[0] * 255.0, c[1] * 255.0, c[2] * 255.0 ) )
b.setAutoFillBackground(True)
b.setPalette(pal)
b.clicked.connect( partial(self.changeSurfaceColor, i) )
grid.addWidget( b, row, col)
def changeSurfaceColor(self, colorIndex):
#colorIndex = self.surfacePalette.getSetCurCell() - 1
if colorIndex == -1:
self.defineSurfaceColor()
color = self.customColor[:]
else:
color = list(core.shader.namedColors.values()[colorIndex])
color.append(0.5)
sel = selected()
for obj in sel:
try:
core.shader.assign(obj, color)
except Exception:
pass
if sel:
select(sel)
def defineSurfaceColor(self):
val = colorEditor(rgb=self.customColor)
if val[-1] == '1': # Control has strange returns, see maya docs
self.customColor = [ float(f) for f in val.split()][:-1]
self.surfacePalette.setRgbValue( [0] + self.customColor )
palettePort(self.surfacePalette, e=True, redraw=True)
return True
return False
class CurveColorEditor(object):
WIDTH = 8
HEIGHT = 4
def __init__(self, grid):
self._colorChangeObjs = []
"""
columnLayout()
self.curvePalette = palettePort(
dim=(8, 4),
w=(8 * 20),
h=(4 * 20),
td=True,
colorEditable=False,
transparent=0)
self.curvePalette.changeCommand( core.alt.Callback(self.changeCurveColor) )
for i in range(1, 32):
param = [i] + colorIndex(i, q=True)
self.curvePalette.setRgbValue( param )
self.curvePalette.setRgbValue( (0, .6, .6, .6) )
"""
for i in range(1, 32):
col = i % self.WIDTH
row = math.floor(i / self.WIDTH)
b = PySide2.QtWidgets.QPushButton(' ')
pal = b.palette()
color = colorIndex(i, q=True)
pal.setColor(PySide2.QtGui.QPalette.Button, PySide2.QtGui.QColor( color[0] * 255.0, color[1] * 255.0, color[2] * 255.0 ) )
b.setAutoFillBackground(True)
b.setPalette(pal)
b.clicked.connect( partial(self.changeCurveColor, i) )
grid.addWidget( b, row, col)
def changeCurveColor(self, colorIndex):
#colorIndex = self.curvePalette.getSetCurCell()
select(cl=True)
for obj in self._colorChangeObjs:
controllerShape.setCurveColor(obj, colorIndex)
def update(self):
if selected():
self._colorChangeObjs = selected()
def mirrorAllKinematicShapes(ctrls):
'''
Copies all the shapes for that motion type, ex, ik left -> ik right
'''
done = set()
for ctrl in selected():
main = rig.getMainController(ctrl)
if main in done:
continue
if not main:
continue
other = main.getOppositeSide()
if not other:
continue
controllerShape.copyShape(main, other, mirror=True)
for name, ctrl in main.subControl.items():
controllerShape.copyShape(ctrl, other.subControl[name], mirror=True)
        done.add(main)
| {
"repo_name": "patcorwin/fossil",
"path": "pdil/tool/fossil/ui/controllerEdit.py",
"copies": "1",
"size": "16489",
"license": "bsd-3-clause",
"hash": -2290697681728292900,
"line_mean": 32.6530612245,
"line_max": 134,
"alpha_frac": 0.537995027,
"autogenerated": false,
"ratio": 4.077398615232443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5115393642232443,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import json
import os
import re
import time
import datetime
import jinja2
from .conda_interface import PY3
from .environ import get_dict as get_environ
from .utils import (get_installed_packages, apply_pin_expressions, get_logger, HashableDict,
string_types)
from .render import get_env_dependencies
from .utils import copy_into, check_call_env, rm_rf, ensure_valid_spec
from .variants import DEFAULT_COMPILERS
from .exceptions import CondaBuildException
from . import _load_setup_py_data
class UndefinedNeverFail(jinja2.Undefined):
"""
A class for Undefined jinja variables.
This is even less strict than the default jinja2.Undefined class,
because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and
{{ MY_UNDEFINED_VAR|int }}. This can mask lots of errors in jinja templates, so it
should only be used for a first-pass parse, when you plan on running a 'strict'
second pass later.
Note:
When using this class, any usage of an undefined variable in a jinja template is recorded
in the (global) all_undefined_names class member. Therefore, after jinja rendering,
you can detect which undefined names were used by inspecting that list.
Be sure to clear the all_undefined_names list before calling template.render().
"""
all_undefined_names = []
def __init__(self, hint=None, obj=jinja2.runtime.missing, name=None,
exc=jinja2.exceptions.UndefinedError):
jinja2.Undefined.__init__(self, hint, obj, name, exc)
# Using any of these methods on an Undefined variable
# results in another Undefined variable.
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
__complex__ = __pow__ = __rpow__ = \
lambda self, *args, **kwargs: self._return_undefined(self._undefined_name)
# Accessing an attribute of an Undefined variable
# results in another Undefined variable.
def __getattr__(self, k):
try:
return object.__getattr__(self, k)
except AttributeError:
self._return_undefined(self._undefined_name + '.' + k)
# Unlike the methods above, Python requires that these
# few methods must always return the correct type
__str__ = __repr__ = lambda self: self._return_value(str())
__unicode__ = lambda self: self._return_value(u'')
__int__ = lambda self: self._return_value(0)
__float__ = lambda self: self._return_value(0.0)
__nonzero__ = lambda self: self._return_value(False)
def _return_undefined(self, result_name):
# Record that this undefined variable was actually used.
UndefinedNeverFail.all_undefined_names.append(self._undefined_name)
return UndefinedNeverFail(hint=self._undefined_hint,
obj=self._undefined_obj,
name=result_name,
exc=self._undefined_exception)
def _return_value(self, value=None):
# Record that this undefined variable was actually used.
UndefinedNeverFail.all_undefined_names.append(self._undefined_name)
return value
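# Illustrative sketch of using UndefinedNeverFail for a permissive first-pass
# render; the template text and variable name are arbitrary illustration
# values and the helper is never called at import time.
def _example_undefined_never_fail():
    env = jinja2.Environment(undefined=UndefinedNeverFail)
    UndefinedNeverFail.all_undefined_names = []
    # Rendering should not raise even though MISSING_VAR is undefined; the
    # |int filter falls back to 0 via the __int__ override above.
    rendered = env.from_string('{{ MISSING_VAR|int }}').render()
    # Any undefined name that was touched is recorded for a later strict pass.
    return rendered, UndefinedNeverFail.all_undefined_names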
class FilteredLoader(jinja2.BaseLoader):
"""
A pass-through for the given loader, except that the loaded source is
filtered according to any metadata selectors in the source text.
"""
def __init__(self, unfiltered_loader, config):
self._unfiltered_loader = unfiltered_loader
self.list_templates = unfiltered_loader.list_templates
self.config = config
def get_source(self, environment, template):
# we have circular imports here. Do a local import
from .metadata import select_lines, ns_cfg
contents, filename, uptodate = self._unfiltered_loader.get_source(environment,
template)
return (select_lines(contents, ns_cfg(self.config),
variants_in_place=bool(self.config.variant)), filename, uptodate)
def load_setup_py_data(m, setup_file='setup.py', from_recipe_dir=False, recipe_dir=None,
permit_undefined_jinja=True):
_setuptools_data = None
# we must copy the script into the work folder to avoid incompatible pyc files
origin_setup_script = os.path.join(os.path.dirname(__file__), '_load_setup_py_data.py')
dest_setup_script = os.path.join(m.config.work_dir, '_load_setup_py_data.py')
copy_into(origin_setup_script, dest_setup_script)
env = get_environ(m)
env["CONDA_BUILD_STATE"] = "RENDER"
if os.path.isfile(m.config.build_python):
args = [m.config.build_python, dest_setup_script, m.config.work_dir, setup_file]
if from_recipe_dir:
assert recipe_dir, 'recipe_dir must be set if from_recipe_dir is True'
args.append('--from-recipe-dir')
args.extend(['--recipe-dir', recipe_dir])
if permit_undefined_jinja:
args.append('--permit-undefined-jinja')
check_call_env(args, env=env)
# this is a file that the subprocess will have written
with open(os.path.join(m.config.work_dir, 'conda_build_loaded_setup_py.json')) as f:
_setuptools_data = json.load(f)
else:
try:
_setuptools_data = _load_setup_py_data.load_setup_py_data(setup_file,
from_recipe_dir=from_recipe_dir,
recipe_dir=recipe_dir,
work_dir=m.config.work_dir,
permit_undefined_jinja=permit_undefined_jinja)
except (TypeError, OSError):
# setup.py file doesn't yet exist. Will get picked up in future parsings
pass
except ImportError as e:
if permit_undefined_jinja:
log = get_logger(__name__)
log.debug("Reading setup.py failed due to missing modules. This is probably OK, "
"since it may succeed in later passes. Watch for incomplete recipe "
"info, though.")
else:
raise CondaBuildException("Could not render recipe - need modules "
"installed in root env. Import error was \"{}\"".format(e))
# cleanup: we must leave the source tree empty unless the source code is already present
rm_rf(os.path.join(m.config.work_dir, '_load_setup_py_data.py'))
return _setuptools_data if _setuptools_data else {}
def load_setuptools(m, setup_file='setup.py', from_recipe_dir=False, recipe_dir=None,
permit_undefined_jinja=True):
log = get_logger(__name__)
log.warn("Deprecation notice: the load_setuptools function has been renamed to "
"load_setup_py_data. load_setuptools will be removed in a future release.")
return load_setup_py_data(m, setup_file=setup_file, from_recipe_dir=from_recipe_dir,
recipe_dir=recipe_dir, permit_undefined_jinja=permit_undefined_jinja)
def load_npm():
# json module expects bytes in Python 2 and str in Python 3.
mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
with open('package.json', **mode_dict) as pkg:
return json.load(pkg)
def load_file_regex(config, load_file, regex_pattern, from_recipe_dir=False,
recipe_dir=None, permit_undefined_jinja=True):
match = False
log = get_logger(__name__)
cd_to_work = False
if from_recipe_dir and recipe_dir:
load_file = os.path.abspath(os.path.join(recipe_dir, load_file))
elif os.path.exists(config.work_dir):
cd_to_work = True
cwd = os.getcwd()
os.chdir(config.work_dir)
if not os.path.isabs(load_file):
load_file = os.path.join(config.work_dir, load_file)
else:
message = ("Did not find {} file in manually specified location, and source "
"not downloaded yet.".format(load_file))
if permit_undefined_jinja:
log.debug(message)
return {}
else:
raise RuntimeError(message)
if os.path.isfile(load_file):
with open(load_file, 'r') as lfile:
match = re.search(regex_pattern, lfile.read())
else:
if not permit_undefined_jinja:
raise TypeError('{} is not a file that can be read'.format(load_file))
# Reset the working directory
if cd_to_work:
os.chdir(cwd)
return match if match else None
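# Sketch: pulling a version string out of a file with load_file_regex. In meta.yaml the
# config and recipe_dir arguments are pre-bound by context_processor below, so a recipe
# (file name and pattern here are hypothetical) only passes the remaining arguments:
#
#   {% set m = load_file_regex(load_file='mypkg/__init__.py',
#                              regex_pattern='__version__ = "(.+)"',
#                              from_recipe_dir=True) %}
#   package:
#     version: {{ m.group(1) if m else '0.0.0' }}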
cached_env_dependencies = {}
def pin_compatible(m, package_name, lower_bound=None, upper_bound=None, min_pin='x.x.x.x.x.x',
max_pin='x', permit_undefined_jinja=False, exact=False, bypass_env_check=False):
"""dynamically pin based on currently installed version.
only mandatory input is package_name.
upper_bound is the authoritative upper bound, if provided. The lower bound is the the
currently installed version.
pin expressions are of the form 'x.x' - the number of pins is the number of x's separated
by ``.``.
"""
global cached_env_dependencies
compatibility = ""
# optimization: this is slow (requires solver), so better to bypass it
# until the finalization stage.
if not bypass_env_check and not permit_undefined_jinja:
# this is the version split up into its component bits.
# There are two cases considered here (so far):
# 1. Good packages that follow semver style (if not philosophy). For example, 1.2.3
# 2. Evil packages that cram everything alongside a single major version. For example, 9b
key = (m.name(), HashableDict(m.config.variant))
if key in cached_env_dependencies:
pins = cached_env_dependencies[key]
else:
if m.is_cross and not m.build_is_host:
pins, _, _ = get_env_dependencies(m, 'host', m.config.variant)
else:
pins, _, _ = get_env_dependencies(m, 'build', m.config.variant)
if m.build_is_host:
host_pins, _, _ = get_env_dependencies(m, 'host', m.config.variant)
pins.extend(host_pins)
cached_env_dependencies[key] = pins
versions = {p.split(' ')[0]: p.split(' ')[1:] for p in pins}
if versions:
if exact and versions.get(package_name):
compatibility = ' '.join(versions[package_name])
else:
version = lower_bound or versions.get(package_name)
if version:
if hasattr(version, '__iter__') and not isinstance(version, string_types):
version = version[0]
else:
version = str(version)
if upper_bound:
if min_pin or lower_bound:
compatibility = ">=" + str(version) + ","
compatibility += '<{upper_bound}'.format(upper_bound=upper_bound)
else:
compatibility = apply_pin_expressions(version, min_pin, max_pin)
if (not compatibility and not permit_undefined_jinja and not bypass_env_check):
check = re.compile(r'pin_compatible\s*\(\s*[''"]{}[''"]'.format(package_name))
if check.search(m.extract_requirements_text()):
raise RuntimeError("Could not get compatibility information for {} package. "
"Is it one of your host dependencies?".format(package_name))
return " ".join((package_name, compatibility)) if compatibility is not None else package_name
def pin_subpackage_against_outputs(metadata, matching_package_keys, outputs, min_pin, max_pin,
exact, permit_undefined_jinja, skip_build_id=False):
pin = None
if matching_package_keys:
# two ways to match:
# 1. only one other output named the same as the subpackage_name from the key
# 2. whole key matches (both subpackage name and variant (used keys only))
if len(matching_package_keys) == 1:
key = matching_package_keys[0]
elif len(matching_package_keys) > 1:
key = None
for pkg_name, variant in matching_package_keys:
# This matches other outputs with any keys that are common to both
# metadata objects. For debugging, the keys are always the (package
# name, used vars+values). It used to be (package name, variant) -
# but that was really big and hard to look at.
shared_vars = set(variant.keys()) & set(metadata.config.variant.keys())
if not shared_vars or all(variant[sv] == metadata.config.variant[sv]
for sv in shared_vars):
key = (pkg_name, variant)
break
if key in outputs:
sp_m = outputs[key][1]
if permit_undefined_jinja and not sp_m.version():
pin = None
else:
if exact:
pin = " ".join([sp_m.name(), sp_m.version(),
sp_m.build_id() if not skip_build_id else str(sp_m.build_number())])
else:
pin = "{0} {1}".format(sp_m.name(),
apply_pin_expressions(sp_m.version(), min_pin,
max_pin))
else:
pin = matching_package_keys[0][0]
return pin
def pin_subpackage(metadata, subpackage_name, min_pin='x.x.x.x.x.x', max_pin='x',
exact=False, permit_undefined_jinja=False, allow_no_other_outputs=False,
skip_build_id=False):
"""allow people to specify pinnings based on subpackages that are defined in the recipe.
For example, given a compiler package, allow it to specify either a compatible or exact
pinning on the runtime package that is also created by the compiler package recipe
"""
pin = None
if not hasattr(metadata, 'other_outputs'):
if allow_no_other_outputs:
pin = subpackage_name
else:
raise ValueError("Bug in conda-build: we need to have info about other outputs in "
"order to allow pinning to them. It's not here.")
else:
# two ways to match:
# 1. only one other output named the same as the subpackage_name from the key
# 2. whole key matches (both subpackage name and variant)
keys = list(metadata.other_outputs.keys())
matching_package_keys = [k for k in keys if k[0] == subpackage_name]
pin = pin_subpackage_against_outputs(metadata, matching_package_keys,
metadata.other_outputs, min_pin, max_pin,
exact, permit_undefined_jinja,
skip_build_id=skip_build_id)
if not pin:
pin = subpackage_name
if not permit_undefined_jinja and not allow_no_other_outputs:
raise ValueError("Didn't find subpackage version info for '{}', which is used in a"
" pin_subpackage expression. Is it actually a subpackage? If not, "
"you want pin_compatible instead.".format(subpackage_name))
return pin
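# Sketch of meta.yaml usage in a multi-output recipe (output names, versions and build
# strings are hypothetical):
#
#   outputs:
#     - name: libfoo
#     - name: foo
#       requirements:
#         run:
#           - {{ pin_subpackage('libfoo', exact=True) }}    # e.g. "libfoo 1.2.3 h1a2b3c_0"
#           - {{ pin_subpackage('libfoo', max_pin='x.x') }} # e.g. "libfoo >=1.2.3,<1.3"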
def native_compiler(language, config):
compiler = language
for platform in [config.platform, config.platform.split('-')[0]]:
try:
compiler = DEFAULT_COMPILERS[platform][language]
break
except KeyError:
continue
if hasattr(compiler, 'keys'):
compiler = compiler.get(config.variant.get('python', 'nope'), 'vs2017')
return compiler
def compiler(language, config, permit_undefined_jinja=False):
"""Support configuration of compilers. This is somewhat platform specific.
Native compilers never list their host - it is always implied. Generally, they are
metapackages, pointing at a package that does specify the host. These in turn may be
metapackages, pointing at a package where the host is the same as the target (both being the
native architecture).
"""
compiler = native_compiler(language, config)
version = None
if config.variant:
target_platform = config.variant.get('target_platform', config.subdir)
language_compiler_key = '{}_compiler'.format(language)
# fall back to native if language-compiler is not explicitly set in variant
compiler = config.variant.get(language_compiler_key, compiler)
version = config.variant.get(language_compiler_key + '_version')
else:
target_platform = config.subdir
# support cross compilers. A cross-compiler package will have a name such as
# gcc_target
# gcc_linux-cos6-64
compiler = '_'.join([compiler, target_platform])
if version:
compiler = ' '.join((compiler, version))
compiler = ensure_valid_spec(compiler, warn=False)
return compiler
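# Sketch: in a recipe, {{ compiler('c') }} picks the compiler name and version from the
# variant (c_compiler / c_compiler_version) plus the target platform, rendering to
# something like "gcc_linux-64 7.2.0" on linux-64 or "vs2017_win-64" on Windows
# (values are illustrative, not guaranteed defaults):
#
#   requirements:
#     build:
#       - {{ compiler('c') }}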
def cdt(package_name, config, permit_undefined_jinja=False):
"""Support configuration of Core Dependency Trees.
We should define CDTs in a single location. The current
idea is to emit parts of the following to index.json (the
bits that the solver could make use of) and parts to
about.json (the other bits).
"system": {
"os": {
"type": "windows", "linux", "bsd", "darwin",
"os_distribution": "CentOS", "FreeBSD", "Windows", "osx",
"os_version": "6.9", "10.12.3",
"os_kernel_version" : "2.6.32",
"os_libc_family": "glibc",
"os_libc_version": "2.12",
}
"cpu": {
# Whichever cpu_architecture/cpu_isa we build-out for:
# .. armv6 is compatible with and uses all CPU features of a Raspberry PI 1
# .. armv7a is compatible with and uses all CPU features of a Raspberry PI 2
# .. aarch64 is compatible with and uses all CPU features of a Raspberry PI 3
"cpu_architecture": "x86", "x86_64",
"armv6", "armv7a", "aarch32", "aarch64",
"powerpc", "powerpc64",
"s390", "s390x",
"cpu_isa": "nocona", "armv8.1-a", "armv8.3-a",
# "?" because the vfpu is specified by cpu_architecture + cpu_isa + rules.
"vfpu": "?",
"cpu_endianness": "BE", "LE",
}
"gpu ?": {
}
"compilerflags": {
# When put into a CDT these should be the base defaults.
# Package builds can and will change these frequently.
"CPPFLAGS": "-D_FORTIFY_SOURCE=2",
"CFLAGS": "-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -O2 -pipe",
"CXXFLAGS": "-fvisibility-inlines-hidden -std=c++17 -fmessage-length=0 -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -O2 -pipe",
"LDFLAGS": "-Wl,-O1,--sort-common,--as-needed,-z,relro",
"FFLAGS": "-fopenmp",
# These are appended to the non-DEBUG values:
"DEBUG_CFLAGS": "-Og -g -Wall -Wextra -fcheck=all -fbacktrace -fimplicit-none -fvar-tracking-assignments",
"DEBUG_CXXFLAGS": "-Og -g -Wall -Wextra -fcheck=all -fbacktrace -fimplicit-none -fvar-tracking-assignments",
"DEBUG_FFLAGS": "-Og -g -Wall -Wextra -fcheck=all -fbacktrace -fimplicit-none -fvar-tracking-assignments",
}
}
""" # NOQA
cdt_name = 'cos6'
arch = config.host_arch or config.arch
if arch == 'ppc64le' or arch == 'aarch64':
cdt_name = 'cos7'
cdt_arch = arch
else:
cdt_arch = 'x86_64' if arch == '64' else 'i686'
if config.variant:
cdt_name = config.variant.get('cdt_name', cdt_name)
cdt_arch = config.variant.get('cdt_arch', cdt_arch)
if ' ' in package_name:
name = package_name.split(' ')[0]
ver_build = package_name.split(' ')[1:]
result = (name + '-' + cdt_name + '-' + cdt_arch + ' ' + ' '.join(ver_build))
else:
result = (package_name + '-' + cdt_name + '-' + cdt_arch)
return result
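# Worked example of the string construction above: with the defaults (cos6 on x86),
# {{ cdt('gtk2-devel') }} renders to "gtk2-devel-cos6-x86_64", while on aarch64 or
# ppc64le hosts it becomes e.g. "gtk2-devel-cos7-aarch64".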
def resolved_packages(m, env, permit_undefined_jinja=False,
bypass_env_check=False):
"""Returns the final list of packages that are listed in host or build.
    This includes all packages (including the indirect dependencies) that will
be installed in the host or build environment. An example usage of this
jinja function can be::
requirements:
host:
- curl 7.55.1
run_constrained:
{% for package in resolved_packages('host') %}
- {{ package }}
{% endfor %}
which will render to::
requirements:
host:
- ca-certificates 2017.08.26 h1d4fec5_0
- curl 7.55.1 h78862de_4
- libgcc-ng 7.2.0 h7cc24e2_2
- libssh2 1.8.0 h9cfc8f7_4
- openssl 1.0.2n hb7f436b_0
- zlib 1.2.11 ha838bed_2
run_constrained:
- ca-certificates 2017.08.26 h1d4fec5_0
- curl 7.55.1 h78862de_4
- libgcc-ng 7.2.0 h7cc24e2_2
- libssh2 1.8.0 h9cfc8f7_4
- openssl 1.0.2n hb7f436b_0
- zlib 1.2.11 ha838bed_2
"""
if env not in ('host', 'build'):
raise ValueError('Only host and build dependencies are supported.')
package_names = []
# optimization: this is slow (requires solver), so better to bypass it
# until the finalization stage as done similarly in pin_compatible.
if not bypass_env_check and not permit_undefined_jinja:
package_names, _, _ = get_env_dependencies(m, env, m.config.variant)
return package_names
def context_processor(initial_metadata, recipe_dir, config, permit_undefined_jinja,
allow_no_other_outputs=False, bypass_env_check=False, skip_build_id=False,
variant=None):
"""
Return a dictionary to use as context for jinja templates.
initial_metadata: Augment the context with values from this MetaData object.
Used to bootstrap metadata contents via multiple parsing passes.
"""
ctx = get_environ(m=initial_metadata, for_env=False, skip_build_id=skip_build_id,
escape_backslash=True, variant=variant)
environ = dict(os.environ)
environ.update(get_environ(m=initial_metadata, skip_build_id=skip_build_id))
ctx.update(
load_setup_py_data=partial(load_setup_py_data, m=initial_metadata, recipe_dir=recipe_dir,
permit_undefined_jinja=permit_undefined_jinja),
# maintain old alias for backwards compatibility:
load_setuptools=partial(load_setuptools, m=initial_metadata, recipe_dir=recipe_dir,
permit_undefined_jinja=permit_undefined_jinja),
load_npm=load_npm,
load_file_regex=partial(load_file_regex, config=config, recipe_dir=recipe_dir,
permit_undefined_jinja=permit_undefined_jinja),
installed=get_installed_packages(os.path.join(config.host_prefix, 'conda-meta')),
pin_compatible=partial(pin_compatible, initial_metadata,
permit_undefined_jinja=permit_undefined_jinja,
bypass_env_check=bypass_env_check),
pin_subpackage=partial(pin_subpackage, initial_metadata,
permit_undefined_jinja=permit_undefined_jinja,
allow_no_other_outputs=allow_no_other_outputs,
skip_build_id=skip_build_id),
compiler=partial(compiler, config=config, permit_undefined_jinja=permit_undefined_jinja),
cdt=partial(cdt, config=config, permit_undefined_jinja=permit_undefined_jinja),
resolved_packages=partial(resolved_packages, initial_metadata,
permit_undefined_jinja=permit_undefined_jinja,
bypass_env_check=bypass_env_check),
time=time,
datetime=datetime,
environ=environ)
return ctx
| {
"repo_name": "pelson/conda-build",
"path": "conda_build/jinja_context.py",
"copies": "2",
"size": "24722",
"license": "bsd-3-clause",
"hash": -8923736294765678,
"line_mean": 45.382739212,
"line_max": 167,
"alpha_frac": 0.5962300785,
"autogenerated": false,
"ratio": 3.9733204757312763,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5569550554231276,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
import tables as tb
from datashape import Record, from_numpy, datetime_, date_
from blaze.expr import (Selection, Head, Field, Broadcast, Projection, Symbol,
Sort, Reduction, count, Slice, Expr, nelements)
from blaze.compatibility import basestring, map
from ..dispatch import dispatch
__all__ = ['drop', 'create_index']
@dispatch(tb.Table)
def discover(t):
return t.shape[0] * Record([[col, discover(getattr(t.cols, col))]
for col in t.colnames])
@dispatch(tb.Column)
def discover(c):
dshape = from_numpy(c.shape, c.dtype)
return {'time64': datetime_, 'time32': date_}.get(c.type,
dshape.subshape[1])
@dispatch(tb.Table)
def drop(t):
t.remove()
@dispatch(tb.Table, basestring)
def create_index(t, column, name=None, **kwargs):
create_index(getattr(t.cols, column), **kwargs)
@dispatch(tb.Table, list)
def create_index(t, columns, name=None, **kwargs):
if not all(map(partial(hasattr, t.cols), columns)):
raise ValueError('table %s does not have all passed in columns %s' %
(t, columns))
for column in columns:
create_index(t, column, **kwargs)
@dispatch(tb.Column)
def create_index(c, optlevel=9, kind='full', name=None, **kwargs):
c.create_index(optlevel=optlevel, kind=kind, **kwargs)
@dispatch(Selection, tb.Table)
def compute_up(expr, data, **kwargs):
predicate = optimize(expr.predicate, data)
assert isinstance(predicate, Broadcast)
s = predicate._scalars[0]
cols = [s[field] for field in s.fields]
expr_str = print_numexpr(cols, predicate._scalar_expr)
return data.read_where(expr_str)
@dispatch(Symbol, tb.Table)
def compute_up(ts, t, **kwargs):
return t
@dispatch(Reduction, (tb.Column, tb.Table))
def compute_up(r, c, **kwargs):
return compute_up(r, c[:])
@dispatch(Projection, tb.Table)
def compute_up(proj, t, **kwargs):
# only options here are
# read the whole thing in and then select
# or
# create an output array that is at most the size of the on disk table and
    # fill it with the columns iteratively
# both of these options aren't ideal but pytables has no way to select
# multiple column subsets so pick the one where we can optimize for the best
# case rather than prematurely pessimizing
#
# TODO: benchmark on big tables because i'm not sure exactly what the
# implications here are for memory usage
columns = proj.fields
dtype = np.dtype([(col, t.dtype[col]) for col in columns])
out = np.empty(t.shape, dtype=dtype)
for c in columns:
out[c] = t.col(c)
return out
@dispatch(Field, tb.File)
def compute_up(expr, data, **kwargs):
return data.getNode('/')._v_children[expr._name]
@dispatch(Field, tb.Node)
def compute_up(expr, data, **kwargs):
return data._v_children[expr._name]
@dispatch(Field, tb.Table)
def compute_up(c, t, **kwargs):
return getattr(t.cols, c._name)
@dispatch(count, tb.Column)
def compute_up(r, c, **kwargs):
return len(c)
@dispatch(Head, (tb.Column, tb.Table))
def compute_up(h, t, **kwargs):
return t[:h.n]
@dispatch(Broadcast, tb.Table)
def compute_up(expr, data, **kwargs):
if len(expr._children) != 1:
raise ValueError("Only one child in Broadcast allowed")
s = expr._scalars[0]
cols = [s[field] for field in s.fields]
expr_str = print_numexpr(cols, expr._scalar_expr)
uservars = dict((c, getattr(data.cols, c)) for c in s.fields)
e = tb.Expr(expr_str, uservars=uservars, truediv=True)
return e.eval()
@dispatch(Sort, tb.Table)
def compute_up(s, t, **kwargs):
if isinstance(s.key, Field) and s.key._child.isidentical(s._child):
key = s.key._name
else:
key = s.key
assert hasattr(t.cols, key), 'Table has no column(s) %s' % s.key
result = t.read_sorted(sortby=key, checkCSI=True)
if s.ascending:
return result
return result[::-1]
@dispatch(Slice, (tb.Table, tb.Column))
def compute_up(expr, x, **kwargs):
return x[expr.index]
from .numexpr import broadcast_numexpr_collect, print_numexpr
from ..expr import Arithmetic, RealMath, USub, Not
Broadcastable = (Arithmetic, RealMath, Field, Not, USub)
WantToBroadcast = (Arithmetic, RealMath, Not, USub)
@dispatch(Expr, tb.Table)
def optimize(expr, seq):
return broadcast_numexpr_collect(expr, Broadcastable=Broadcastable,
WantToBroadcast=WantToBroadcast)
@dispatch(nelements, tb.Table)
def compute_up(expr, x, **kwargs):
return compute_up.dispatch(type(expr), np.ndarray)(expr, x, **kwargs)
| {
"repo_name": "xlhtc007/blaze",
"path": "blaze/compute/pytables.py",
"copies": "10",
"size": "4785",
"license": "bsd-3-clause",
"hash": 708549473064104800,
"line_mean": 27.8253012048,
"line_max": 80,
"alpha_frac": 0.6570532915,
"autogenerated": false,
"ratio": 3.297725706409373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00028965394221485,
"num_lines": 166
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
import tables as tb
import datashape as ds
from blaze.expr import (Selection, Head, Field, Broadcast, Projection,
Symbol, Sort, Reduction, count, Symbol, Slice)
from blaze.expr import eval_str
from blaze.compatibility import basestring, map
from ..dispatch import dispatch
__all__ = ['drop', 'create_index']
@dispatch(tb.Table)
def discover(t):
return t.shape[0] * ds.Record([[col, discover(getattr(t.cols, col))]
for col in t.colnames])
@dispatch(tb.Column)
def discover(c):
dshape = ds.from_numpy(c.shape, c.dtype)
return {'time64': ds.datetime_, 'time32': ds.date_}.get(c.type,
dshape.subshape[1])
@dispatch(tb.Table)
def drop(t):
t.remove()
@dispatch(tb.Table, basestring)
def create_index(t, column, name=None, **kwargs):
create_index(getattr(t.cols, column), **kwargs)
@dispatch(tb.Table, list)
def create_index(t, columns, name=None, **kwargs):
if not all(map(partial(hasattr, t.cols), columns)):
raise ValueError('table %s does not have all passed in columns %s' %
(t, columns))
for column in columns:
create_index(t, column, **kwargs)
@dispatch(tb.Column)
def create_index(c, optlevel=9, kind='full', name=None, **kwargs):
c.create_index(optlevel=optlevel, kind=kind, **kwargs)
@dispatch(Selection, tb.Table)
def compute_up(sel, t, **kwargs):
s = eval_str(sel.predicate._expr)
return t.read_where(s)
@dispatch(Symbol, tb.Table)
def compute_up(ts, t, **kwargs):
return t
@dispatch(Reduction, (tb.Column, tb.Table))
def compute_up(r, c, **kwargs):
return compute_up(r, c[:])
@dispatch(Projection, tb.Table)
def compute_up(proj, t, **kwargs):
# only options here are
# read the whole thing in and then select
# or
# create an output array that is at most the size of the on disk table and
    # fill it with the columns iteratively
# both of these options aren't ideal but pytables has no way to select
# multiple column subsets so pick the one where we can optimize for the best
# case rather than prematurely pessimizing
#
# TODO: benchmark on big tables because i'm not sure exactly what the
# implications here are for memory usage
columns = proj.fields
dtype = np.dtype([(col, t.dtype[col]) for col in columns])
out = np.empty(t.shape, dtype=dtype)
for c in columns:
out[c] = t.col(c)
return out
@dispatch(Field, tb.Table)
def compute_up(c, t, **kwargs):
return getattr(t.cols, c._name)
@dispatch(count, tb.Column)
def compute_up(r, c, **kwargs):
return len(c)
@dispatch(Head, (tb.Column, tb.Table))
def compute_up(h, t, **kwargs):
return t[:h.n]
@dispatch(Broadcast, tb.Table)
def compute_up(c, t, **kwargs):
uservars = dict((col, getattr(t.cols, col)) for col in c.active_columns())
e = tb.Expr(str(c._expr), uservars=uservars, truediv=True)
return e.eval()
@dispatch(Sort, tb.Table)
def compute_up(s, t, **kwargs):
if isinstance(s.key, Field) and s.key._child.isidentical(s._child):
key = s.key._name
else:
key = s.key
assert hasattr(t.cols, key), 'Table has no column(s) %s' % s.key
result = t.read_sorted(sortby=key, checkCSI=True)
if s.ascending:
return result
return result[::-1]
@dispatch(Slice, tb.Table)
def compute_up(expr, x, **kwargs):
return x[expr.index]
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/pytables.py",
"copies": "1",
"size": "3582",
"license": "bsd-3-clause",
"hash": -4663923710757874000,
"line_mean": 26.984375,
"line_max": 80,
"alpha_frac": 0.644332775,
"autogenerated": false,
"ratio": 3.262295081967213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4406627856967213,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from ...external.qt import QtGui
from ...external.qt.QtCore import Qt
from ...core import message as msg
from ...clients.histogram_client import HistogramClient
from ..widget_properties import (connect_int_spin, ButtonProperty,
FloatLineProperty,
ValueProperty)
from ..glue_toolbar import GlueToolbar
from ..mouse_mode import HRangeMode
from .data_viewer import DataViewer
from .mpl_widget import MplWidget, defer_draw
from ..qtutil import pretty_number, load_ui
__all__ = ['HistogramWidget']
WARN_SLOW = 10000000
def _hash(x):
return str(id(x))
class HistogramWidget(DataViewer):
LABEL = "Histogram"
_property_set = DataViewer._property_set + \
'component xlog ylog normed cumulative autoscale xmin xmax nbins'.split(
)
xmin = FloatLineProperty('ui.xmin', 'Minimum value')
xmax = FloatLineProperty('ui.xmax', 'Maximum value')
normed = ButtonProperty('ui.normalized_box', 'Normalized?')
autoscale = ButtonProperty('ui.autoscale_box',
'Autoscale view to histogram?')
cumulative = ButtonProperty('ui.cumulative_box', 'Cumulative?')
nbins = ValueProperty('ui.binSpinBox', 'Number of bins')
xlog = ButtonProperty('ui.xlog_box', 'Log-scale the x axis?')
ylog = ButtonProperty('ui.ylog_box', 'Log-scale the y axis?')
def __init__(self, session, parent=None):
super(HistogramWidget, self).__init__(session, parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self.option_widget = QtGui.QWidget()
self.ui = load_ui('histogramwidget', self.option_widget)
self._tweak_geometry()
self.client = HistogramClient(self._data,
self.central_widget.canvas.fig,
artist_container=self._container)
self._init_limits()
self.make_toolbar()
self._connect()
# maps _hash(componentID) -> componentID
self._component_hashes = {}
@staticmethod
def _get_default_tools():
return []
def _init_limits(self):
validator = QtGui.QDoubleValidator(None)
validator.setDecimals(7)
self.ui.xmin.setValidator(validator)
self.ui.xmax.setValidator(validator)
lo, hi = self.client.xlimits
self.ui.xmin.setText(str(lo))
self.ui.xmax.setText(str(hi))
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
def _connect(self):
ui = self.ui
cl = self.client
ui.attributeCombo.currentIndexChanged.connect(
self._set_attribute_from_combo)
ui.attributeCombo.currentIndexChanged.connect(
self._update_minmax_labels)
connect_int_spin(cl, 'nbins', ui.binSpinBox)
ui.normalized_box.toggled.connect(partial(setattr, cl, 'normed'))
ui.autoscale_box.toggled.connect(partial(setattr, cl, 'autoscale'))
ui.cumulative_box.toggled.connect(partial(setattr, cl, 'cumulative'))
ui.xlog_box.toggled.connect(partial(setattr, cl, 'xlog'))
ui.ylog_box.toggled.connect(partial(setattr, cl, 'ylog'))
ui.xmin.editingFinished.connect(self._set_limits)
ui.xmax.editingFinished.connect(self._set_limits)
@defer_draw
def _set_limits(self):
lo = float(self.ui.xmin.text())
hi = float(self.ui.xmax.text())
self.client.xlimits = lo, hi
def _update_minmax_labels(self):
lo, hi = pretty_number(self.client.xlimits)
self.ui.xmin.setText(lo)
self.ui.xmax.setText(hi)
def make_toolbar(self):
result = GlueToolbar(self.central_widget.canvas, self,
name='Histogram')
for mode in self._mouse_modes():
result.add_mode(mode)
self.addToolBar(result)
return result
def _mouse_modes(self):
axes = self.client.axes
def apply_mode(mode):
return self.apply_roi(mode.roi())
rect = HRangeMode(axes, roi_callback=apply_mode)
return [rect]
@defer_draw
def _update_attributes(self):
"""Repopulate the combo box that selects the quantity to plot"""
combo = self.ui.attributeCombo
component = self.component
new = self.client.component or component
combo.blockSignals(True)
combo.clear()
# implementation note:
# PySide doesn't robustly store python objects with setData
# use _hash(x) instead
model = QtGui.QStandardItemModel()
data_ids = set(_hash(d) for d in self._data)
self._component_hashes = dict((_hash(c), c) for d in self._data
for c in d.components)
found = False
for d in self._data:
if d not in self._container:
continue
item = QtGui.QStandardItem(d.label)
item.setData(_hash(d), role=Qt.UserRole)
assert item.data(Qt.UserRole) == _hash(d)
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
model.appendRow(item)
for c in d.visible_components:
if not d.get_component(c).numeric:
continue
if c is new:
found = True
item = QtGui.QStandardItem(c.label)
item.setData(_hash(c), role=Qt.UserRole)
model.appendRow(item)
combo.setModel(model)
# separators below data items
for i in range(combo.count()):
if combo.itemData(i) in data_ids:
combo.insertSeparator(i + 1)
combo.blockSignals(False)
if found:
self.component = new
else:
combo.setCurrentIndex(2) # skip first data + separator
self._set_attribute_from_combo()
@property
def component(self):
combo = self.ui.attributeCombo
index = combo.currentIndex()
return self._component_hashes.get(combo.itemData(index), None)
@component.setter
def component(self, component):
combo = self.ui.attributeCombo
if combo.count() == 0: # cold start problem, when restoring
self._update_attributes()
# combo.findData doesn't seem to work robustly
for i in range(combo.count()):
data = combo.itemData(i)
if data == _hash(component):
combo.setCurrentIndex(i)
return
raise IndexError("Component not present: %s" % component)
@defer_draw
def _set_attribute_from_combo(self, *args):
self.client.set_component(self.component)
self.update_window_title()
@defer_draw
def add_data(self, data):
""" Add data item to combo box.
If first addition, also update attributes """
if self.data_present(data):
return True
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
self.client.add_layer(data)
self._update_attributes()
self._update_minmax_labels()
return True
def add_subset(self, subset):
pass
def _remove_data(self, data):
""" Remove data item from the combo box """
pass
def data_present(self, data):
return data in self._container
def register_to_hub(self, hub):
super(HistogramWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self,
msg.DataCollectionDeleteMessage,
handler=lambda x: self._remove_data(x.data))
hub.subscribe(self,
msg.DataUpdateMessage,
handler=lambda *args: self._update_labels())
hub.subscribe(self,
msg.ComponentsChangedMessage,
handler=lambda x: self._update_attributes())
def unregister(self, hub):
super(HistogramWidget, self).unregister(hub)
self.client.unregister(hub)
hub.unsubscribe_all(self)
@property
def window_title(self):
c = self.client.component
if c is not None:
label = str(c.label)
else:
label = 'Histogram'
return label
def _update_labels(self):
self.update_window_title()
self._update_attributes()
def __str__(self):
return "Histogram Widget"
def options_widget(self):
return self.option_widget
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/histogram_widget.py",
"copies": "1",
"size": "8704",
"license": "bsd-3-clause",
"hash": -879310024402242700,
"line_mean": 32.0950570342,
"line_max": 80,
"alpha_frac": 0.5960477941,
"autogenerated": false,
"ratio": 4.048372093023255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00004694174529409003,
"num_lines": 263
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from . import converters, exceptions, filters, validators
from ._config import get_run_validators, set_run_validators
from ._funcs import asdict, assoc, astuple, evolve, has
from ._make import (
NOTHING, Attribute, Factory, attrib, attrs, fields, fields_dict,
make_class, validate
)
__version__ = "18.1.0"
__title__ = "attrs"
__description__ = "Classes Without Boilerplate"
__uri__ = "http://www.attrs.org/"
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
s = attributes = attrs
ib = attr = attrib
dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
__all__ = [
"Attribute",
"Factory",
"NOTHING",
"asdict",
"assoc",
"astuple",
"attr",
"attrib",
"attributes",
"attrs",
"converters",
"evolve",
"exceptions",
"fields",
"fields_dict",
"filters",
"get_run_validators",
"has",
"ib",
"make_class",
"s",
"set_run_validators",
"validate",
"validators",
]
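# Illustrative sketch (not part of the attrs API surface above): a tiny class defined with
# the `s`/`ib` aliases re-exported by this module; the name `Point` is made up for
# demonstration and the function is never called here.
def _example_point():
    @s
    class Point(object):
        x = ib(default=0)
        y = ib(default=0)
    # attrs generates __init__, __repr__, __eq__, ...; the comparison below holds.
    return Point(1, 2) == Point(1, 2)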
| {
"repo_name": "cbrewster/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/attrs/src/attr/__init__.py",
"copies": "41",
"size": "1196",
"license": "mpl-2.0",
"hash": -7945556152844370000,
"line_mean": 19.9824561404,
"line_max": 68,
"alpha_frac": 0.6045150502,
"autogenerated": false,
"ratio": 3.197860962566845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 57
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import cloudpickle
from dask.callbacks import Callback
from kazoo.client import NoNodeError
from kazoo.recipe.lock import Lock as ZkLock
class ZookeeperBase(Callback):
def __init__(self, zk, name, ns='/epos', ephemeral=False):
self.zk = zk
self.ephemeral = ephemeral
template = '/{ns}/{name}/{task}'.replace('//', '/')
self.path = partial(template.format, ns=ns, name=name)
def _load(self, task):
return cloudpickle.loads(self.zk.get(self.path(task=task))[0])
def _save(self, task, value):
return self.zk.create(self.path(task=task), cloudpickle.dumps(value),
makepath=True)
def _finish(self, dsk, state, failed):
pass
def __exit__(self, *args):
if self.ephemeral:
self.zk.delete(self.path(task='').rstrip('/'), recursive=True)
super(ZookeeperBase, self).__exit__(*args)
class Persist(ZookeeperBase):
def _start(self, dsk):
persisted = {}
for k, v in dsk.items():
try:
persisted[k] = self._load(k)
except NoNodeError:
pass
overlap = set(dsk) & set(persisted)
for key in overlap:
dsk[key] = persisted[key]
def _posttask(self, key, value, dsk, state, id):
self._save(key, value)
class Lock(ZookeeperBase):
def __init__(self, zk, name, ns='/epos', ephemeral=False,
blocking=True, timeout=None):
super(Lock, self).__init__(
zk=zk, name=name, ns=ns, ephemeral=ephemeral)
self.blocking = blocking
self.timeout = timeout
self.locks = {}
def _pretask(self, key, dsk, state):
self.locks[key] = ZkLock(self.zk, self.path(task=key))
self.locks[key].acquire(blocking=self.blocking,
timeout=self.timeout)
def _posttask(self, key, value, dsk, state, id):
self.locks[key].release()
del self.locks[key]
def __exit__(self, *args):
for key, lock in self.locks.items():
lock.release()
del self.locks[key]
super(Lock, self).__exit__(*args)
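# Minimal usage sketch (assumes a reachable ZooKeeper and a connected kazoo KazooClient
# passed in as `zk`; the graph and the name 'demo' below are hypothetical):
def _example_persist(zk):
    from dask.threaded import get
    dsk = {'x': 1, 'y': (lambda v: v + 1, 'x')}
    # Each computed key is pickled to /epos/demo/<key>; on the next run the stored
    # values are injected into the graph instead of being recomputed.
    with Persist(zk, name='demo'):
        return get(dsk, 'y')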
| {
"repo_name": "lensacom/dask.mesos",
"path": "daskos/context.py",
"copies": "1",
"size": "2267",
"license": "apache-2.0",
"hash": 2089901027017111800,
"line_mean": 28.4415584416,
"line_max": 77,
"alpha_frac": 0.5765328628,
"autogenerated": false,
"ratio": 3.644694533762058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47212273965620577,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
from matplotlib.ticker import AutoLocator, MaxNLocator, LogLocator
from matplotlib.ticker import (LogFormatterMathtext, ScalarFormatter,
FuncFormatter)
from ..core.data import CategoricalComponent
def small_view(data, attribute):
"""
Extract a downsampled view from a dataset, for quick
statistical summaries
"""
shp = data.shape
view = tuple([slice(None, None, max(s / 50, 1)) for s in shp])
return data[attribute, view]
def small_view_array(data):
"""
Same as small_view, except using a numpy array as input
"""
shp = data.shape
view = tuple([slice(None, None, max(s / 50, 1)) for s in shp])
return np.asarray(data)[view]
def visible_limits(artists, axis):
"""Determines the data limits for the data in a set of artists
Ignores non-visible artists
    Assumes each artist has a get_data method which returns a tuple of x,y
:param artists: An iterable collection of artists
:param axis: Which axis to compute. 0=xaxis, 1=yaxis
:rtype: A tuple of min, max for the requested axis, or None if
no data present
"""
data = []
for art in artists:
if not art.visible:
continue
xy = art.get_data()
assert isinstance(xy, tuple)
val = xy[axis]
if val.size > 0:
data.append(xy[axis])
if len(data) == 0:
return
data = np.hstack(data)
if data.size == 0:
return
data = data[np.isfinite(data)]
if data.size == 0:
return
lo, hi = np.nanmin(data), np.nanmax(data)
if not np.isfinite(lo):
return
return lo, hi
def tick_linker(all_categories, pos, *args):
try:
pos = np.round(pos)
return all_categories[int(pos)]
except IndexError:
return ''
def update_ticks(axes, coord, components, is_log):
""" Changes the axes to have the proper tick formatting based on the
type of component.
:param axes: A matplotlib axis object to alter
:param coord: 'x' or 'y'
:param components: A list() of components that are plotted along this axis
:param is_log: Boolean for log-scale.
:kwarg max_categories: The maximum number of categories to display.
:return: None or #categories if components is Categorical
"""
if coord == 'x':
axis = axes.xaxis
elif coord == 'y':
axis = axes.yaxis
else:
raise TypeError("coord must be one of x,y")
is_cat = all(isinstance(comp, CategoricalComponent) for comp in components)
if is_log:
axis.set_major_locator(LogLocator())
axis.set_major_formatter(LogFormatterMathtext())
elif is_cat:
all_categories = np.empty((0,), dtype=np.object)
for comp in components:
all_categories = np.union1d(comp._categories, all_categories)
locator = MaxNLocator(10, integer=True)
locator.view_limits(0, all_categories.shape[0])
format_func = partial(tick_linker, all_categories)
formatter = FuncFormatter(format_func)
axis.set_major_locator(locator)
axis.set_major_formatter(formatter)
return all_categories.shape[0]
else:
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
| {
"repo_name": "JudoWill/glue",
"path": "glue/clients/util.py",
"copies": "1",
"size": "3412",
"license": "bsd-3-clause",
"hash": -1243107895231004400,
"line_mean": 28.6695652174,
"line_max": 79,
"alpha_frac": 0.6377491208,
"autogenerated": false,
"ratio": 3.8423423423423424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49800914631423426,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
from scipy.spatial import KDTree
from scipy.special import psi
from joblib import Parallel, delayed
def estimate(X, Y, k=None, n_jobs=1):
""" Estimate univesal k-NN divergence.
Parameters
----------
X, Y:
2-dimensional array where each row is a sample.
k:
k-NN to be used. None for adaptive choice.
n_jobs:
number of jobs to run in parallel. Python 2 may only work with
``n_jobs=1``.
"""
if not (isinstance(k, int) or k is None):
raise ValueError('k has incorrect type.')
if k is not None and k <= 0:
raise ValueError('k cannot be <= 0')
X = np.array(X)
Y = np.array(Y)
if len(X.shape) != 2 or len(Y.shape) != 2:
raise ValueError('X or Y has incorrect dimension.')
if X.shape[0] <= 1 or Y.shape[0] <= 1:
raise ValueError('number of samples is not sufficient.')
if X.shape[1] != Y.shape[1]:
raise ValueError('numbers of columns of X and Y are different.')
d = X.shape[1]
n = X.shape[0]
m = Y.shape[0]
X_tree = KDTree(X)
Y_tree = KDTree(Y)
P = Parallel(n_jobs)
nhu_ro = P(delayed(__calc_nu_rho)(x, X_tree, Y_tree, k) for x in X)
r = (d / n) * sum(nhu_ro) + np.log(m / (n - 1))
if k is None:
l_k = P(delayed(__calc_l_k)(x, X_tree, Y_tree) for x in X)
r += (1 / n) * sum(l_k)
return r
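# Minimal usage sketch (illustrative only, never called here): estimate the divergence
# between two 2-D Gaussian samples; for identical distributions the value should be
# close to zero.
def _example_estimate():
    rng = np.random.RandomState(0)
    X = rng.normal(0.0, 1.0, size=(500, 2))
    Y = rng.normal(0.5, 1.0, size=(500, 2))
    return estimate(X, Y, k=5)   # expected to be a small positive number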
def __get_epsilon(a, X_tree, Y_tree):
offset_X = len([None for x in X_tree.data if (x == np.array(a)).all()])
offset_Y = len([None for y in Y_tree.data if (y == np.array(a)).all()])
rho_d, _ = X_tree.query([a], offset_X+1)
nu_d, _ = Y_tree.query([a], offset_Y+1)
rho_d = rho_d[0] if offset_X == 0 else rho_d[0][-1]
nu_d = nu_d[0] if offset_Y == 0 else nu_d[0][-1]
return max(rho_d, nu_d) + 0.5 ** 40
def __get_epsilon_sample_num(a, tree, X_tree, Y_tree, default_offset=0):
e = __get_epsilon(a, X_tree, Y_tree)
return len(tree.query_ball_point(a, e)) - default_offset
def __get_distance(a, tree, X_tree, Y_tree, k, default_offset):
if k is None:
k_ = __get_epsilon_sample_num(a, tree, X_tree, Y_tree)
else:
k_ = k + default_offset
d, _ = tree.query([a], k_)
return d[0] if k_ == 1 else d[0][-1]
def __calc_nu_rho(x, X_tree, Y_tree, k):
rho = partial(__get_distance, tree=X_tree, default_offset=1,
X_tree=X_tree, Y_tree=Y_tree, k=k)
nu = partial(__get_distance, tree=Y_tree, default_offset=0,
X_tree=X_tree, Y_tree=Y_tree, k=k)
return np.log(nu(x) / rho(x))
def __calc_l_k(x, X_tree, Y_tree):
_l = partial(__get_epsilon_sample_num, tree=X_tree, default_offset=1,
X_tree=X_tree, Y_tree=Y_tree)
_k = partial(__get_epsilon_sample_num, tree=Y_tree, default_offset=0,
X_tree=X_tree, Y_tree=Y_tree)
return psi(_l(x)) - psi(_k(x))
| {
"repo_name": "slaypni/universal-divergence",
"path": "src/estimator.py",
"copies": "1",
"size": "3010",
"license": "mit",
"hash": 7462912781235174000,
"line_mean": 31.7173913043,
"line_max": 75,
"alpha_frac": 0.5641196013,
"autogenerated": false,
"ratio": 2.810457516339869,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8874577117639869,
"avg_score": 0,
"num_lines": 92
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
import numpy.fft as npfft
from .core import map_blocks
chunk_error = ("Dask array only supports taking an FFT along an axis that \n"
"has a single chunk. An FFT operation was tried on axis %s \n"
"which has chunks %s. To change the array's chunks use "
"dask.Array.rechunk.")
fft_preamble = """
Wrapping of numpy.fft.%s
The axis along which the FFT is applied must have only one chunk. To change
the array's chunking use dask.Array.rechunk.
The numpy.fft.%s docstring follows below:
"""
def _fft_wrap(fft_func, dtype, out_chunk_fn):
def func(a, n=None, axis=-1):
if len(a.chunks[axis]) != 1:
raise ValueError(chunk_error % (axis, a.chunks[axis]))
chunks = out_chunk_fn(a, n, axis)
return map_blocks(partial(fft_func, n=n, axis=axis), a, dtype=dtype,
chunks=chunks)
np_name = fft_func.__name__
func.__doc__ = (fft_preamble % (np_name, np_name)) + fft_func.__doc__
func.__name__ = np_name
return func
def _fft_out_chunks(a, n, axis):
""" For computing the output chunks of fft and ifft"""
if n is None:
return a.chunks
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _rfft_out_chunks(a, n, axis):
if n is None:
n = a.chunks[axis][0]
chunks = list(a.chunks)
chunks[axis] = (n//2 + 1,)
return chunks
def _irfft_out_chunks(a, n, axis):
if n is None:
n = 2 * (a.chunks[axis][0] - 1)
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _hfft_out_chunks(a, n, axis):
if n is None:
n = 2 * (a.chunks[axis][0] - 1)
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _ihfft_out_chunks(a, n, axis):
if n is None:
n = a.chunks[axis][0]
chunks = list(a.chunks)
if n % 2 == 0:
m = (n//2) + 1
else:
m = (n + 1)//2
chunks[axis] = (m,)
return chunks
fft = _fft_wrap(npfft.fft, np.complex_, _fft_out_chunks)
ifft = _fft_wrap(npfft.ifft, np.complex_, _fft_out_chunks)
rfft = _fft_wrap(npfft.rfft, np.complex_, _rfft_out_chunks)
irfft = _fft_wrap(npfft.irfft, np.float_, _irfft_out_chunks)
hfft = _fft_wrap(npfft.hfft, np.float_, _hfft_out_chunks)
ihfft = _fft_wrap(npfft.ihfft, np.complex_, _ihfft_out_chunks)
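# Usage sketch (assumes numpy and dask are importable; the array values are arbitrary):
def _example_fft():
    import dask.array as da
    x = np.random.random((4, 8))
    d = da.from_array(x, chunks=(2, 8))   # the transformed axis has a single chunk
    return fft(d, axis=-1).compute()      # matches np.fft.fft(x, axis=-1)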
| {
"repo_name": "pombredanne/dask",
"path": "dask/array/fft.py",
"copies": "3",
"size": "2467",
"license": "bsd-3-clause",
"hash": 1902008013467571000,
"line_mean": 22.9514563107,
"line_max": 77,
"alpha_frac": 0.5905958654,
"autogenerated": false,
"ratio": 2.9975698663426487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5088165731742649,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
from ..core.client import Client
from ..core.data import Data, IncompatibleAttribute, ComponentID, CategoricalComponent
from ..core.subset import RoiSubsetState, RangeSubsetState, CategoricalRoiSubsetState, AndState
from ..core.roi import PolygonalROI, RangeROI, CategoricalRoi, RectangularROI
from ..core.util import relim
from ..core.edit_subset_mode import EditSubsetMode
from ..core.message import ComponentReplacedMessage
from ..utils import lookup_class
from .viz_client import init_mpl
from .layer_artist import ScatterLayerArtist, LayerArtistContainer
from .util import update_ticks, visible_limits
from ..core.callback_property import (CallbackProperty, add_callback,
delay_callback)
class ScatterClient(Client):
"""
A client class that uses matplotlib to visualize tables as scatter plots.
"""
xmin = CallbackProperty(0)
xmax = CallbackProperty(1)
ymin = CallbackProperty(0)
ymax = CallbackProperty(1)
ylog = CallbackProperty(False)
xlog = CallbackProperty(False)
yflip = CallbackProperty(False)
xflip = CallbackProperty(False)
xatt = CallbackProperty()
yatt = CallbackProperty()
jitter = CallbackProperty()
def __init__(self, data=None, figure=None, axes=None,
artist_container=None):
"""
Create a new ScatterClient object
:param data: :class:`~glue.core.data.DataCollection` to use
:param figure:
Which matplotlib figure instance to draw to. One will be created if
not provided
:param axes:
Which matplotlib axes instance to use. Will be created if necessary
"""
Client.__init__(self, data=data)
figure, axes = init_mpl(figure, axes)
self.artists = artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
self._layer_updated = False # debugging
self._xset = False
self._yset = False
self.axes = axes
self._connect()
self._set_limits()
def is_layer_present(self, layer):
""" True if layer is plotted """
return layer in self.artists
def get_layer_order(self, layer):
"""If layer exists as a single artist, return its zorder.
Otherwise, return None"""
artists = self.artists[layer]
if len(artists) == 1:
return artists[0].zorder
else:
return None
@property
def layer_count(self):
return len(self.artists)
def _connect(self):
add_callback(self, 'xlog', self._set_xlog)
add_callback(self, 'ylog', self._set_ylog)
add_callback(self, 'xflip', self._set_limits)
add_callback(self, 'yflip', self._set_limits)
add_callback(self, 'xmin', self._set_limits)
add_callback(self, 'xmax', self._set_limits)
add_callback(self, 'ymin', self._set_limits)
add_callback(self, 'ymax', self._set_limits)
add_callback(self, 'xatt', partial(self._set_xydata, 'x'))
add_callback(self, 'yatt', partial(self._set_xydata, 'y'))
add_callback(self, 'jitter', self._jitter)
self.axes.figure.canvas.mpl_connect('draw_event',
lambda x: self._pull_properties())
def _set_limits(self, *args):
xlim = min(self.xmin, self.xmax), max(self.xmin, self.xmax)
if self.xflip:
xlim = xlim[::-1]
ylim = min(self.ymin, self.ymax), max(self.ymin, self.ymax)
if self.yflip:
ylim = ylim[::-1]
xold = self.axes.get_xlim()
yold = self.axes.get_ylim()
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
if xlim != xold or ylim != yold:
self._redraw()
def plottable_attributes(self, layer, show_hidden=False):
data = layer.data
comp = data.components if show_hidden else data.visible_components
return [c for c in comp if
data.get_component(c).numeric]
def add_layer(self, layer):
""" Adds a new visual layer to a client, to display either a dataset
or a subset. Updates both the client data structure and the
plot.
Returns the created layer artist
:param layer: the layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
"""
if layer.data not in self.data:
raise TypeError("Layer not in data collection")
if layer in self.artists:
return self.artists[layer][0]
result = ScatterLayerArtist(layer, self.axes)
self.artists.append(result)
self._update_layer(layer)
self._ensure_subsets_added(layer)
return result
def _ensure_subsets_added(self, layer):
if not isinstance(layer, Data):
return
for subset in layer.subsets:
self.add_layer(subset)
def _visible_limits(self, axis):
"""Return the min-max visible data boundaries for given axis"""
return visible_limits(self.artists, axis)
def _snap_xlim(self):
"""
        Reset the plotted x range to show all the data
"""
is_log = self.xlog
rng = self._visible_limits(0)
if rng is None:
return
rng = relim(rng[0], rng[1], is_log)
if self.xflip:
rng = rng[::-1]
self.axes.set_xlim(rng)
self._pull_properties()
def _snap_ylim(self):
"""
        Reset the plotted y range to show all the data
"""
rng = [np.infty, -np.infty]
is_log = self.ylog
rng = self._visible_limits(1)
if rng is None:
return
rng = relim(rng[0], rng[1], is_log)
if self.yflip:
rng = rng[::-1]
self.axes.set_ylim(rng)
self._pull_properties()
def snap(self):
"""Rescale axes to fit the data"""
self._snap_xlim()
self._snap_ylim()
self._redraw()
def set_visible(self, layer, state):
""" Toggle a layer's visibility
:param layer: which layer to modify
        :type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
:param state: True to show. false to hide
:type state: boolean
"""
if layer not in self.artists:
return
for a in self.artists[layer]:
a.visible = state
self._redraw()
def is_visible(self, layer):
if layer not in self.artists:
return False
return any(a.visible for a in self.artists[layer])
def _set_xydata(self, coord, attribute, snap=True):
""" Redefine which components get assigned to the x/y axes
:param coord: 'x' or 'y'
Which axis to reassign
:param attribute:
Which attribute of the data to use.
:type attribute: core.data.ComponentID
:param snap:
If True, will rescale x/y axes to fit the data
:type snap: bool
"""
if coord not in ('x', 'y'):
raise TypeError("coord must be one of x,y")
if not isinstance(attribute, ComponentID):
raise TypeError("attribute must be a ComponentID")
# update coordinates of data and subsets
if coord == 'x':
new_add = not self._xset
self.xatt = attribute
self._xset = self.xatt is not None
elif coord == 'y':
new_add = not self._yset
self.yatt = attribute
self._yset = self.yatt is not None
# update plots
list(map(self._update_layer, self.artists.layers))
if coord == 'x' and snap:
self._snap_xlim()
if new_add:
self._snap_ylim()
elif coord == 'y' and snap:
self._snap_ylim()
if new_add:
self._snap_xlim()
self._update_axis_labels()
self._pull_properties()
self._redraw()
def _process_categorical_roi(self, roi):
""" Returns a RoiSubsetState object.
"""
if isinstance(roi, RectangularROI):
subsets = []
axes = [('x', roi.xmin, roi.xmax),
('y', roi.ymin, roi.ymax)]
for coord, lo, hi in axes:
comp = list(self._get_data_components(coord))
if comp:
if comp[0].categorical:
subset = CategoricalRoiSubsetState.from_range(comp[0], self.xatt, lo, hi)
else:
subset = RangeSubsetState(lo, hi, self.xatt)
else:
subset = None
subsets.append(subset)
else:
raise AssertionError
return AndState(*subsets)
def apply_roi(self, roi):
# every editable subset is updated
# using specified ROI
if isinstance(roi, RangeROI):
lo, hi = roi.range()
att = self.xatt if roi.ori == 'x' else self.yatt
if self._check_categorical(att):
comp = list(self._get_data_components(roi.ori))
if comp:
subset_state = CategoricalRoiSubsetState.from_range(comp[0], att, lo, hi)
else:
subset_state = None
else:
subset_state = RangeSubsetState(lo, hi, att)
else:
if self._check_categorical(self.xatt) or self._check_categorical(self.yatt):
subset_state = self._process_categorical_roi(roi)
else:
subset_state = RoiSubsetState()
subset_state.xatt = self.xatt
subset_state.yatt = self.yatt
x, y = roi.to_polygon()
subset_state.roi = PolygonalROI(x, y)
mode = EditSubsetMode()
visible = [d for d in self._data if self.is_visible(d)]
focus = visible[0] if len(visible) > 0 else None
mode.update(self._data, subset_state, focus_data=focus)
def _set_xlog(self, state):
""" Set the x axis scaling
:param state:
The new scaling for the x axis
:type state: string ('log' or 'linear')
"""
mode = 'log' if state else 'linear'
lim = self.axes.get_xlim()
self.axes.set_xscale(mode)
# Rescale if switching to log with negative bounds
if state and min(lim) <= 0:
self._snap_xlim()
self._redraw()
def _set_ylog(self, state):
""" Set the y axis scaling
:param state: The new scaling for the y axis
:type state: string ('log' or 'linear')
"""
mode = 'log' if state else 'linear'
lim = self.axes.get_ylim()
self.axes.set_yscale(mode)
# Rescale if switching to log with negative bounds
if state and min(lim) <= 0:
self._snap_ylim()
self._redraw()
def _remove_data(self, message):
"""Process DataCollectionDeleteMessage"""
for s in message.data.subsets:
self.delete_layer(s)
self.delete_layer(message.data)
def _remove_subset(self, message):
self.delete_layer(message.subset)
def delete_layer(self, layer):
if layer not in self.artists:
return
self.artists.pop(layer)
self._redraw()
assert not self.is_layer_present(layer)
def _update_data(self, message):
data = message.sender
self._update_layer(data)
def _numerical_data_changed(self, message):
data = message.sender
self._update_layer(data, force=True)
for s in data.subsets:
self._update_layer(s, force=True)
def _redraw(self):
self.axes.figure.canvas.draw()
def _jitter(self, *args):
for attribute in [self.xatt, self.yatt]:
if attribute is not None:
for data in self.data:
try:
comp = data.get_component(attribute)
comp.jitter(method=self.jitter)
except (IncompatibleAttribute, NotImplementedError):
continue
def _update_axis_labels(self, *args):
self.axes.set_xlabel(self.xatt)
self.axes.set_ylabel(self.yatt)
if self.xatt is not None:
update_ticks(self.axes, 'x',
list(self._get_data_components('x')),
self.xlog)
if self.yatt is not None:
update_ticks(self.axes, 'y',
list(self._get_data_components('y')),
self.ylog)
def _add_subset(self, message):
subset = message.sender
# only add subset if data layer present
if subset.data not in self.artists:
return
subset.do_broadcast(False)
self.add_layer(subset)
subset.do_broadcast(True)
def add_data(self, data):
result = self.add_layer(data)
for subset in data.subsets:
self.add_layer(subset)
return result
@property
def data(self):
"""The data objects in the scatter plot"""
return list(self._data)
def _get_data_components(self, coord):
""" Returns the components for each dataset for x and y axes.
"""
if coord == 'x':
attribute = self.xatt
elif coord == 'y':
attribute = self.yatt
else:
raise TypeError('coord must be x or y')
for data in self._data:
try:
yield data.get_component(attribute)
except IncompatibleAttribute:
pass
def _check_categorical(self, attribute):
""" A simple function to figure out if an attribute is categorical.
:param attribute: a core.Data.ComponentID
:return: True iff the attribute represents a CategoricalComponent
"""
for data in self._data:
try:
comp = data.get_component(attribute)
if isinstance(comp, CategoricalComponent):
return True
except IncompatibleAttribute:
pass
return False
def _update_subset(self, message):
self._update_layer(message.sender)
def restore_layers(self, layers, context):
""" Re-generate a list of plot layers from a glue-serialized list"""
for l in layers:
cls = lookup_class(l.pop('_type'))
if cls != ScatterLayerArtist:
raise ValueError("Scatter client cannot restore layer of type "
"%s" % cls)
props = dict((k, context.object(v)) for k, v in l.items())
layer = self.add_layer(props['layer'])
layer.properties = props
def _update_layer(self, layer, force=False):
""" Update both the style and data for the requested layer"""
if self.xatt is None or self.yatt is None:
return
if layer not in self.artists:
return
self._layer_updated = True
for art in self.artists[layer]:
art.xatt = self.xatt
art.yatt = self.yatt
art.force_update() if force else art.update()
self._redraw()
def _pull_properties(self):
xlim = self.axes.get_xlim()
ylim = self.axes.get_ylim()
xsc = self.axes.get_xscale()
ysc = self.axes.get_yscale()
xflip = (xlim[1] < xlim[0])
yflip = (ylim[1] < ylim[0])
with delay_callback(self, 'xmin', 'xmax', 'xflip', 'xlog'):
self.xmin = min(xlim)
self.xmax = max(xlim)
self.xflip = xflip
self.xlog = (xsc == 'log')
with delay_callback(self, 'ymin', 'ymax', 'yflip', 'ylog'):
self.ymin = min(ylim)
self.ymax = max(ylim)
self.yflip = yflip
self.ylog = (ysc == 'log')
def _on_component_replace(self, msg):
old = msg.old
new = msg.new
if self.xatt is old:
self.xatt = new
if self.yatt is old:
self.yatt = new
def register_to_hub(self, hub):
super(ScatterClient, self).register_to_hub(hub)
hub.subscribe(self, ComponentReplacedMessage, self._on_component_replace)
| {
"repo_name": "JudoWill/glue",
"path": "glue/clients/scatter_client.py",
"copies": "1",
"size": "16522",
"license": "bsd-3-clause",
"hash": 7072736969692220000,
"line_mean": 31.8469184891,
"line_max": 97,
"alpha_frac": 0.5601016826,
"autogenerated": false,
"ratio": 3.999515855725006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5059617538325006,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
from .core import Array, normalize_chunks
from ..base import tokenize
def linspace(start, stop, num=50, chunks=None, dtype=None):
"""
Return `num` evenly spaced values over the closed interval [`start`,
`stop`].
    TODO: implement the `endpoint`, `retstep`, and `dtype` keyword args
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The last value of the sequence.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if `num % blocksize != 0`
num : int, optional
        Number of samples to include in the returned dask array, including the
endpoints.
Returns
-------
samples : dask array
"""
num = int(num)
if chunks is None:
raise ValueError("Must supply a chunks= keyword argument")
chunks = normalize_chunks(chunks, (num,))
range_ = stop - start
space = float(range_) / (num - 1)
name = 'linspace-' + tokenize((start, stop, num, chunks, dtype))
dsk = {}
blockstart = start
for i, bs in enumerate(chunks[0]):
blockstop = blockstart + ((bs - 1) * space)
task = (partial(np.linspace, dtype=dtype), blockstart, blockstop, bs)
blockstart = blockstart + (space * bs)
dsk[(name, i)] = task
return Array(dsk, name, chunks, dtype=dtype)
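# Usage sketch (illustrative, never called here): five evenly spaced samples split into
# blocks of at most two samples each.
def _example_linspace():
    darr = linspace(0, 1, num=5, chunks=2)
    return darr.compute()   # -> array([0.  , 0.25, 0.5 , 0.75, 1.  ])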
def arange(*args, **kwargs):
"""
Return evenly spaced values from `start` to `stop` with step size `step`.
The values are half-open [start, stop), so including start and excluding
stop. This is basically the same as python's range function but for dask
arrays.
When using a non-integer step, such as 0.1, the results will often not be
consistent. It is better to use linspace for these cases.
Parameters
----------
start : int, optional
The starting value of the sequence. The default is 0.
stop : int
The end of the interval, this value is excluded from the interval.
    step : int, optional
        The spacing between the values. The default is 1 when not specified.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if `num % chunks != 0`.
num : int, optional
        Number of samples to include in the returned dask array, including the
endpoints.
Returns
-------
samples : dask array
"""
if len(args) == 1:
start = 0
stop = args[0]
step = 1
elif len(args) == 2:
start = args[0]
stop = args[1]
step = 1
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError('''
arange takes 3 positional arguments: arange([start], stop, [step])
''')
if 'chunks' not in kwargs:
raise ValueError("Must supply a chunks= keyword argument")
chunks = kwargs['chunks']
dtype = kwargs.get('dtype', None)
if dtype is None:
dtype = np.arange(0, 1, step).dtype
range_ = stop - start
num = int(abs(range_ // step))
if (range_ % step) != 0:
num += 1
chunks = normalize_chunks(chunks, (num,))
name = 'arange-' + tokenize((start, stop, step, chunks, num))
dsk = {}
elem_count = 0
for i, bs in enumerate(chunks[0]):
blockstart = start + (elem_count * step)
blockstop = start + ((elem_count + bs) * step)
task = (np.arange, blockstart, blockstop, step, dtype)
dsk[(name, i)] = task
elem_count += bs
return Array(dsk, name, chunks, dtype=dtype)
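# Illustrative sketch (added, not part of the original module): each block of
# the graph above is an independent np.arange over a shifted interval. For
# example, splitting the integers 0..9 into chunks of 4 yields tasks roughly
# equivalent to np.arange(0, 4, 1), np.arange(4, 8, 1) and np.arange(8, 10, 1):
#
#   >>> import dask.array as da                    # doctest: +SKIP
#   >>> x = da.arange(10, chunks=4)                # doctest: +SKIP
#   >>> x.chunks                                   # doctest: +SKIP
#   ((4, 4, 2),)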
def diag(v):
"""Construct a diagonal array, with ``v`` on the diagonal.
Currently only implements diagonal array creation on the zeroth diagonal.
Support for the ``k``th diagonal or diagonal extraction, as per the numpy
interface, is not yet implemented.
Parameters
----------
v : dask array
Returns
-------
out_array : dask array
Examples
--------
>>> diag(arange(3, chunks=3)).compute()
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 2]])
"""
if not isinstance(v, Array):
raise TypeError("v must be a dask array")
if v.ndim != 1:
raise NotImplementedError("Extracting diagonals with `diag` is not "
"implemented.")
chunks_1d = v.chunks[0]
name = 'diag-' + tokenize(v)
blocks = v._keys()
dsk = v.dask.copy()
for i, m in enumerate(chunks_1d):
for j, n in enumerate(chunks_1d):
key = (name, i, j)
if i == j:
dsk[key] = (np.diag, blocks[i])
else:
dsk[key] = (np.zeros, (m, n))
return Array(dsk, name, (chunks_1d, chunks_1d), dtype=v._dtype)
| {
"repo_name": "vikhyat/dask",
"path": "dask/array/creation.py",
"copies": "2",
"size": "4936",
"license": "bsd-3-clause",
"hash": 6356779549676619000,
"line_mean": 26.8870056497,
"line_max": 79,
"alpha_frac": 0.5836709887,
"autogenerated": false,
"ratio": 3.9236883942766294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.550735938297663,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
from .core import Array, normalize_chunks
from . import chunk
from ..base import tokenize
def linspace(start, stop, num=50, chunks=None, dtype=None):
"""
Return `num` evenly spaced values over the closed interval [`start`,
`stop`].
    TODO: implement the `endpoint` and `retstep` keyword args
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The last value of the sequence.
num : int, optional
Number of samples to include in the returned dask array, including the
endpoints.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if `num % blocksize != 0`
Returns
-------
samples : dask array
See Also
--------
dask.array.arange
"""
num = int(num)
if chunks is None:
raise ValueError("Must supply a chunks= keyword argument")
chunks = normalize_chunks(chunks, (num,))
range_ = stop - start
space = float(range_) / (num - 1)
if dtype is None:
dtype = np.linspace(0, 1, 1).dtype
name = 'linspace-' + tokenize((start, stop, num, chunks, dtype))
dsk = {}
blockstart = start
for i, bs in enumerate(chunks[0]):
blockstop = blockstart + ((bs - 1) * space)
task = (partial(np.linspace, dtype=dtype), blockstart, blockstop, bs)
blockstart = blockstart + (space * bs)
dsk[(name, i)] = task
return Array(dsk, name, chunks, dtype=dtype)
def arange(*args, **kwargs):
"""
Return evenly spaced values from `start` to `stop` with step size `step`.
The values are half-open [start, stop), so including start and excluding
stop. This is basically the same as python's range function but for dask
arrays.
When using a non-integer step, such as 0.1, the results will often not be
consistent. It is better to use linspace for these cases.
Parameters
----------
start : int, optional
The starting value of the sequence. The default is 0.
stop : int
The end of the interval, this value is excluded from the interval.
step : int, optional
        The spacing between the values. The default is 1 when not specified.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
Returns
-------
samples : dask array
See Also
--------
dask.array.linspace
"""
if len(args) == 1:
start = 0
stop = args[0]
step = 1
elif len(args) == 2:
start = args[0]
stop = args[1]
step = 1
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError('''
arange takes 3 positional arguments: arange([start], stop, [step])
''')
if 'chunks' not in kwargs:
raise ValueError("Must supply a chunks= keyword argument")
chunks = kwargs['chunks']
dtype = kwargs.get('dtype', None)
if dtype is None:
dtype = np.arange(0, 1, step).dtype
num = max(np.ceil((stop - start) / step), 0)
chunks = normalize_chunks(chunks, (num,))
name = 'arange-' + tokenize((start, stop, step, chunks, num))
dsk = {}
elem_count = 0
for i, bs in enumerate(chunks[0]):
blockstart = start + (elem_count * step)
blockstop = start + ((elem_count + bs) * step)
task = (chunk.arange, blockstart, blockstop, step, bs, dtype)
dsk[(name, i)] = task
elem_count += bs
return Array(dsk, name, chunks, dtype=dtype)
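# Note (added): unlike the earlier variant that calls np.arange directly, this
# version routes each block through chunk.arange and also passes the expected
# block length ``bs``. The extra argument presumably lets the per-block helper
# clip any spurious trailing element that floating-point rounding of
# ``blockstop`` could otherwise produce, so every block matches its advertised
# chunk size.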
| {
"repo_name": "gameduell/dask",
"path": "dask/array/creation.py",
"copies": "3",
"size": "3795",
"license": "bsd-3-clause",
"hash": 763515359029443300,
"line_mean": 26.700729927,
"line_max": 79,
"alpha_frac": 0.6018445323,
"autogenerated": false,
"ratio": 3.982161594963274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6084006127263274,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
from glue.core.callback_property import (CallbackProperty, add_callback,
delay_callback)
from glue.core.message import ComponentReplacedMessage, SettingsChangeMessage
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.roi import RectangularROI
from glue.core.subset import RangeSubsetState, CategoricalROISubsetState, AndState
from glue.core.data import Data, IncompatibleAttribute, ComponentID
from glue.core.client import Client
from glue.core.layer_artist import LayerArtistContainer
from glue.core.state import lookup_class_with_patches
from glue.core.util import relim, update_ticks, visible_limits
from glue.viewers.common.viz_client import init_mpl, update_appearance_from_settings
from .layer_artist import ScatterLayerArtist
class ScatterClient(Client):
"""
A client class that uses matplotlib to visualize tables as scatter plots.
"""
xmin = CallbackProperty(0)
xmax = CallbackProperty(1)
ymin = CallbackProperty(0)
ymax = CallbackProperty(1)
ylog = CallbackProperty(False)
xlog = CallbackProperty(False)
yflip = CallbackProperty(False)
xflip = CallbackProperty(False)
xatt = CallbackProperty()
yatt = CallbackProperty()
jitter = CallbackProperty()
def __init__(self, data=None, figure=None, axes=None,
layer_artist_container=None):
"""
Create a new ScatterClient object
:param data: :class:`~glue.core.data.DataCollection` to use
:param figure:
Which matplotlib figure instance to draw to. One will be created if
not provided
:param axes:
Which matplotlib axes instance to use. Will be created if necessary
"""
Client.__init__(self, data=data)
figure, axes = init_mpl(figure, axes)
self.artists = layer_artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
self._layer_updated = False # debugging
self._xset = False
self._yset = False
self.axes = axes
self._connect()
self._set_limits()
def is_layer_present(self, layer):
""" True if layer is plotted """
return layer in self.artists
def get_layer_order(self, layer):
"""If layer exists as a single artist, return its zorder.
Otherwise, return None"""
artists = self.artists[layer]
if len(artists) == 1:
return artists[0].zorder
else:
return None
@property
def layer_count(self):
return len(self.artists)
def _connect(self):
add_callback(self, 'xlog', self._set_xlog)
add_callback(self, 'ylog', self._set_ylog)
add_callback(self, 'xflip', self._set_limits)
add_callback(self, 'yflip', self._set_limits)
add_callback(self, 'xmin', self._set_limits)
add_callback(self, 'xmax', self._set_limits)
add_callback(self, 'ymin', self._set_limits)
add_callback(self, 'ymax', self._set_limits)
add_callback(self, 'xatt', partial(self._set_xydata, 'x'))
add_callback(self, 'yatt', partial(self._set_xydata, 'y'))
add_callback(self, 'jitter', self._jitter)
self.axes.figure.canvas.mpl_connect('draw_event',
lambda x: self._pull_properties())
def _set_limits(self, *args):
xlim = min(self.xmin, self.xmax), max(self.xmin, self.xmax)
if self.xflip:
xlim = xlim[::-1]
ylim = min(self.ymin, self.ymax), max(self.ymin, self.ymax)
if self.yflip:
ylim = ylim[::-1]
xold = self.axes.get_xlim()
yold = self.axes.get_ylim()
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
if xlim != xold or ylim != yold:
self._redraw()
def plottable_attributes(self, layer, show_hidden=False):
data = layer.data
comp = data.components if show_hidden else data.visible_components
return [c for c in comp if
data.get_component(c).numeric
or data.get_component(c).categorical]
def add_layer(self, layer):
""" Adds a new visual layer to a client, to display either a dataset
or a subset. Updates both the client data structure and the
plot.
Returns the created layer artist
:param layer: the layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
"""
if layer.data not in self.data:
raise TypeError("Layer not in data collection")
if layer in self.artists:
return self.artists[layer][0]
result = ScatterLayerArtist(layer, self.axes)
self.artists.append(result)
self._update_layer(layer)
self._ensure_subsets_added(layer)
return result
def _ensure_subsets_added(self, layer):
if not isinstance(layer, Data):
return
for subset in layer.subsets:
self.add_layer(subset)
def _visible_limits(self, axis):
"""Return the min-max visible data boundaries for given axis"""
return visible_limits(self.artists, axis)
def _snap_xlim(self):
"""
        Reset the plotted x range to show all the data
"""
is_log = self.xlog
rng = self._visible_limits(0)
if rng is None:
return
rng = relim(rng[0], rng[1], is_log)
if self.xflip:
rng = rng[::-1]
self.axes.set_xlim(rng)
self._pull_properties()
def _snap_ylim(self):
"""
        Reset the plotted y range to show all the data
"""
rng = [np.infty, -np.infty]
is_log = self.ylog
rng = self._visible_limits(1)
if rng is None:
return
rng = relim(rng[0], rng[1], is_log)
if self.yflip:
rng = rng[::-1]
self.axes.set_ylim(rng)
self._pull_properties()
def snap(self):
"""Rescale axes to fit the data"""
self._snap_xlim()
self._snap_ylim()
self._redraw()
def set_visible(self, layer, state):
""" Toggle a layer's visibility
:param layer: which layer to modify
        :type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
        :param state: True to show, False to hide
:type state: boolean
"""
if layer not in self.artists:
return
for a in self.artists[layer]:
a.visible = state
self._redraw()
def is_visible(self, layer):
if layer not in self.artists:
return False
return any(a.visible for a in self.artists[layer])
def _set_xydata(self, coord, attribute, snap=True):
""" Redefine which components get assigned to the x/y axes
:param coord: 'x' or 'y'
Which axis to reassign
:param attribute:
Which attribute of the data to use.
:type attribute: core.data.ComponentID
:param snap:
If True, will rescale x/y axes to fit the data
:type snap: bool
"""
if coord not in ('x', 'y'):
raise TypeError("coord must be one of x,y")
if not isinstance(attribute, ComponentID):
raise TypeError("attribute must be a ComponentID")
# update coordinates of data and subsets
if coord == 'x':
new_add = not self._xset
self.xatt = attribute
self._xset = self.xatt is not None
elif coord == 'y':
new_add = not self._yset
self.yatt = attribute
self._yset = self.yatt is not None
# update plots
list(map(self._update_layer, self.artists.layers))
if coord == 'x' and snap:
self._snap_xlim()
if new_add:
self._snap_ylim()
elif coord == 'y' and snap:
self._snap_ylim()
if new_add:
self._snap_xlim()
self._update_axis_labels()
self._pull_properties()
self._redraw()
def apply_roi(self, roi):
# every editable subset is updated
# using specified ROI
for x_comp, y_comp in zip(self._get_data_components('x'),
self._get_data_components('y')):
subset_state = x_comp.subset_from_roi(self.xatt, roi,
other_comp=y_comp,
other_att=self.yatt,
coord='x')
mode = EditSubsetMode()
visible = [d for d in self._data if self.is_visible(d)]
focus = visible[0] if len(visible) > 0 else None
mode.update(self._data, subset_state, focus_data=focus)
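    # Illustrative sketch (added, not part of the original class): a rectangular
    # selection made in the viewer reaches this method as an ROI object, e.g.
    # roughly
    #
    #   roi = RectangularROI(xmin=0, xmax=1, ymin=0, ymax=1)
    #   client.apply_roi(roi)
    #
    # after which EditSubsetMode applies the derived subset state to every
    # editable subset of the visible data.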
def _set_xlog(self, state):
""" Set the x axis scaling
:param state:
The new scaling for the x axis
:type state: string ('log' or 'linear')
"""
mode = 'log' if state else 'linear'
lim = self.axes.get_xlim()
self.axes.set_xscale(mode)
# Rescale if switching to log with negative bounds
if state and min(lim) <= 0:
self._snap_xlim()
self._redraw()
def _set_ylog(self, state):
""" Set the y axis scaling
:param state: The new scaling for the y axis
:type state: string ('log' or 'linear')
"""
mode = 'log' if state else 'linear'
lim = self.axes.get_ylim()
self.axes.set_yscale(mode)
# Rescale if switching to log with negative bounds
if state and min(lim) <= 0:
self._snap_ylim()
self._redraw()
def _remove_data(self, message):
"""Process DataCollectionDeleteMessage"""
for s in message.data.subsets:
self.delete_layer(s)
self.delete_layer(message.data)
def _remove_subset(self, message):
self.delete_layer(message.subset)
def delete_layer(self, layer):
if layer not in self.artists:
return
self.artists.pop(layer)
self._redraw()
assert not self.is_layer_present(layer)
def _update_data(self, message):
data = message.sender
self._update_layer(data)
def _numerical_data_changed(self, message):
data = message.sender
self._update_layer(data, force=True)
for s in data.subsets:
self._update_layer(s, force=True)
def _redraw(self):
self.axes.figure.canvas.draw()
def _jitter(self, *args):
for attribute in [self.xatt, self.yatt]:
if attribute is not None:
for data in self.data:
try:
comp = data.get_component(attribute)
comp.jitter(method=self.jitter)
except (IncompatibleAttribute, NotImplementedError):
continue
def _update_axis_labels(self, *args):
self.axes.set_xlabel(self.xatt)
self.axes.set_ylabel(self.yatt)
if self.xatt is not None:
update_ticks(self.axes, 'x',
list(self._get_data_components('x')),
self.xlog)
if self.yatt is not None:
update_ticks(self.axes, 'y',
list(self._get_data_components('y')),
self.ylog)
def _add_subset(self, message):
subset = message.sender
# only add subset if data layer present
if subset.data not in self.artists:
return
subset.do_broadcast(False)
self.add_layer(subset)
subset.do_broadcast(True)
def add_data(self, data):
result = self.add_layer(data)
for subset in data.subsets:
self.add_layer(subset)
return result
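    # Illustrative sketch (added, not part of the original class): typical
    # wiring of a client, assuming a DataCollection ``dc`` holding a Data
    # object ``d`` with components labelled 'x' and 'y':
    #
    #   client = ScatterClient(dc)
    #   client.add_data(d)
    #   client.xatt = d.id['x']
    #   client.yatt = d.id['y']
    #
    # add_data adds one layer artist for the dataset plus one per subset.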
@property
def data(self):
"""The data objects in the scatter plot"""
return list(self._data)
def _get_attribute(self, coord):
if coord == 'x':
return self.xatt
elif coord == 'y':
return self.yatt
else:
raise TypeError('coord must be x or y')
def _get_data_components(self, coord):
""" Returns the components for each dataset for x and y axes.
"""
attribute = self._get_attribute(coord)
for data in self._data:
try:
yield data.get_component(attribute)
except IncompatibleAttribute:
pass
def _check_categorical(self, attribute):
""" A simple function to figure out if an attribute is categorical.
:param attribute: a core.Data.ComponentID
:return: True iff the attribute represents a CategoricalComponent
"""
for data in self._data:
try:
comp = data.get_component(attribute)
if comp.categorical:
return True
except IncompatibleAttribute:
pass
return False
def _update_subset(self, message):
self._update_layer(message.sender)
def restore_layers(self, layers, context):
""" Re-generate a list of plot layers from a glue-serialized list"""
for l in layers:
cls = lookup_class_with_patches(l.pop('_type'))
if cls != ScatterLayerArtist:
raise ValueError("Scatter client cannot restore layer of type "
"%s" % cls)
props = dict((k, context.object(v)) for k, v in l.items())
layer = self.add_layer(props['layer'])
layer.properties = props
def _update_layer(self, layer, force=False):
""" Update both the style and data for the requested layer"""
if self.xatt is None or self.yatt is None:
return
if layer not in self.artists:
return
self._layer_updated = True
for art in self.artists[layer]:
art.xatt = self.xatt
art.yatt = self.yatt
art.force_update() if force else art.update()
self._redraw()
def _pull_properties(self):
xlim = self.axes.get_xlim()
ylim = self.axes.get_ylim()
xsc = self.axes.get_xscale()
ysc = self.axes.get_yscale()
xflip = (xlim[1] < xlim[0])
yflip = (ylim[1] < ylim[0])
with delay_callback(self, 'xmin', 'xmax', 'xflip', 'xlog'):
self.xmin = min(xlim)
self.xmax = max(xlim)
self.xflip = xflip
self.xlog = (xsc == 'log')
with delay_callback(self, 'ymin', 'ymax', 'yflip', 'ylog'):
self.ymin = min(ylim)
self.ymax = max(ylim)
self.yflip = yflip
self.ylog = (ysc == 'log')
def _on_component_replace(self, msg):
old = msg.old
new = msg.new
if self.xatt is old:
self.xatt = new
if self.yatt is old:
self.yatt = new
def register_to_hub(self, hub):
super(ScatterClient, self).register_to_hub(hub)
hub.subscribe(self, ComponentReplacedMessage, self._on_component_replace)
def is_appearance_settings(msg):
return ('BACKGROUND_COLOR' in msg.settings
or 'FOREGROUND_COLOR' in msg.settings)
hub.subscribe(self, SettingsChangeMessage,
self._update_appearance_from_settings,
filter=is_appearance_settings)
def _update_appearance_from_settings(self, message):
update_appearance_from_settings(self.axes)
self._redraw()
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/scatter/client.py",
"copies": "2",
"size": "15861",
"license": "bsd-3-clause",
"hash": 8732573729360618000,
"line_mean": 31.568788501,
"line_max": 87,
"alpha_frac": 0.5652228737,
"autogenerated": false,
"ratio": 3.985175879396985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002047583901039812,
"num_lines": 487
} |
from __future__ import absolute_import, division, print_function
from functools import partial, wraps
from itertools import product
from math import factorial, log, ceil
import numpy as np
from toolz import compose, partition_all, merge, get
from . import chunk
from .core import _concatenate2, Array, atop, sqrt, lol_tuples
from .numpy_compat import divide
from ..compatibility import getargspec, builtins
from ..base import tokenize
from ..context import _globals
from ..utils import ignoring
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None,
split_every=None, combine=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = tuple(i if i >= 0 else x.ndim + i for i in axis)
if dtype and 'dtype' in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if dtype and 'dtype' in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
# Normalize split_every
split_every = split_every or _globals.get('split_every', 32)
if isinstance(split_every, dict):
split_every = dict((k, split_every.get(k, 2)) for k in axis)
elif isinstance(split_every, int):
n = builtins.max(int(split_every ** (1/(len(axis) or 1))), 2)
split_every = dict.fromkeys(axis, n)
else:
split_every = dict((k, v) for (k, v) in enumerate(x.numblocks) if k in axis)
# Map chunk across all blocks
inds = tuple(range(x.ndim))
tmp = atop(partial(chunk, axis=axis, keepdims=True), inds, x, inds)
tmp._chunks = tuple((1,)*len(c) if i in axis else c for (i, c)
in enumerate(tmp.chunks))
# Reduce across intermediates
depth = 1
for i, n in enumerate(tmp.numblocks):
if i in split_every and split_every[i] != 1:
depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
func = compose(partial(combine or aggregate, axis=axis, keepdims=True),
partial(_concatenate2, axes=axis))
for i in range(depth - 1):
tmp = partial_reduce(func, tmp, split_every, True, None)
func = compose(partial(aggregate, axis=axis, keepdims=keepdims),
partial(_concatenate2, axes=axis))
return partial_reduce(func, tmp, split_every, keepdims=keepdims,
dtype=dtype)
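# Illustrative sketch (added, not part of the original module): with the
# default split_every of 32, reducing a single axis spread over 1000 blocks
# gives depth = ceil(log(1000, 32)) = 2, so the blocks are first combined in
# groups of up to 32 and the resulting ~32 intermediates are aggregated in a
# single final step.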
def partial_reduce(func, x, split_every, keepdims=False, dtype=None, name=None):
"""Partial reduction across multiple axes.
Parameters
----------
func : function
x : Array
split_every : dict
Maximum reduction block sizes in each dimension.
    Examples
    --------
Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
dimension, and 3 blocks in the 2nd dimension:
>>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP
"""
name = name or 'p_reduce-' + tokenize(func, x, split_every, keepdims, dtype)
parts = [list(partition_all(split_every.get(i, 1), range(n))) for (i, n)
in enumerate(x.numblocks)]
keys = product(*map(range, map(len, parts)))
out_chunks = [tuple(1 for p in partition_all(split_every[i], c)) if i
in split_every else c for (i, c) in enumerate(x.chunks)]
if not keepdims:
out_axis = [i for i in range(x.ndim) if i not in split_every]
getter = lambda k: get(out_axis, k)
keys = map(getter, keys)
out_chunks = list(getter(out_chunks))
dsk = {}
for k, p in zip(keys, product(*parts)):
decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
dummy = dict(i for i in enumerate(p) if i[0] not in decided)
g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
dsk[(name,) + k] = (func, g)
return Array(merge(dsk, x.dask), name, out_chunks, dtype=dtype)
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).sum().dtype
else:
dt = None
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
@wraps(chunk.prod)
def prod(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).prod().dtype
else:
dt = None
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype, split_every=split_every)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype, split_every=split_every)
@wraps(chunk.any)
def any(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every)
@wraps(chunk.all)
def all(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every)
@wraps(chunk.nansum)
def nansum(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = chunk.nansum(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).nanprod().dtype
else:
dt = None
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.nanmin, chunk.nanmin, axis=axis,
keepdims=keepdims, dtype=a._dtype, split_every=split_every)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.nanmax, chunk.nanmax, axis=axis,
keepdims=keepdims, dtype=a._dtype, split_every=split_every)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = numel(x, dtype=dtype, **kwargs)
total = sum(x, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape,
dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_combine(pair, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = sum(pair['n'], **kwargs)
total = sum(pair['total'], **kwargs)
result = np.empty(shape=n.shape, dtype=pair.dtype)
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, dtype='f8', **kwargs):
return divide(pair['total'].sum(dtype=dtype, **kwargs),
pair['n'].sum(dtype=dtype, **kwargs), dtype=dtype)
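# Illustrative sketch (added, not part of the original module): the chunk /
# aggregate pair above computes a mean without materialising the whole array.
# For two blocks [1, 2, 3] and [4, 5] the chunk step records the pairs
# (total=6, n=3) and (total=9, n=2), and mean_agg returns
# (6 + 9) / (3 + 2) = 3.0, matching np.mean([1, 2, 3, 4, 5]).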
@wraps(chunk.mean)
def mean(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every, combine=mean_combine)
def nanmean(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims, dtype=dt,
split_every=split_every,
combine=partial(mean_combine, sum=chunk.nansum, numel=nannumel))
with ignoring(AttributeError):
nanmean = wraps(chunk.nanmean)(nanmean)
def moment_chunk(A, order=2, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
total = sum(A, dtype=dtype, **kwargs)
n = numel(A, **kwargs)
u = total/n
M = np.empty(shape=n.shape + (order - 1,), dtype=dtype)
for i in range(2, order + 1):
M[..., i - 2] = sum((A - u)**i, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', M.dtype, (order-1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def _moment_helper(Ms, ns, inner_term, order, sum, kwargs):
M = Ms[..., order - 2].sum(**kwargs) + sum(ns * inner_term**order, **kwargs)
for k in range(1, order - 1):
coeff = factorial(order)/(factorial(k)*factorial(order - k))
M += coeff * sum(Ms[..., order - k - 2] * inner_term**k, **kwargs)
return M
def moment_combine(data, order=2, ddof=0, dtype='f8', sum=np.sum, **kwargs):
kwargs['dtype'] = dtype
kwargs['keepdims'] = True
totals = data['total']
ns = data['n']
Ms = data['M']
total = totals.sum(**kwargs)
n = sum(ns, **kwargs)
mu = divide(total, n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
M = np.empty(shape=n.shape + (order - 1,), dtype=dtype)
for o in range(2, order + 1):
M[..., o - 2] = _moment_helper(Ms, ns, inner_term, o, sum, kwargs)
result = np.zeros(shape=n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', Ms.dtype, (order-1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def moment_agg(data, order=2, ddof=0, dtype='f8', sum=np.sum, **kwargs):
totals = data['total']
ns = data['n']
Ms = data['M']
kwargs['dtype'] = dtype
# To properly handle ndarrays, the original dimensions need to be kept for
# part of the calculation.
keepdim_kw = kwargs.copy()
keepdim_kw['keepdims'] = True
n = sum(ns, **keepdim_kw)
mu = divide(totals.sum(**keepdim_kw), n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
M = _moment_helper(Ms, ns, inner_term, order, sum, kwargs)
return divide(M, sum(n, **kwargs) - ddof, dtype=dtype)
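# Note (added): _moment_helper applies the standard pairwise update for
# central moments. Writing d_b = mean_b - mean_total for each block b, the
# p-th centred sum of the full array is
#     M_p = sum_b [ M_p(b) + sum_{k=1..p-2} C(p, k) * M_{p-k}(b) * d_b**k
#                   + n_b * d_b**p ]
# which is exactly the three terms accumulated above; moment_agg then divides
# the centred sum by (n - ddof) to produce the requested moment.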
def moment(a, order, axis=None, dtype=None, keepdims=False, ddof=0,
split_every=None):
if not isinstance(order, int) or order < 2:
raise ValueError("Order must be an integer >= 2")
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(moment_chunk, order=order), partial(moment_agg,
order=order, ddof=ddof), axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every,
combine=partial(moment_combine, order=order))
@wraps(chunk.var)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, moment_chunk, partial(moment_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
combine=moment_combine)
def nanvar(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(moment_chunk, sum=chunk.nansum, numel=nannumel),
partial(moment_agg, sum=np.nansum, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
combine=partial(moment_combine, sum=np.nansum))
with ignoring(AttributeError):
nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
result = sqrt(a.var(axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
def nanstd(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
result = sqrt(nanvar(a, axis=axis, dtype=dtype, keepdims=keepdims,
ddof=ddof, split_every=split_every))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
with ignoring(AttributeError):
nanstd = wraps(chunk.nanstd)(nanstd)
def vnorm(a, ord=None, axis=None, dtype=None, keepdims=False, split_every=None):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == np.inf:
return max(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
elif ord == -np.inf:
return min(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
elif ord == 1:
return sum(abs(a), axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
elif ord % 2 == 0:
return sum(a**ord, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)**(1./ord)
else:
return sum(abs(a)**ord, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)**(1./ord)
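# Illustrative sketch (added, not part of the original module): the ord
# branches above mirror np.linalg.norm for vectors, e.g.
#   vnorm(a)              -> sum(a**2) ** 0.5        (ord=2, the default)
#   vnorm(a, ord=1)       -> sum(abs(a))
#   vnorm(a, ord=np.inf)  -> max(abs(a))
#   vnorm(a, ord=3)       -> sum(abs(a)**3) ** (1/3)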
def _arg_combine(data, axis, argfunc):
"""Merge intermediate results from ``arg_*`` functions"""
vals = data['vals']
arg = data['arg']
ns = data['n']
args = argfunc(vals, axis=axis)
offsets = np.roll(np.cumsum(ns, axis=axis), 1, axis)
offsets[tuple(slice(None) if i != axis else 0 for i in range(ns.ndim))] = 0
inds = list(reversed(np.meshgrid(*map(np.arange, args.shape), sparse=True)))
inds.insert(axis, args)
arg = (arg + offsets)[tuple(inds)]
vals = vals[tuple(inds)]
n = ns.sum(axis=axis).take(0, 0)
return arg, vals, n
def arg_chunk(func, argfunc, x, axis=None, **kwargs):
axis = axis[0] if isinstance(axis, tuple) else axis
vals = func(x, axis=axis, keepdims=True)
arg = argfunc(x, axis=axis, keepdims=True)
result = np.empty(shape=vals.shape, dtype=[('vals', vals.dtype),
('arg', arg.dtype),
('n', 'i8')])
result['vals'] = vals
result['arg'] = arg
result['n'] = x.shape[axis]
return result
def arg_combine(func, argfunc, data, axis=None, **kwargs):
axis = axis[0] if isinstance(axis, tuple) else axis
arg, vals, n = _arg_combine(data, axis, argfunc)
shape = tuple(s if i != axis else 1 for (i, s) in enumerate(data.shape))
result = np.empty(shape=shape, dtype=[('vals', vals.dtype),
('arg', arg.dtype),
('n', 'i8')])
result['vals'] = vals.reshape(shape)
result['arg'] = arg.reshape(shape)
result['n'] = n
return result
def arg_agg(func, argfunc, data, axis=None, **kwargs):
axis = axis[0] if isinstance(axis, tuple) else axis
return _arg_combine(data, axis, argfunc)[0]
def arg_reduction(func, argfunc):
chunk = partial(arg_chunk, func, argfunc)
agg = partial(arg_agg, func, argfunc)
combine = partial(arg_combine, func, argfunc)
@wraps(argfunc)
def _(a, axis=None, split_every=None):
if axis < 0:
axis = a.ndim + axis
return reduction(a, chunk, agg, axis=axis, dtype='i8',
split_every=split_every, combine=combine)
return _
argmin = arg_reduction(chunk.min, chunk.argmin)
argmax = arg_reduction(chunk.max, chunk.argmax)
nanargmin = arg_reduction(chunk.nanmin, chunk.nanargmin)
nanargmax = arg_reduction(chunk.nanmax, chunk.nanargmax)
| {
"repo_name": "vikhyat/dask",
"path": "dask/array/reductions.py",
"copies": "1",
"size": "17067",
"license": "bsd-3-clause",
"hash": -6765742454215944000,
"line_mean": 35.3901918977,
"line_max": 85,
"alpha_frac": 0.6002226519,
"autogenerated": false,
"ratio": 3.3869815439571345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4487204195857134,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial, wraps
from itertools import product, repeat
from math import factorial, log, ceil
import operator
import numpy as np
from toolz import compose, partition_all, get, accumulate, pluck
from . import chunk
from .core import _concatenate2, Array, atop, lol_tuples
from .ufunc import sqrt
from .numpy_compat import divide
from ..compatibility import getargspec, builtins
from ..base import tokenize
from ..context import _globals
from ..utils import ignoring, funcname
from .. import sharedict
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None,
split_every=None, combine=None, name=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = tuple(validate_axis(x.ndim, a) for a in axis)
if dtype is None:
raise ValueError("Must specify dtype")
if 'dtype' in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if 'dtype' in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
# Map chunk across all blocks
inds = tuple(range(x.ndim))
# The dtype of `tmp` doesn't actually matter, and may be incorrect.
tmp = atop(chunk, inds, x, inds, axis=axis, keepdims=True, dtype=x.dtype)
tmp._chunks = tuple((1, ) * len(c) if i in axis else c for (i, c)
in enumerate(tmp.chunks))
return _tree_reduce(tmp, aggregate, axis, keepdims, dtype, split_every,
combine, name=name)
def _tree_reduce(x, aggregate, axis, keepdims, dtype, split_every=None,
combine=None, name=None):
"""Perform the tree reduction step of a reduction.
Lower level, users should use ``reduction`` or ``arg_reduction`` directly.
"""
# Normalize split_every
split_every = split_every or _globals.get('split_every', 4)
if isinstance(split_every, dict):
split_every = dict((k, split_every.get(k, 2)) for k in axis)
elif isinstance(split_every, int):
n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)
split_every = dict.fromkeys(axis, n)
else:
split_every = dict((k, v) for (k, v) in enumerate(x.numblocks) if k in axis)
# Reduce across intermediates
depth = 1
for i, n in enumerate(x.numblocks):
if i in split_every and split_every[i] != 1:
depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
func = compose(partial(combine or aggregate, axis=axis, keepdims=True),
partial(_concatenate2, axes=axis))
for i in range(depth - 1):
x = partial_reduce(func, x, split_every, True, dtype=dtype,
name=(name or funcname(combine or aggregate)) + '-partial')
func = compose(partial(aggregate, axis=axis, keepdims=keepdims),
partial(_concatenate2, axes=axis))
return partial_reduce(func, x, split_every, keepdims=keepdims, dtype=dtype,
name=(name or funcname(aggregate)) + '-aggregate')
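# Illustrative sketch (added, not part of the original module): with the
# default split_every of 4, reducing a single axis that spans 64 blocks gives
# depth = ceil(log(64, 4)) = 3, i.e. two rounds of the partial "combine"
# reduction (64 -> 16 -> 4 blocks) followed by one final "aggregate" round.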
def partial_reduce(func, x, split_every, keepdims=False, dtype=None, name=None):
"""Partial reduction across multiple axes.
Parameters
----------
func : function
x : Array
split_every : dict
Maximum reduction block sizes in each dimension.
Examples
--------
Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
dimension, and 3 blocks in the 2nd dimension:
>>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP
"""
name = (name or funcname(func)) + '-' + tokenize(func, x, split_every,
keepdims, dtype)
parts = [list(partition_all(split_every.get(i, 1), range(n))) for (i, n)
in enumerate(x.numblocks)]
keys = product(*map(range, map(len, parts)))
out_chunks = [tuple(1 for p in partition_all(split_every[i], c)) if i
in split_every else c for (i, c) in enumerate(x.chunks)]
if not keepdims:
out_axis = [i for i in range(x.ndim) if i not in split_every]
getter = lambda k: get(out_axis, k)
keys = map(getter, keys)
out_chunks = list(getter(out_chunks))
dsk = {}
for k, p in zip(keys, product(*parts)):
decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
dummy = dict(i for i in enumerate(p) if i[0] not in decided)
g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
dsk[(name,) + k] = (func, g)
return Array(sharedict.merge(x.dask, (name, dsk)), name, out_chunks, dtype=dtype)
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = np.empty((1,), dtype=a.dtype).sum().dtype
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
@wraps(chunk.prod)
def prod(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = np.empty((1,), dtype=a.dtype).prod().dtype
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a.dtype, split_every=split_every)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a.dtype, split_every=split_every)
@wraps(chunk.any)
def any(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every)
@wraps(chunk.all)
def all(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every)
@wraps(chunk.nansum)
def nansum(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = chunk.nansum(np.empty((1,), dtype=a.dtype)).dtype
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = chunk.nanprod(np.empty((1,), dtype=a.dtype)).dtype
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every)
@wraps(chunk.nancumsum)
def nancumsum(x, axis, dtype=None):
return cumreduction(chunk.nancumsum, operator.add, 0, x, axis, dtype)
@wraps(chunk.nancumprod)
def nancumprod(x, axis, dtype=None):
return cumreduction(chunk.nancumprod, operator.mul, 1, x, axis, dtype)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.nanmin, chunk.nanmin, axis=axis,
keepdims=keepdims, dtype=a.dtype, split_every=split_every)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.nanmax, chunk.nanmax, axis=axis,
keepdims=keepdims, dtype=a.dtype, split_every=split_every)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = numel(x, dtype=dtype, **kwargs)
total = sum(x, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape,
dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_combine(pair, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = sum(pair['n'], **kwargs)
total = sum(pair['total'], **kwargs)
result = np.empty(shape=n.shape, dtype=pair.dtype)
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, dtype='f8', **kwargs):
return divide(pair['total'].sum(dtype=dtype, **kwargs),
pair['n'].sum(dtype=dtype, **kwargs), dtype=dtype)
@wraps(chunk.mean)
def mean(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = np.mean(np.empty(shape=(1,), dtype=a.dtype)).dtype
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every, combine=mean_combine)
def nanmean(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = np.mean(np.empty(shape=(1,), dtype=a.dtype)).dtype
return reduction(a, partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims, dtype=dt,
split_every=split_every,
combine=partial(mean_combine, sum=chunk.nansum, numel=nannumel))
with ignoring(AttributeError):
nanmean = wraps(chunk.nanmean)(nanmean)
def moment_chunk(A, order=2, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
total = sum(A, dtype=dtype, **kwargs)
n = numel(A, **kwargs).astype(np.int64, copy=False)
u = total / n
M = np.empty(shape=n.shape + (order - 1,), dtype=dtype)
for i in range(2, order + 1):
M[..., i - 2] = sum((A - u)**i, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', M.dtype, (order - 1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def _moment_helper(Ms, ns, inner_term, order, sum, kwargs):
M = Ms[..., order - 2].sum(**kwargs) + sum(ns * inner_term ** order, **kwargs)
for k in range(1, order - 1):
coeff = factorial(order) / (factorial(k) * factorial(order - k))
M += coeff * sum(Ms[..., order - k - 2] * inner_term**k, **kwargs)
return M
def moment_combine(data, order=2, ddof=0, dtype='f8', sum=np.sum, **kwargs):
kwargs['dtype'] = dtype
kwargs['keepdims'] = True
totals = data['total']
ns = data['n']
Ms = data['M']
total = totals.sum(**kwargs)
n = sum(ns, **kwargs)
mu = divide(total, n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
M = np.empty(shape=n.shape + (order - 1,), dtype=dtype)
for o in range(2, order + 1):
M[..., o - 2] = _moment_helper(Ms, ns, inner_term, o, sum, kwargs)
result = np.zeros(shape=n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', Ms.dtype, (order - 1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def moment_agg(data, order=2, ddof=0, dtype='f8', sum=np.sum, **kwargs):
totals = data['total']
ns = data['n']
Ms = data['M']
kwargs['dtype'] = dtype
# To properly handle ndarrays, the original dimensions need to be kept for
# part of the calculation.
keepdim_kw = kwargs.copy()
keepdim_kw['keepdims'] = True
n = sum(ns, **keepdim_kw)
mu = divide(totals.sum(**keepdim_kw), n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
M = _moment_helper(Ms, ns, inner_term, order, sum, kwargs)
return divide(M, sum(n, **kwargs) - ddof, dtype=dtype)
def moment(a, order, axis=None, dtype=None, keepdims=False, ddof=0,
split_every=None):
if not isinstance(order, int) or order < 2:
raise ValueError("Order must be an integer >= 2")
if dtype is not None:
dt = dtype
else:
dt = np.var(np.ones(shape=(1,), dtype=a.dtype)).dtype
return reduction(a, partial(moment_chunk, order=order),
partial(moment_agg, order=order, ddof=ddof),
axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every,
combine=partial(moment_combine, order=order))
@wraps(chunk.var)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = np.var(np.ones(shape=(1,), dtype=a.dtype)).dtype
return reduction(a, moment_chunk, partial(moment_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
combine=moment_combine, name='var')
def nanvar(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
if dtype is not None:
dt = dtype
else:
dt = np.var(np.ones(shape=(1,), dtype=a.dtype)).dtype
return reduction(a, partial(moment_chunk, sum=chunk.nansum, numel=nannumel),
partial(moment_agg, sum=np.nansum, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
combine=partial(moment_combine, sum=np.nansum))
with ignoring(AttributeError):
nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
result = sqrt(a.var(axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
def nanstd(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
result = sqrt(nanvar(a, axis=axis, dtype=dtype, keepdims=keepdims,
ddof=ddof, split_every=split_every))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
with ignoring(AttributeError):
nanstd = wraps(chunk.nanstd)(nanstd)
def vnorm(a, ord=None, axis=None, dtype=None, keepdims=False, split_every=None):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == np.inf:
return max(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
elif ord == -np.inf:
return min(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
elif ord == 1:
return sum(abs(a), axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
elif ord % 2 == 0:
return sum(a ** ord, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every) ** (1. / ord)
else:
return sum(abs(a) ** ord, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every) ** (1. / ord)
def _arg_combine(data, axis, argfunc, keepdims=False):
"""Merge intermediate results from ``arg_*`` functions"""
axis = None if len(axis) == data.ndim or data.ndim == 1 else axis[0]
vals = data['vals']
arg = data['arg']
if axis is None:
local_args = argfunc(vals, axis=axis, keepdims=keepdims)
vals = vals.ravel()[local_args]
arg = arg.ravel()[local_args]
else:
local_args = argfunc(vals, axis=axis)
inds = np.ogrid[tuple(map(slice, local_args.shape))]
inds.insert(axis, local_args)
vals = vals[inds]
arg = arg[inds]
if keepdims:
vals = np.expand_dims(vals, axis)
arg = np.expand_dims(arg, axis)
return arg, vals
def arg_chunk(func, argfunc, x, axis, offset_info):
arg_axis = None if len(axis) == x.ndim or x.ndim == 1 else axis[0]
vals = func(x, axis=arg_axis, keepdims=True)
arg = argfunc(x, axis=arg_axis, keepdims=True)
if arg_axis is None:
offset, total_shape = offset_info
ind = np.unravel_index(arg.ravel()[0], x.shape)
total_ind = tuple(o + i for (o, i) in zip(offset, ind))
arg[:] = np.ravel_multi_index(total_ind, total_shape)
else:
arg += offset_info
result = np.empty(shape=vals.shape, dtype=[('vals', vals.dtype),
('arg', arg.dtype)])
result['vals'] = vals
result['arg'] = arg
return result
def arg_combine(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=True)
result = np.empty(shape=vals.shape, dtype=[('vals', vals.dtype),
('arg', arg.dtype)])
result['vals'] = vals
result['arg'] = arg
return result
def arg_agg(func, argfunc, data, axis=None, **kwargs):
return _arg_combine(data, axis, argfunc, keepdims=False)[0]
def nanarg_agg(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=False)
if np.any(np.isnan(vals)):
raise ValueError("All NaN slice encountered")
return arg
def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None):
"""Generic function for argreduction.
Parameters
----------
x : Array
chunk : callable
Partialed ``arg_chunk``.
combine : callable
Partialed ``arg_combine``.
agg : callable
Partialed ``arg_agg``.
axis : int, optional
split_every : int or dict, optional
"""
if axis is None:
axis = tuple(range(x.ndim))
ravel = True
elif isinstance(axis, int):
if axis < 0:
axis += x.ndim
if axis < 0 or axis >= x.ndim:
raise ValueError("axis entry is out of bounds")
axis = (axis,)
ravel = x.ndim == 1
else:
raise TypeError("axis must be either `None` or int, "
"got '{0}'".format(axis))
# Map chunk across all blocks
name = 'arg-reduce-chunk-{0}'.format(tokenize(chunk, axis))
old = x.name
keys = list(product(*map(range, x.numblocks)))
offsets = list(product(*(accumulate(operator.add, bd[:-1], 0)
for bd in x.chunks)))
if ravel:
offset_info = zip(offsets, repeat(x.shape))
else:
offset_info = pluck(axis[0], offsets)
chunks = tuple((1, ) * len(c) if i in axis else c for (i, c)
in enumerate(x.chunks))
dsk = dict(((name,) + k, (chunk, (old,) + k, axis, off)) for (k, off)
in zip(keys, offset_info))
# The dtype of `tmp` doesn't actually matter, just need to provide something
tmp = Array(sharedict.merge(x.dask, (name, dsk)), name, chunks, dtype=x.dtype)
dtype = np.argmin([1]).dtype
return _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)
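# Illustrative sketch (added, not part of the original module): the offsets
# computed above turn block-local argmin/argmax results into global indices.
# For a 1-d array with chunks (3, 3, 2) the per-block offsets are 0, 3 and 6,
# so a local argmin of 1 found in the middle block is reported as 3 + 1 = 4:
#
#   >>> import numpy as np                          # doctest: +SKIP
#   >>> import dask.array as da                     # doctest: +SKIP
#   >>> x = da.from_array(np.array([5, 4, 3, 9, 0, 1, 2, 8]), chunks=3)  # doctest: +SKIP
#   >>> x.argmin().compute()                        # doctest: +SKIP
#   4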
def make_arg_reduction(func, argfunc, is_nan_func=False):
"""Create a argreduction callable.
Parameters
----------
func : callable
The reduction (e.g. ``min``)
argfunc : callable
The argreduction (e.g. ``argmin``)
"""
chunk = partial(arg_chunk, func, argfunc)
combine = partial(arg_combine, func, argfunc)
if is_nan_func:
agg = partial(nanarg_agg, func, argfunc)
else:
agg = partial(arg_agg, func, argfunc)
@wraps(argfunc)
def _(x, axis=None, split_every=None):
return arg_reduction(x, chunk, combine, agg, axis, split_every)
return _
def _nanargmin(x, axis, **kwargs):
try:
return chunk.nanargmin(x, axis, **kwargs)
except ValueError:
return chunk.nanargmin(np.where(np.isnan(x), np.inf, x), axis, **kwargs)
def _nanargmax(x, axis, **kwargs):
try:
return chunk.nanargmax(x, axis, **kwargs)
except ValueError:
return chunk.nanargmax(np.where(np.isnan(x), -np.inf, x), axis, **kwargs)
argmin = make_arg_reduction(chunk.min, chunk.argmin)
argmax = make_arg_reduction(chunk.max, chunk.argmax)
nanargmin = make_arg_reduction(chunk.nanmin, _nanargmin, True)
nanargmax = make_arg_reduction(chunk.nanmax, _nanargmax, True)
def cumreduction(func, binop, ident, x, axis, dtype=None):
""" Generic function for cumulative reduction
Parameters
----------
func: callable
Cumulative function like np.cumsum or np.cumprod
binop: callable
Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``
ident: Number
Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``
x: dask Array
axis: int
dtype: dtype
Returns
-------
dask array
See also
--------
cumsum
cumprod
"""
if dtype is None:
dtype = func(np.empty((0,), dtype=x.dtype)).dtype
assert isinstance(axis, int)
axis = validate_axis(x.ndim, axis)
m = x.map_blocks(func, axis=axis, dtype=dtype)
name = '%s-axis=%d-%s' % (func.__name__, axis, tokenize(x, dtype))
n = x.numblocks[axis]
full = slice(None, None, None)
slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)
indices = list(product(*[range(nb) if i != axis else [0]
for i, nb in enumerate(x.numblocks)]))
dsk = dict()
for ind in indices:
shape = tuple(x.chunks[i][ii] if i != axis else 1
for i, ii in enumerate(ind))
dsk[(name, 'extra') + ind] = (np.full, shape, ident, m.dtype)
dsk[(name,) + ind] = (m.name,) + ind
for i in range(1, n):
last_indices = indices
indices = list(product(*[range(nb) if ii != axis else [i]
for ii, nb in enumerate(x.numblocks)]))
for old, ind in zip(last_indices, indices):
this_slice = (name, 'extra') + ind
dsk[this_slice] = (binop, (name, 'extra') + old,
(operator.getitem, (m.name,) + old, slc))
dsk[(name,) + ind] = (binop, this_slice, (m.name,) + ind)
return Array(sharedict.merge(m.dask, (name, dsk)), name, x.chunks, m.dtype)
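# Illustrative sketch (added, not part of the original module): cumreduction
# builds a blockwise prefix scan. Each block first gets its local cumulative
# result (``m``), and an "extra" carry accumulates the last element of every
# preceding block with ``binop``. For cumsum over chunks (2, 2, 2) of
# [1, 2, 3, 4, 5, 6]:
#   local cumsums:  [1, 3]  [3, 7]  [5, 11]
#   carries:        0       3       3 + 7 = 10
#   final blocks:   [1, 3]  [6, 10] [15, 21]
# which concatenates to np.cumsum([1, 2, 3, 4, 5, 6]).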
@wraps(np.cumsum)
def cumsum(x, axis, dtype=None):
return cumreduction(np.cumsum, operator.add, 0, x, axis, dtype)
@wraps(np.cumprod)
def cumprod(x, axis, dtype=None):
return cumreduction(np.cumprod, operator.mul, 1, x, axis, dtype)
def validate_axis(ndim, axis):
""" Validate single axis dimension against number of dimensions """
if axis > ndim - 1 or axis < -ndim:
raise ValueError("Axis must be between -%d and %d, got %d" %
(ndim, ndim - 1, axis))
if axis < 0:
return axis + ndim
else:
return axis
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/reductions.py",
"copies": "1",
"size": "23040",
"license": "bsd-3-clause",
"hash": 2528149604603851000,
"line_mean": 34.5555555556,
"line_max": 86,
"alpha_frac": 0.5967013889,
"autogenerated": false,
"ratio": 3.4183976261127595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45150990150127596,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial, wraps
from math import factorial
import numpy as np
from toolz import compose
from . import chunk
from .core import _concatenate2, Array, atop, sqrt, elemwise
from .numpy_compat import divide
from .slicing import insert_many
from ..compatibility import getargspec
from ..core import flatten
from ..utils import ignoring
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = tuple(i if i >= 0 else x.ndim + i for i in axis)
if dtype and 'dtype' in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if dtype and 'dtype' in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
chunk2 = partial(chunk, axis=axis, keepdims=True)
aggregate2 = partial(aggregate, axis=axis, keepdims=keepdims)
inds = tuple(range(x.ndim))
tmp = atop(chunk2, inds, x, inds)
inds2 = tuple(i for i in inds if i not in axis)
result = atop(compose(aggregate2, partial(_concatenate2, axes=axis)),
inds2, tmp, inds, dtype=dtype)
if keepdims:
dsk = result.dask.copy()
for k in flatten(result._keys()):
k2 = (k[0],) + insert_many(k[1:], axis, 0)
dsk[k2] = dsk.pop(k)
chunks = insert_many(result.chunks, axis, [1])
return Array(dsk, result.name, chunks=chunks, dtype=dtype)
else:
return result
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).sum().dtype
else:
dt = None
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.prod)
def prod(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).prod().dtype
else:
dt = None
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.argmin)
def argmin(a, axis=None):
return arg_reduction(a, chunk.min, chunk.argmin, axis=axis, dtype='i8')
@wraps(chunk.nanargmin)
def nanargmin(a, axis=None):
return arg_reduction(a, chunk.nanmin, chunk.nanargmin, axis=axis,
dtype='i8')
@wraps(chunk.argmax)
def argmax(a, axis=None):
return arg_reduction(a, chunk.max, chunk.argmax, axis=axis, dtype='i8')
@wraps(chunk.nanargmax)
def nanargmax(a, axis=None):
return arg_reduction(a, chunk.nanmax, chunk.nanargmax, axis=axis,
dtype='i8')
@wraps(chunk.any)
def any(a, axis=None, keepdims=False):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.all)
def all(a, axis=None, keepdims=False):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.nansum)
def nansum(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = chunk.nansum(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
            dt = chunk.nanprod(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmin, chunk.nanmin, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmax, chunk.nanmax, axis=axis,
keepdims=keepdims, dtype=a._dtype)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = numel(x, dtype=dtype, **kwargs)
total = sum(x, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape,
dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, dtype='f8', **kwargs):
return divide(pair['total'].sum(dtype=dtype, **kwargs),
pair['n'].sum(dtype=dtype, **kwargs), dtype=dtype)
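# The structured (total, n) pairs above let the aggregate step recover the
# exact mean as sum(totals) / sum(ns).  For example, blocks [1, 2, 3] and
# [10] produce pairs (6, 3) and (10, 1); the correct mean is 16 / 4 == 4.0,
# not the mean of the per-block means (6.0), which is why per-block means are
# never averaged directly.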
@wraps(chunk.mean)
def mean(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims,
dtype=dt)
def nanmean(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims, dtype=dt)
with ignoring(AttributeError):
nanmean = wraps(chunk.nanmean)(nanmean)
def moment_chunk(A, order=2, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
total = sum(A, dtype=dtype, **kwargs)
n = numel(A, **kwargs)
u = total/n
M = np.empty(shape=n.shape + (order - 1,), dtype=dtype)
for i in range(2, order + 1):
M[..., i - 2] = sum((A - u)**i, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', M.dtype, (order-1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def moment_agg(data, order=2, ddof=0, dtype='f8', sum=np.sum, **kwargs):
totals = data['total']
ns = data['n']
Ms = data['M']
kwargs['dtype'] = dtype
# To properly handle ndarrays, the original dimensions need to be kept for
# part of the calculation.
keepdim_kw = kwargs.copy()
keepdim_kw['keepdims'] = True
n = sum(ns, **keepdim_kw)
mu = divide(totals.sum(**keepdim_kw), n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
result = Ms[..., -1].sum(**kwargs)
for k in range(1, order - 1):
coeff = factorial(order)/(factorial(k)*factorial(order - k))
result += coeff * sum(Ms[..., order - k - 2] * inner_term**k, **kwargs)
result += sum(ns * inner_term**order, **kwargs)
result = divide(result, sum(n, **kwargs) - ddof, dtype=dtype)
return result
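# The algebra behind moment_agg, stated for reference (no new behaviour):
# with u the block mean, delta = u - mu the shift to the global mean and
# M_p = sum((A - u)**p), each block contributes
#
#     sum((A - mu)**order) = M_order
#                            + sum_{k=1..order-2} C(order, k) * M_{order-k} * delta**k
#                            + n * delta**order
#
# The k = order - 1 term vanishes because sum(A - u) == 0, which is why the
# loop above stops at order - 2.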
def moment(a, order, axis=None, dtype=None, keepdims=False, ddof=0):
if not isinstance(order, int) or order < 2:
raise ValueError("Order must be an integer >= 2")
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
    return reduction(a, partial(moment_chunk, order=order),
                     partial(moment_agg, order=order, ddof=ddof),
                     axis=axis, keepdims=keepdims, dtype=dt)
@wraps(chunk.var)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, moment_chunk, partial(moment_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt)
def nanvar(a, axis=None, dtype=None, keepdims=False, ddof=0):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(moment_chunk, sum=chunk.nansum, numel=nannumel),
partial(moment_agg, sum=np.nansum, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt)
with ignoring(AttributeError):
nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0):
result = sqrt(a.var(axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
def nanstd(a, axis=None, dtype=None, keepdims=False, ddof=0):
result = sqrt(nanvar(a, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
with ignoring(AttributeError):
nanstd = wraps(chunk.nanstd)(nanstd)
def vnorm(a, ord=None, axis=None, dtype=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == np.inf:
return max(abs(a), axis=axis, keepdims=keepdims)
elif ord == -np.inf:
return min(abs(a), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(a), axis=axis, dtype=dtype, keepdims=keepdims)
elif ord % 2 == 0:
return sum(a**ord, axis=axis, dtype=dtype, keepdims=keepdims)**(1./ord)
else:
return sum(abs(a)**ord, axis=axis, dtype=dtype, keepdims=keepdims)**(1./ord)
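# Sanity check for the even-order branch above: for a vector holding [3, 4]
# and ord=2 this evaluates sum(a**2)**0.5 == 5.0, matching np.linalg.norm.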
def arg_aggregate(func, argfunc, dims, pairs):
"""
>>> pairs = [([4, 3, 5], [10, 11, 12]),
... ([3, 5, 1], [1, 2, 3])]
>>> arg_aggregate(np.min, np.argmin, (100, 100), pairs)
array([101, 11, 103])
"""
pairs = list(pairs)
mins, argmins = zip(*pairs)
mins = np.array(mins)
argmins = np.array(argmins)
args = argfunc(mins, axis=0)
offsets = np.add.accumulate([0] + list(dims)[:-1])
offsets = offsets.reshape((len(offsets),) + (1,) * (argmins.ndim - 1))
return np.choose(args, argmins + offsets)
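# The offsets convert per-block argmin/argmax positions into global positions:
# in the docstring example the block sizes are (100, 100), so the offsets are
# (0, 100), and the winning blocks [1, 0, 1] give 1 + 100, 11 + 0 and 3 + 100,
# i.e. [101, 11, 103].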
def arg_reduction(a, func, argfunc, axis=0, dtype=None):
""" General version of argmin/argmax
    >>> arg_reduction(my_array, np.min, np.argmin, axis=0) # doctest: +SKIP
"""
if not isinstance(axis, int):
raise ValueError("Must specify integer axis= keyword argument.\n"
"For example:\n"
" Before: x.argmin()\n"
" After: x.argmin(axis=0)\n")
if axis < 0:
axis = a.ndim + axis
def argreduce(x):
""" Get both min/max and argmin/argmax of each block """
return (func(x, axis=axis), argfunc(x, axis=axis))
a2 = elemwise(argreduce, a)
return atop(partial(arg_aggregate, func, argfunc, a.chunks[axis]),
[i for i in range(a.ndim) if i != axis],
a2, list(range(a.ndim)), dtype=dtype)
| {
"repo_name": "pombredanne/dask",
"path": "dask/array/reductions.py",
"copies": "1",
"size": "11793",
"license": "bsd-3-clause",
"hash": -7251283381934206000,
"line_mean": 30.5320855615,
"line_max": 85,
"alpha_frac": 0.6006953277,
"autogenerated": false,
"ratio": 3.3210363277949875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4421731655494987,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial, wraps
from toolz import merge
import pandas as pd
from ..base import tokenize
from ..utils import M
def rolling_chunk(func, part1, part2, window, *args):
if part1.shape[0] < window - 1:
raise NotImplementedError("Window larger than partition size")
if window > 1:
extra = window - 1
combined = pd.concat([part1.iloc[-extra:], part2])
applied = func(combined, window, *args)
return applied.iloc[extra:]
else:
return func(part2, window, *args)
def wrap_rolling(func):
"""Create a chunked version of a pandas.rolling_* function"""
@wraps(func)
def rolling(arg, window, *args, **kwargs):
if not isinstance(window, int):
raise TypeError('Window must be an integer')
        if window < 1:
raise ValueError('Window must be a positive integer')
if 'freq' in kwargs or 'how' in kwargs:
raise NotImplementedError('Resampling before rolling computations '
'not supported')
old_name = arg._name
token = tokenize(func, arg, window, args, kwargs)
new_name = 'rolling-' + token
f = partial(func, **kwargs)
dsk = {(new_name, 0): (f, (old_name, 0), window) + args}
for i in range(1, arg.npartitions + 1):
dsk[(new_name, i)] = (rolling_chunk, f, (old_name, i - 1),
(old_name, i), window) + args
return arg._constructor(merge(arg.dask, dsk), new_name,
arg, arg.divisions)
return rolling
rolling_count = wrap_rolling(pd.rolling_count)
rolling_sum = wrap_rolling(pd.rolling_sum)
rolling_mean = wrap_rolling(pd.rolling_mean)
rolling_median = wrap_rolling(pd.rolling_median)
rolling_min = wrap_rolling(pd.rolling_min)
rolling_max = wrap_rolling(pd.rolling_max)
rolling_std = wrap_rolling(pd.rolling_std)
rolling_var = wrap_rolling(pd.rolling_var)
rolling_skew = wrap_rolling(pd.rolling_skew)
rolling_kurt = wrap_rolling(pd.rolling_kurt)
rolling_quantile = wrap_rolling(pd.rolling_quantile)
rolling_apply = wrap_rolling(pd.rolling_apply)
rolling_window = wrap_rolling(pd.rolling_window)
def call_pandas_rolling_method_single(this_partition, rolling_kwargs,
method_name, method_args, method_kwargs):
# used for the start of the df/series (or for rolling through columns)
method = getattr(this_partition.rolling(**rolling_kwargs), method_name)
return method(*method_args, **method_kwargs)
def call_pandas_rolling_method_with_neighbors(prev_partition, this_partition,
next_partition, before, after,
rolling_kwargs, method_name,
method_args, method_kwargs):
if prev_partition.shape[0] != before or next_partition.shape[0] != after:
raise NotImplementedError("Window requires larger inter-partition view than partition size")
combined = pd.concat([prev_partition, this_partition, next_partition])
method = getattr(combined.rolling(**rolling_kwargs), method_name)
applied = method(*method_args, **method_kwargs)
if after:
return applied.iloc[before:-after]
else:
return applied.iloc[before:]
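# The helper above applies the rolling method to prev + this + next and then
# drops the ``before`` leading and ``after`` trailing rows, so only rows that
# belong to ``this_partition`` are returned; the shape check guards against
# partitions too small to supply the requested halo.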
class Rolling(object):
# What you get when you do ddf.rolling(...) or similar
"""Provides rolling window calculations.
"""
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0):
if freq is not None:
msg = 'The deprecated freq argument is not supported.'
raise NotImplementedError(msg)
self.obj = obj # dataframe or series
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.axis = axis
# Allow pandas to raise if appropriate
obj._meta.rolling(**self._rolling_kwargs())
def _rolling_kwargs(self):
return {
'window': self.window,
'min_periods': self.min_periods,
'center': self.center,
'win_type': self.win_type,
'axis': self.axis}
def _call_method(self, method_name, *args, **kwargs):
# make sure dask does not mistake this for a task
args = list(args)
old_name = self.obj._name
new_name = 'rolling-' + tokenize(
self.obj, self._rolling_kwargs(), method_name, args, kwargs)
dsk = {}
if self.axis in [1, 'columns'] or self.window <= 1 or self.obj.npartitions == 1:
# This is the easy scenario, we're rolling over columns (or not
            # really rolling at all), so each chunk is independent.
for i in range(self.obj.npartitions):
dsk[new_name, i] = (
call_pandas_rolling_method_single, (old_name, i),
self._rolling_kwargs(), method_name, args, kwargs)
else:
# This is a bit trickier, we need to feed in information from the
# neighbors to roll along rows.
# Figure out how many we need to look at before and after.
if self.center:
before = self.window // 2
after = self.window - before - 1
else:
before = self.window - 1
after = 0
head_name = 'head-{}-{}'.format(after, old_name)
tail_name = 'tail-{}-{}'.format(before, old_name)
# First chunk, only look after (if necessary)
if after > 0:
next_partition = (head_name, 1)
dsk[next_partition] = (M.head, (old_name, 1), after)
else:
# Either we are only looking backward or this was the
# only chunk.
next_partition = self.obj._meta
dsk[new_name, 0] = (call_pandas_rolling_method_with_neighbors,
self.obj._meta, (old_name, 0),
next_partition, 0, after,
self._rolling_kwargs(), method_name,
args, kwargs)
# All the middle chunks
for i in range(1, self.obj.npartitions - 1):
# Get just the needed values from the previous partition
dsk[tail_name, i - 1] = (M.tail, (old_name, i - 1), before)
if after:
next_partition = (head_name, i + 1)
dsk[next_partition] = (M.head, (old_name, i + 1), after)
dsk[new_name, i] = (call_pandas_rolling_method_with_neighbors,
(tail_name, i - 1), (old_name, i),
next_partition, before, after,
self._rolling_kwargs(), method_name,
args, kwargs)
# The last chunk
if self.obj.npartitions > 1:
# if the first wasn't the only partition
end = self.obj.npartitions - 1
dsk[tail_name, end - 1] = (M.tail, (old_name, end - 1), before)
dsk[new_name, end] = (
call_pandas_rolling_method_with_neighbors,
(tail_name, end - 1), (old_name, end), self.obj._meta,
before, 0, self._rolling_kwargs(), method_name, args,
kwargs)
# Do the pandas operation to get the appropriate thing for metadata
pd_rolling = self.obj._meta.rolling(**self._rolling_kwargs())
metadata = getattr(pd_rolling, method_name)(*args, **kwargs)
return self.obj._constructor(merge(self.obj.dask, dsk),
new_name, metadata, self.obj.divisions)
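    # Rough arithmetic for the halo sizes used above (illustrative only):
    # with window=5 and center=False each partition needs before=4 rows from
    # its predecessor and after=0 from its successor; with center=True the
    # window splits as before=2, after=2.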
def count(self, *args, **kwargs):
return self._call_method('count', *args, **kwargs)
def sum(self, *args, **kwargs):
return self._call_method('sum', *args, **kwargs)
def mean(self, *args, **kwargs):
return self._call_method('mean', *args, **kwargs)
def median(self, *args, **kwargs):
return self._call_method('median', *args, **kwargs)
def min(self, *args, **kwargs):
return self._call_method('min', *args, **kwargs)
def max(self, *args, **kwargs):
return self._call_method('max', *args, **kwargs)
def std(self, *args, **kwargs):
return self._call_method('std', *args, **kwargs)
def var(self, *args, **kwargs):
return self._call_method('var', *args, **kwargs)
def skew(self, *args, **kwargs):
return self._call_method('skew', *args, **kwargs)
def kurt(self, *args, **kwargs):
return self._call_method('kurt', *args, **kwargs)
def quantile(self, *args, **kwargs):
return self._call_method('quantile', *args, **kwargs)
def apply(self, *args, **kwargs):
return self._call_method('apply', *args, **kwargs)
def __repr__(self):
return 'Rolling [{}]'.format(','.join(
'{}={}'.format(k, v)
for k, v in self._rolling_kwargs().items() if v is not None))
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/rolling.py",
"copies": "1",
"size": "9313",
"license": "mit",
"hash": 3721957123295669000,
"line_mean": 38.4618644068,
"line_max": 100,
"alpha_frac": 0.5590035434,
"autogenerated": false,
"ratio": 3.9714285714285715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030432114828571,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial, wraps
from toolz import merge
import pandas as pd
from ..base import tokenize
def rolling_chunk(func, part1, part2, window, *args):
if part1.shape[0] < window-1:
raise NotImplementedError("Window larger than partition size")
if window > 1:
extra = window - 1
combined = pd.concat([part1.iloc[-extra:], part2])
applied = func(combined, window, *args)
return applied.iloc[extra:]
else:
return func(part2, window, *args)
def wrap_rolling(func):
"""Create a chunked version of a pandas.rolling_* function"""
@wraps(func)
def rolling(arg, window, *args, **kwargs):
if not isinstance(window, int):
raise TypeError('Window must be an integer')
        if window < 1:
raise ValueError('Window must be a positive integer')
if 'freq' in kwargs or 'how' in kwargs:
raise NotImplementedError('Resampling before rolling computations '
'not supported')
old_name = arg._name
token = tokenize(func, arg, window, args, kwargs)
new_name = 'rolling-' + token
f = partial(func, **kwargs)
dsk = {(new_name, 0): (f, (old_name, 0), window) + args}
for i in range(1, arg.npartitions + 1):
dsk[(new_name, i)] = (rolling_chunk, f, (old_name, i - 1),
(old_name, i), window) + args
return arg._constructor(merge(arg.dask, dsk), new_name,
arg, arg.divisions)
return rolling
rolling_count = wrap_rolling(pd.rolling_count)
rolling_sum = wrap_rolling(pd.rolling_sum)
rolling_mean = wrap_rolling(pd.rolling_mean)
rolling_median = wrap_rolling(pd.rolling_median)
rolling_min = wrap_rolling(pd.rolling_min)
rolling_max = wrap_rolling(pd.rolling_max)
rolling_std = wrap_rolling(pd.rolling_std)
rolling_var = wrap_rolling(pd.rolling_var)
rolling_skew = wrap_rolling(pd.rolling_skew)
rolling_kurt = wrap_rolling(pd.rolling_kurt)
rolling_quantile = wrap_rolling(pd.rolling_quantile)
rolling_apply = wrap_rolling(pd.rolling_apply)
rolling_window = wrap_rolling(pd.rolling_window)
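# Hypothetical usage sketch for the wrappers above (the dask.dataframe import
# and the example frame are assumed, not defined in this module):
#
#     >>> import dask.dataframe as dd
#     >>> ddf = dd.from_pandas(pd.DataFrame({'x': range(10)}), npartitions=3)  # doctest: +SKIP
#     >>> rolling_mean(ddf.x, 3)  # doctest: +SKIP
#
# Each partition only needs the trailing ``window - 1`` rows of its
# predecessor, which is exactly what ``rolling_chunk`` prepends.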
def call_pandas_rolling_method_single(this_partition, rolling_kwargs,
method_name, method_args, method_kwargs):
# used for the start of the df/series (or for rolling through columns)
method = getattr(this_partition.rolling(**rolling_kwargs), method_name)
return method(*method_args, **method_kwargs)
def call_pandas_rolling_method_with_neighbor(prev_partition, this_partition,
rolling_kwargs, method_name, method_args, method_kwargs):
# used for everything except for the start
window = rolling_kwargs['window']
if prev_partition.shape[0] < window-1:
raise NotImplementedError("Window larger than partition size")
if window > 1:
extra = window - 1
combined = pd.concat([prev_partition.iloc[-extra:], this_partition])
method = getattr(combined.rolling(window), method_name)
applied = method(*method_args, **method_kwargs)
return applied.iloc[extra:]
else:
method = getattr(this_partition.rolling(window), method_name)
return method(*method_args, **method_kwargs)
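# NOTE: unlike call_pandas_rolling_method_single above, only ``window`` is
# forwarded to ``.rolling`` here, so other rolling keyword arguments (for
# example ``min_periods``) are not applied beyond the first partition.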
def tail(obj, n):
return obj.tail(n)
class Rolling(object):
# What you get when you do ddf.rolling(...) or similar
"""Provides rolling window calculations.
"""
def __init__(self, obj, kwargs):
self.obj = obj # dataframe or series
self.rolling_kwargs = kwargs
# Allow pandas to raise if appropriate
obj._pd.rolling(**kwargs)
def _call_method(self, method_name, *args, **kwargs):
args = list(args) # make sure dask does not mistake this for a task
old_name = self.obj._name
new_name = 'rolling-' + tokenize(
self.obj, self.rolling_kwargs, method_name, args, kwargs)
# For all but the first chunk, we'll pass the whole previous chunk
# in so we can use it to pre-feed our window
dsk = {(new_name, 0): (
call_pandas_rolling_method_single, (old_name, 0),
self.rolling_kwargs, method_name, args, kwargs)}
if self.rolling_kwargs['axis'] in [0, 'rows']:
# roll in the partition direction (will need to access neighbor)
window = self.rolling_kwargs['window']
tail_name = 'tail-{}-{}'.format(window-1, old_name)
for i in range(1, self.obj.npartitions + 1):
# Get just the needed values from the previous partition
dsk[tail_name, i-1] = (tail, (old_name, i-1), window-1)
dsk[new_name, i] = (
call_pandas_rolling_method_with_neighbor,
(tail_name, i-1), (old_name, i),
self.rolling_kwargs, method_name, args, kwargs)
else:
# no communication needed between partitions for columns
for i in range(1, self.obj.npartitions + 1):
dsk[new_name, i] = (
call_pandas_rolling_method_single, (old_name, i),
self.rolling_kwargs, method_name, args, kwargs)
# Do the pandas operation to get the appropriate thing for metadata
pd_rolling = self.obj._pd.rolling(**self.rolling_kwargs)
metadata = getattr(pd_rolling, method_name)(*args, **kwargs)
return self.obj._constructor(
merge(self.obj.dask, dsk),
new_name,
metadata,
self.obj.divisions)
def count(self, *args, **kwargs):
return self._call_method('count', *args, **kwargs)
def sum(self, *args, **kwargs):
return self._call_method('sum', *args, **kwargs)
def mean(self, *args, **kwargs):
return self._call_method('mean', *args, **kwargs)
def median(self, *args, **kwargs):
return self._call_method('median', *args, **kwargs)
def min(self, *args, **kwargs):
return self._call_method('min', *args, **kwargs)
def max(self, *args, **kwargs):
return self._call_method('max', *args, **kwargs)
def std(self, *args, **kwargs):
return self._call_method('std', *args, **kwargs)
def var(self, *args, **kwargs):
return self._call_method('var', *args, **kwargs)
def skew(self, *args, **kwargs):
return self._call_method('skew', *args, **kwargs)
def kurt(self, *args, **kwargs):
return self._call_method('kurt', *args, **kwargs)
def quantile(self, *args, **kwargs):
return self._call_method('quantile', *args, **kwargs)
def apply(self, *args, **kwargs):
return self._call_method('apply', *args, **kwargs)
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/rolling.py",
"copies": "1",
"size": "6779",
"license": "bsd-3-clause",
"hash": 8245465353481434000,
"line_mean": 37.2994350282,
"line_max": 79,
"alpha_frac": 0.6114471161,
"autogenerated": false,
"ratio": 3.737045203969129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4848492320069129,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import reduce
import operator
import warnings
import datashape
from datashape import var, Map
from datashape.predicates import (
isscalar,
iscollection,
isrecord,
istabular,
)
from odo import odo
from pandas import DataFrame, Series
import blaze
from .compute import compute
from .compute.core import coerce_scalar
from .expr import Expr, Symbol, ndim
from .dispatch import dispatch
from .compatibility import _strtypes
__all__ = ['to_html']
def data(*args, **kwargs):
warnings.warn(DeprecationWarning(
'blaze.interactive.data has been moved to blaze.data'))
return blaze.expr.literal.data(*args, **kwargs)
def concrete_head(expr, n=10):
""" Return head of computed expression """
if not expr._resources():
raise ValueError("Expression does not contain data resources")
if not iscollection(expr.dshape):
return compute(expr)
head = expr.head(n + 1)
if not iscollection(expr.dshape):
return odo(head, object)
elif isrecord(expr.dshape.measure):
return odo(head, DataFrame)
df = odo(head, DataFrame)
df.columns = [expr._name]
return df
def _peek_tables(expr, n=10):
return concrete_head(expr, n).rename(columns={None: ''})
def repr_tables(expr, n=10):
result = concrete_head(expr, n).rename(columns={None: ''})
if isinstance(result, (DataFrame, Series)):
s = repr(result)
        if len(result) > n:
s = '\n'.join(s.split('\n')[:-1]) + '\n...'
return s
else:
return result.peek() # pragma: no cover
def numel(shape):
if var in shape:
return None
if not shape:
return 1
return reduce(operator.mul, shape, 1)
def short_dshape(ds, nlines=5):
s = datashape.coretypes.pprint(ds)
lines = s.split('\n')
    if len(lines) > nlines:
s = '\n'.join(lines[:nlines]) + '\n ...'
return s
def _peek(expr):
# Pure Expressions, not interactive
if not set(expr._resources().keys()).issuperset(expr._leaves()):
return expr
# Scalars
if ndim(expr) == 0 and isscalar(expr.dshape):
return coerce_scalar(compute(expr), str(expr.dshape))
# Tables
if (ndim(expr) == 1 and (istabular(expr.dshape) or
isscalar(expr.dshape.measure) or
isinstance(expr.dshape.measure, Map))):
return _peek_tables(expr, 10)
# Smallish arrays
if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:
return compute(expr)
# Other
dat = expr._resources().values()
if len(dat) == 1:
dat = list(dat)[0] # may be dict_values
return dat
def expr_repr(expr, n=10):
# Pure Expressions, not interactive
if not set(expr._resources().keys()).issuperset(expr._leaves()):
return str(expr)
# Scalars
if ndim(expr) == 0 and isscalar(expr.dshape):
return repr(coerce_scalar(compute(expr), str(expr.dshape)))
# Tables
if (ndim(expr) == 1 and (istabular(expr.dshape) or
isscalar(expr.dshape.measure) or
isinstance(expr.dshape.measure, Map))):
return repr_tables(expr, 10)
# Smallish arrays
if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:
return repr(compute(expr))
# Other
dat = expr._resources().values()
if len(dat) == 1:
dat = list(dat)[0] # may be dict_values
s = 'Data: %s' % dat
if not isinstance(expr, Symbol):
s += '\nExpr: %s' % str(expr)
s += '\nDataShape: %s' % short_dshape(expr.dshape, nlines=7)
return s
@dispatch(DataFrame)
def to_html(df):
return df.to_html()
@dispatch(Expr)
def to_html(expr):
# Tables
if not expr._resources() or ndim(expr) != 1:
return to_html(expr_repr(expr))
return to_html(concrete_head(expr))
@dispatch(object)
def to_html(o):
return repr(o)
@dispatch(_strtypes)
def to_html(o):
return o.replace('\n', '<br>')
def table_length(expr):
try:
return expr._len()
except ValueError:
return int(expr.count())
Expr.peek = _peek
Expr.__len__ = table_length
def convert_base(typ, x):
x = compute(x)
try:
return typ(x)
    except Exception:
return typ(odo(x, typ))
Expr.__int__ = lambda x: convert_base(int, x)
Expr.__float__ = lambda x: convert_base(float, x)
Expr.__complex__ = lambda x: convert_base(complex, x)
Expr.__bool__ = lambda x: convert_base(bool, x)
Expr.__nonzero__ = lambda x: convert_base(bool, x)
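# Hypothetical usage sketch of the hooks installed above (an in-memory data
# source is assumed):
#
#     >>> t = blaze.data([1, 2, 3])  # doctest: +SKIP
#     >>> t.peek()                   # doctest: +SKIP
#     >>> int(t.sum())               # coerced through convert_base  # doctest: +SKIP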
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/interactive.py",
"copies": "3",
"size": "4644",
"license": "bsd-3-clause",
"hash": -1531395286350966800,
"line_mean": 23.4421052632,
"line_max": 77,
"alpha_frac": 0.6106804479,
"autogenerated": false,
"ratio": 3.3897810218978104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.550046146979781,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import wraps
from collections import defaultdict
from glue.core.util import disambiguate
from glue.core.decorators import singleton
@singleton
class Registry(object):
""" Stores labels for classes of objects. Ensures uniqueness
The registry ensures that labels for objects of the same "group"
are unique, and disambiguates as necessary. By default,
    object types are used as groups, but anything can be used as a group.
Registry is a singleton, and thus all instances of Registry
share the same information
Usage:
>>> r = Registry()
>>> x, y, z = 3, 4, 5
>>> w = list()
>>> r.register(x, 'Label')
'Label'
>>> r.register(y, 'Label') # duplicate label disambiguated
'Label_01'
>>> r.register(w, 'Label') # uniqueness only enforced within groups
'Label'
>>> r.register(z, 'Label', group=int) # put z in integer registry
'Label_02'
"""
def __init__(self):
self._registry = defaultdict(dict)
self._disable = False
def register(self, obj, label, group=None):
""" Register label with object (possibly disamgiguating)
:param obj: The object to label
:param label: The desired label
:param group: (optional) use the registry for group (default=type(obj))
:rtype: str
*Returns*
The disambiguated label
"""
group = group or type(obj)
reg = self._registry[group]
has_obj = obj in reg
has_label = label in reg.values()
label_is_obj = has_label and has_obj and reg[obj] == label
if has_label and (not label_is_obj):
values = set(reg.values())
if has_obj:
values.remove(reg[obj])
if not self._disable:
label = disambiguate(label, values)
reg[obj] = label
return label
def unregister(self, obj, group=None):
group = group or type(obj)
reg = self._registry[group]
if obj in reg:
reg.pop(obj)
def clear(self):
""" Reset registry, clearing all stored values """
self._registry = defaultdict(dict)
def disable(func):
""" Decorator to temporarily disable disambiguation """
@wraps(func)
def wrapper(*args, **kwargs):
r = Registry()
old = r._disable
r._disable = True
try:
return func(*args, **kwargs)
finally:
r._disable = old
return wrapper
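# Hypothetical usage sketch of the decorator above:
#
#     >>> @disable
#     ... def register_exact(obj, label):
#     ...     return Registry().register(obj, label)  # doctest: +SKIP
#
# While the wrapped function runs, colliding labels are stored as-is instead
# of being disambiguated; the previous flag is restored in the finally block.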
| {
"repo_name": "saimn/glue",
"path": "glue/core/registry.py",
"copies": "5",
"size": "2605",
"license": "bsd-3-clause",
"hash": 7838172009700650000,
"line_mean": 26.4210526316,
"line_max": 79,
"alpha_frac": 0.5888675624,
"autogenerated": false,
"ratio": 4.181380417335474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7270247979735474,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import wraps
from collections import Iterator
import numpy as np
from toolz import merge, merge_sorted
from .core import Array
from ..base import tokenize
from .. import sharedict
@wraps(np.percentile)
def _percentile(a, q, interpolation='linear'):
if not len(a):
return None
if isinstance(q, Iterator):
q = list(q)
if str(a.dtype) == 'category':
result = np.percentile(a.codes, q, interpolation=interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, a.categories, a.ordered)
if np.issubdtype(a.dtype, np.datetime64):
a2 = a.astype('i8')
result = np.percentile(a2, q, interpolation=interpolation)
return result.astype(a.dtype)
if not np.issubdtype(a.dtype, np.number):
interpolation = 'nearest'
return np.percentile(a, q, interpolation=interpolation)
def percentile(a, q, interpolation='linear'):
""" Approximate percentile of 1-D array
See numpy.percentile for more information
"""
if not a.ndim == 1:
raise NotImplementedError(
"Percentiles only implemented for 1-d arrays")
q = np.array(q)
token = tokenize(a, list(q), interpolation)
name = 'percentile_chunk-' + token
dsk = dict(((name, i), (_percentile, (key), q, interpolation))
for i, key in enumerate(a._keys()))
name2 = 'percentile-' + token
dsk2 = {(name2, 0): (merge_percentiles, q, [q] * len(a.chunks[0]),
sorted(dsk), a.chunks[0], interpolation)}
dtype = a.dtype
if np.issubdtype(dtype, np.integer):
dtype = (np.array([], dtype=dtype) / 0.5).dtype
dsk = merge(dsk, dsk2)
dsk = sharedict.merge(a.dask, (name2, dsk))
return Array(dsk, name2, chunks=((len(q),),), dtype=dtype)
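# Rough usage sketch for the function above (the array construction is
# assumed, not part of this module):
#
#     >>> import dask.array as da
#     >>> x = da.from_array(np.arange(100), chunks=25)  # doctest: +SKIP
#     >>> percentile(x, [25, 50, 75]).compute()         # doctest: +SKIP
#
# Each block computes its own percentiles and merge_percentiles combines
# them, weighting every block by the number of elements it held, so the
# result is approximate for multi-block inputs.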
def merge_percentiles(finalq, qs, vals, Ns, interpolation='lower'):
""" Combine several percentile calculations of different data.
Parameters
----------
finalq : numpy.array
Percentiles to compute (must use same scale as ``qs``).
qs : sequence of numpy.arrays
Percentiles calculated on different sets of data.
vals : sequence of numpy.arrays
Resulting values associated with percentiles ``qs``.
Ns : sequence of integers
The number of data elements associated with each data set.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specify the type of interpolation to use to calculate final
percentiles. For more information, see numpy.percentile.
Examples
--------
>>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
>>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
>>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
>>> Ns = [100, 100] # Both original arrays had 100 elements
>>> merge_percentiles(finalq, qs, vals, Ns)
array([ 1, 2, 3, 4, 10, 11, 12, 13])
"""
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = np.array(finalq)
qs = list(map(list, qs))
vals = list(vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
raise ValueError("No non-trivial arrays found")
qs, vals, Ns = L
# TODO: Perform this check above in percentile once dtype checking is easy
# Here we silently change meaning
if str(vals[0].dtype) == 'category':
result = merge_percentiles(finalq, qs, [v.codes for v in vals], Ns, interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
if not np.issubdtype(vals[0].dtype, np.number):
interpolation = 'nearest'
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError('qs, vals, and Ns parameters must be the same length')
# transform qs and Ns into number of observations between percentiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty(len(q))
count[1:] = np.diff(q)
count[0] = q[0]
count *= N
counts.append(count)
# Sort by calculated percentile values, then number of observations.
# >95% of the time in this function is spent in `merge_sorted` below.
# An alternative that uses numpy sort is shown. It is sometimes
# comparable to, but typically slower than, `merge_sorted`.
#
# >>> A = np.concatenate(map(np.array, map(zip, vals, counts)))
# >>> A.sort(0, kind='mergesort')
combined_vals_counts = merge_sorted(*map(zip, vals, counts))
combined_vals, combined_counts = zip(*combined_vals_counts)
combined_vals = np.array(combined_vals)
combined_counts = np.array(combined_counts)
# percentile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq percentiles to match combined_q
desired_q = finalq * sum(Ns)
# the behavior of different interpolation methods should be
# investigated further.
if interpolation == 'linear':
rv = np.interp(desired_q, combined_q, combined_vals)
else:
left = np.searchsorted(combined_q, desired_q, side='left')
right = np.searchsorted(combined_q, desired_q, side='right') - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
if interpolation == 'lower':
rv = combined_vals[lower]
elif interpolation == 'higher':
rv = combined_vals[upper]
elif interpolation == 'midpoint':
rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
elif interpolation == 'nearest':
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals[index]
else:
raise ValueError("interpolation can only be 'linear', 'lower', "
"'higher', 'midpoint', or 'nearest'")
return rv
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/percentile.py",
"copies": "2",
"size": "6272",
"license": "bsd-3-clause",
"hash": -6131227793143068000,
"line_mean": 36.3333333333,
"line_max": 90,
"alpha_frac": 0.620057398,
"autogenerated": false,
"ratio": 3.678592375366569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5298649773366569,
"avg_score": null,
"num_lines": null
} |