| text (string, 0–1.05M chars) | meta (dict) |
|---|---|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import Organization, Inventory, Group, Host
@pytest.mark.django_db
def test_create_group(run_module, admin_user):
org = Organization.objects.create(name='test-org')
inv = Inventory.objects.create(name='test-inv', organization=org)
variables = {"ansible_network_os": "iosxr"}
result = run_module('tower_group', dict(
name='Test Group',
inventory='test-inv',
variables=variables,
state='present'
), admin_user)
assert result.get('changed'), result
group = Group.objects.get(name='Test Group')
assert group.inventory == inv
assert group.variables == '{"ansible_network_os": "iosxr"}'
result.pop('invocation')
assert result == {
'id': group.id,
'name': 'Test Group',
'changed': True,
}
@pytest.mark.django_db
def test_associate_hosts_and_children(run_module, admin_user, organization):
inv = Inventory.objects.create(name='test-inv', organization=organization)
group = Group.objects.create(name='Test Group', inventory=inv)
inv_hosts = [Host.objects.create(inventory=inv, name='foo{0}'.format(i)) for i in range(3)]
group.hosts.add(inv_hosts[0], inv_hosts[1])
child = Group.objects.create(inventory=inv, name='child_group')
result = run_module('tower_group', dict(
name='Test Group',
inventory='test-inv',
hosts=[inv_hosts[1].name, inv_hosts[2].name],
children=[child.name],
state='present'
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed'] is True
assert set(group.hosts.all()) == set([inv_hosts[1], inv_hosts[2]])
assert set(group.children.all()) == set([child])
@pytest.mark.django_db
def test_associate_on_create(run_module, admin_user, organization):
inv = Inventory.objects.create(name='test-inv', organization=organization)
child = Group.objects.create(name='test-child', inventory=inv)
host = Host.objects.create(name='test-host', inventory=inv)
result = run_module('tower_group', dict(
name='Test Group',
inventory='test-inv',
hosts=[host.name],
groups=[child.name],
state='present'
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed'] is True
group = Group.objects.get(pk=result['id'])
assert set(group.hosts.all()) == set([host])
assert set(group.children.all()) == set([child])
@pytest.mark.django_db
def test_children_alias_of_groups(run_module, admin_user, organization):
inv = Inventory.objects.create(name='test-inv', organization=organization)
group = Group.objects.create(name='Test Group', inventory=inv)
child = Group.objects.create(inventory=inv, name='child_group')
result = run_module('tower_group', dict(
name='Test Group',
inventory='test-inv',
groups=[child.name],
state='present'
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result['changed'] is True
assert set(group.children.all()) == set([child])
@pytest.mark.django_db
def test_tower_group_idempotent(run_module, admin_user):
# https://github.com/ansible/ansible/issues/46803
org = Organization.objects.create(name='test-org')
inv = Inventory.objects.create(name='test-inv', organization=org)
group = Group.objects.create(
name='Test Group',
inventory=inv,
)
result = run_module('tower_group', dict(
name='Test Group',
inventory='test-inv',
state='present'
), admin_user)
result.pop('invocation')
assert result == {
'id': group.id,
'changed': False, # idempotency assertion
}
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx_collection/test/awx/test_group.py",
"copies": "1",
"size": "3881",
"license": "apache-2.0",
"hash": 894703511905125200,
"line_mean": 32.1709401709,
"line_max": 95,
"alpha_frac": 0.6480288585,
"autogenerated": false,
"ratio": 3.6304957904583723,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4778524648958372,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import Organization
@pytest.mark.django_db
def test_create_organization(run_module, admin_user):
module_args = {
'name': 'foo',
'description': 'barfoo',
'state': 'present',
'max_hosts': '0',
'tower_host': None,
'tower_username': None,
'tower_password': None,
'validate_certs': None,
'tower_oauthtoken': None,
'tower_config_file': None,
'custom_virtualenv': None
}
result = run_module('tower_organization', module_args, admin_user)
assert result.get('changed'), result
org = Organization.objects.get(name='foo')
assert result == {
"name": "foo",
"changed": True,
"id": org.id,
"invocation": {
"module_args": module_args
}
}
assert org.description == 'barfoo'
@pytest.mark.django_db
def test_create_organization_with_venv(run_module, admin_user, mocker):
path = '/var/lib/awx/venv/custom-venv/foobar13489435/'
with mocker.patch('awx.main.models.mixins.get_custom_venv_choices', return_value=[path]):
result = run_module('tower_organization', {
'name': 'foo',
'custom_virtualenv': path,
'state': 'present'
}, admin_user)
assert result.pop('changed'), result
org = Organization.objects.get(name='foo')
result.pop('invocation')
assert result == {
"name": "foo",
"id": org.id
}
assert org.custom_virtualenv == path
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx_collection/test/awx/test_organization.py",
"copies": "1",
"size": "1616",
"license": "apache-2.0",
"hash": -1869186070485907000,
"line_mean": 25.9333333333,
"line_max": 93,
"alpha_frac": 0.5891089109,
"autogenerated": false,
"ratio": 3.689497716894977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4778606627794977,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import Organization, Team
@pytest.mark.django_db
def test_create_team(run_module, admin_user):
org = Organization.objects.create(name='foo')
result = run_module('tower_team', {
'name': 'foo_team',
'description': 'fooin around',
'state': 'present',
'organization': 'foo'
}, admin_user)
team = Team.objects.filter(name='foo_team').first()
result.pop('invocation')
assert result == {
"changed": True,
"name": "foo_team",
"id": team.id if team else None,
}
team = Team.objects.get(name='foo_team')
assert team.description == 'fooin around'
assert team.organization_id == org.id
@pytest.mark.django_db
def test_modify_team(run_module, admin_user):
org = Organization.objects.create(name='foo')
team = Team.objects.create(
name='foo_team',
organization=org,
description='flat foo'
)
assert team.description == 'flat foo'
result = run_module('tower_team', {
'name': 'foo_team',
'description': 'fooin around',
'organization': 'foo'
}, admin_user)
team.refresh_from_db()
result.pop('invocation')
assert result == {
"changed": True,
"id": team.id,
}
assert team.description == 'fooin around'
# 2nd modification, should cause no change
result = run_module('tower_team', {
'name': 'foo_team',
'description': 'fooin around',
'organization': 'foo'
}, admin_user)
result.pop('invocation')
assert result == {
"id": team.id,
"changed": False
}
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx_collection/test/awx/test_team.py",
"copies": "1",
"size": "1729",
"license": "apache-2.0",
"hash": 7687029209968111000,
"line_mean": 25.196969697,
"line_max": 66,
"alpha_frac": 0.5928282244,
"autogenerated": false,
"ratio": 3.6553911205073994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47482193449073995,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from ansible import __version__
from ansible.errors import AnsibleError
if __version__.startswith('1'):
raise AnsibleError('Trellis no longer supports Ansible 1.x. Please upgrade to Ansible 2.x.')
# These imports will produce Traceback in Ansible 1.x, so place after version check
from __main__ import cli
from ansible.compat.six import iteritems
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.template import Templar
class VarsModule(object):
''' Creates and modifies host variables '''
def __init__(self, inventory):
self.inventory = inventory
self.inventory_basedir = inventory.basedir()
self.loader = DataLoader()
self._options = cli.options if cli else None
def raw_triage(self, key_string, item, patterns):
# process dict values
if isinstance(item, AnsibleMapping):
return dict((key,self.raw_triage('.'.join([key_string, key]), value, patterns)) for key,value in item.iteritems())
# process list values
elif isinstance(item, AnsibleSequence):
return [self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i,value in enumerate(item)]
# wrap values if they match raw_vars pattern
elif isinstance(item, AnsibleUnicode):
match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
return ''.join(['{% raw %}', item, '{% endraw %}']) if not item.startswith(('{% raw', '{%raw')) and match else item
def raw_vars(self, host, hostvars):
if 'raw_vars' not in hostvars:
return
raw_vars = Templar(variables=hostvars, loader=self.loader).template(hostvars['raw_vars'])
if not isinstance(raw_vars, list):
raise AnsibleError('The `raw_vars` variable must be defined as a list.')
patterns = [re.sub(r'\*', '(.)*', re.sub(r'\.', '\.', var)) for var in raw_vars if var.split('.')[0] in hostvars]
keys = set(pattern.split('\.')[0] for pattern in patterns)
for key in keys:
host.vars[key] = self.raw_triage(key, hostvars[key], patterns)
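    # Illustrative sketch (not part of the original plugin): given group_vars such as
    #   raw_vars: ['wordpress_sites.*.env']          # hypothetical entry
    # the list comprehension in raw_vars() above turns each entry into a regex,
    #   'wordpress_sites.*.env' -> 'wordpress_sites\.(.)*\.env'
    # and raw_triage() then walks hostvars['wordpress_sites'], wrapping every string
    # whose dotted key path matches that regex in '{% raw %}...{% endraw %}' so Jinja2
    # leaves it untemplated.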
def cli_options(self):
options = []
strings = {
'--connection': 'connection',
'--inventory-file': 'inventory',
'--private-key': 'private_key_file',
'--ssh-common-args': 'ssh_common_args',
'--ssh-extra-args': 'ssh_extra_args',
'--timeout': 'timeout',
'--vault-password-file': 'vault_password_file',
}
for option,value in strings.iteritems():
if getattr(self._options, value, False):
options.append("{0}='{1}'".format(option, str(getattr(self._options, value))))
if getattr(self._options, 'ask_vault_pass', False):
options.append('--ask-vault-pass')
return ' '.join(options)
def darwin_without_passlib(self):
if not sys.platform.startswith('darwin'):
return False
try:
import passlib.hash
return False
except:
return True
def get_host_vars(self, host, vault_password=None):
self.raw_vars(host, host.get_group_vars())
host.vars['cli_options'] = self.cli_options()
host.vars['cli_ask_pass'] = getattr(self._options, 'ask_pass', False)
host.vars['cli_ask_become_pass'] = getattr(self._options, 'become_ask_pass', False)
host.vars['darwin_without_passlib'] = self.darwin_without_passlib()
return {}
| {
"repo_name": "jvandijk/epwp-trellis",
"path": "lib/trellis/plugins/vars/vars.py",
"copies": "2",
"size": "3734",
"license": "mit",
"hash": -5642737786814351000,
"line_mean": 38.3052631579,
"line_max": 127,
"alpha_frac": 0.6186395287,
"autogenerated": false,
"ratio": 4.032397408207343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5651036936907343,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from __main__ import cli
from ansible.compat.six import iteritems
from ansible.errors import AnsibleError
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
class CallbackModule(CallbackBase):
''' Creates and modifies play and host variables '''
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'vars'
def __init__(self):
self.loader = DataLoader()
self._options = cli.options if cli else None
def raw_triage(self, key_string, item, patterns):
# process dict values
if isinstance(item, AnsibleMapping):
return AnsibleMapping(dict((key,self.raw_triage('.'.join([key_string, key]), value, patterns)) for key,value in item.iteritems()))
# process list values
elif isinstance(item, AnsibleSequence):
return AnsibleSequence([self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i,value in enumerate(item)])
# wrap values if they match raw_vars pattern
elif isinstance(item, AnsibleUnicode):
match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
return AnsibleUnicode(''.join(['{% raw %}', item, '{% endraw %}'])) if not item.startswith(('{% raw', '{%raw')) and match else item
def raw_vars(self, play, host, hostvars):
if 'raw_vars' not in hostvars:
return
raw_vars = Templar(variables=hostvars, loader=self.loader).template(hostvars['raw_vars'])
if not isinstance(raw_vars, list):
raise AnsibleError('The `raw_vars` variable must be defined as a list.')
patterns = [re.sub(r'\*', '(.)*', re.sub(r'\.', '\.', var)) for var in raw_vars if var.split('.')[0] in hostvars]
keys = set(pattern.split('\.')[0] for pattern in patterns)
for key in keys:
if key in play.vars:
play.vars[key] = self.raw_triage(key, play.vars[key], patterns)
elif key in hostvars:
host.vars[key] = self.raw_triage(key, hostvars[key], patterns)
def cli_options(self):
options = []
strings = {
'--connection': 'connection',
'--inventory-file': 'inventory',
'--private-key': 'private_key_file',
'--ssh-common-args': 'ssh_common_args',
'--ssh-extra-args': 'ssh_extra_args',
'--timeout': 'timeout',
'--vault-password-file': 'vault_password_file',
}
for option,value in strings.iteritems():
if getattr(self._options, value, False):
options.append("{0}='{1}'".format(option, str(getattr(self._options, value))))
if getattr(self._options, 'ask_vault_pass', False):
options.append('--ask-vault-pass')
return ' '.join(options)
def darwin_without_passlib(self):
if not sys.platform.startswith('darwin'):
return False
try:
import passlib.hash
return False
except:
return True
def v2_playbook_on_play_start(self, play):
for host in play.get_variable_manager()._inventory.list_hosts(play.hosts[0]):
hostvars = play.get_variable_manager().get_vars(loader=self.loader, play=play, host=host)
self.raw_vars(play, host, hostvars)
host.vars['cli_options'] = self.cli_options()
host.vars['cli_ask_pass'] = getattr(self._options, 'ask_pass', False)
host.vars['cli_ask_become_pass'] = getattr(self._options, 'become_ask_pass', False)
host.vars['darwin_without_passlib'] = self.darwin_without_passlib()
| {
"repo_name": "mAAdhaTTah/bedrock-ansible",
"path": "lib/trellis/plugins/callback/vars.py",
"copies": "5",
"size": "3892",
"license": "mit",
"hash": -5227776952763439000,
"line_mean": 39.9684210526,
"line_max": 143,
"alpha_frac": 0.6138232271,
"autogenerated": false,
"ratio": 4.0123711340206185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002683700033740068,
"num_lines": 95
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from __main__ import cli
from ansible.module_utils.six import iteritems
from ansible.errors import AnsibleError
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.utils.unsafe_proxy import wrap_var
from ansible import context
class CallbackModule(CallbackBase):
''' Creates and modifies play and host variables '''
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'vars'
def __init__(self):
super(CallbackModule, self).__init__()
self._options = context.CLIARGS
def raw_triage(self, key_string, item, patterns):
# process dict values
if isinstance(item, AnsibleMapping):
return AnsibleMapping(dict((key,self.raw_triage('.'.join([key_string, key]), value, patterns)) for key,value in iteritems(item)))
# process list values
elif isinstance(item, AnsibleSequence):
return AnsibleSequence([self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i,value in enumerate(item)])
# wrap values if they match raw_vars pattern
elif isinstance(item, AnsibleUnicode):
match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
return wrap_var(item) if match else item
else:
return item
def raw_vars(self, play, host, hostvars):
if 'raw_vars' not in hostvars:
return
raw_vars = Templar(variables=hostvars, loader=play._loader).template(hostvars['raw_vars'])
if not isinstance(raw_vars, list):
raise AnsibleError('The `raw_vars` variable must be defined as a list.')
patterns = [re.sub(r'\*', '(.)*', re.sub(r'\.', '\.', var)) for var in raw_vars if var.split('.')[0] in hostvars]
keys = set(pattern.split('\.')[0] for pattern in patterns)
for key in keys:
if key in play.vars:
play.vars[key] = self.raw_triage(key, play.vars[key], patterns)
elif key in hostvars:
host.vars[key] = self.raw_triage(key, hostvars[key], patterns)
def cli_options(self):
options = []
strings = {
'--connection': 'connection',
'--private-key': 'private_key_file',
'--ssh-common-args': 'ssh_common_args',
'--ssh-extra-args': 'ssh_extra_args',
'--timeout': 'timeout',
'--vault-password-file': 'vault_password_file',
}
for option,value in iteritems(strings):
if self._options.get(value, False):
options.append("{0}='{1}'".format(option, str(self._options.get(value))))
for inventory in self._options.get('inventory'):
options.append("--inventory='{}'".format(str(inventory)))
if self._options.get('ask_vault_pass', False):
options.append('--ask-vault-pass')
return ' '.join(options)
def darwin_without_passlib(self):
if not sys.platform.startswith('darwin'):
return False
try:
import passlib.hash
return False
except:
return True
def v2_playbook_on_play_start(self, play):
env = play.get_variable_manager().get_vars(play=play).get('env', '')
env_group = next((group for key,group in iteritems(play.get_variable_manager()._inventory.groups) if key == env), False)
if env_group:
env_group.set_priority(20)
for host in play.get_variable_manager()._inventory.list_hosts(play.hosts[0]):
hostvars = play.get_variable_manager().get_vars(play=play, host=host)
self.raw_vars(play, host, hostvars)
host.vars['ssh_args_default'] = PlayContext(play=play)._ssh_args.default
host.vars['cli_options'] = self.cli_options()
host.vars['cli_ask_pass'] = self._options.get('ask_pass', False)
host.vars['cli_ask_become_pass'] = self._options.get('become_ask_pass', False)
host.vars['darwin_without_passlib'] = self.darwin_without_passlib()
| {
"repo_name": "buluma/trellis",
"path": "lib/trellis/plugins/callback/vars.py",
"copies": "5",
"size": "4354",
"license": "mit",
"hash": -8858269686396070000,
"line_mean": 38.9449541284,
"line_max": 141,
"alpha_frac": 0.6201194304,
"autogenerated": false,
"ratio": 3.9871794871794872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7107298917579488,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import subprocess
import os
import json
import sys
from ansible.errors import AnsibleError
import qb
from qb.ipc.rpc import client as rpc_client
def get_semver_path():
bin_path = os.path.join(qb.ROOT, 'node_modules', 'semver', 'bin', 'semver')
if not os.path.isfile(bin_path):
raise Exception("can't find semver at %s" % bin_path)
return bin_path
# get_semver_path()
def semver_inc(version, level = None, preid = None):
'''increment the version at level, with optional preid for pre- levels.
runs
semver --increment <level> [--preid <preid>] <version>
This does **not** do what you probably want... `preid` is ignored:
>>> semver_inc('1.0.0', 'patch', preid = 'dev')
'1.0.1'
>>> semver_inc('1.0.0', 'minor', preid = 'dev')
'1.1.0'
The only way to get the `preid` appended is to increment the prerelease:
>>> semver_inc('1.0.0', 'prerelease', preid = 'dev')
'1.0.1-dev.0'
'''
cmd = [
get_semver_path(),
'--increment',
]
if not (level is None):
cmd.append(level)
if not (preid is None):
cmd.append('--preid')
cmd.append(preid)
cmd.append(version)
out = subprocess.check_output(cmd)
return out.rstrip()
# semver_inc()
def semver_parse(version):
'''parse semver.
'''
stmt = (
'''console.log(JSON.stringify(require('semver')(%s), null, 2))''' %
json.dumps(version)
)
cmd = ['node', '--eval', stmt]
out = subprocess.check_output(
cmd,
cwd = qb.ROOT
)
version = json.loads(out)
version['is_release'] = len(version['prerelease']) == 0
version['is_dev'] = (
len(version['prerelease']) > 0 and
version['prerelease'][0] == 'dev'
)
version['is_rc'] = (
len(version['prerelease']) > 0 and
version['prerelease'][0] == 'rc'
)
if version['is_release']:
version['level'] = 'release'
else:
version['level'] = version['prerelease'][0]
    # deprecated name for level
version['type'] = version['level']
version['release'] = "%(major)s.%(minor)s.%(patch)s" % version
return version
# semver_parse()
def qb_version_parse(version_string):
'''Parse version into QB::Package::Version
'''
return rpc_client.send('QB::Package::Version', 'from', version_string)
def qb_read_version(file_path):
'''Read a QB::Package::Version from a file.
'''
with open(file_path, 'r') as file:
return qb_version_parse(file.read())
class FilterModule(object):
''' version manipulation filters '''
def filters(self):
return {
'semver_inc': semver_inc,
'semver_parse': semver_parse,
'qb_version_parse': qb_version_parse,
'qb_read_version': qb_read_version,
}
# filters()
# FilterModule
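# Illustrative usage sketch (not part of this file): once QB loads this plugin,
# the filters are available in playbook templates, with behaviour as in the
# doctests above, e.g.
#   {{ '1.0.0' | semver_inc('prerelease', preid='dev') }}   -> '1.0.1-dev.0'
#   {{ '1.0.1-dev.0' | semver_parse }}                      -> dict with 'major',
#       'minor', 'patch', 'prerelease', plus the 'is_release'/'is_dev'/'is_rc',
#       'level' and 'release' keys added above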
# run module doctests when executed directly
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"repo_name": "nrser/qb",
"path": "plugins/filter/version_filters.py",
"copies": "1",
"size": "3230",
"license": "mit",
"hash": -32479991914122900,
"line_mean": 21.275862069,
"line_max": 79,
"alpha_frac": 0.5606811146,
"autogenerated": false,
"ratio": 3.5185185185185186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45791996331185186,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
def usage():
x = [
'Usage: tidyrst.py <filename>',
'',
'This script tweaks a .rst file output by Ansible\'s plugin_formatter.py',
'such that it is suitable to be published on readthedocs.',
]
print('\n'.join(x))
def main():
if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help', '?'):
usage()
return
with open(sys.argv[1], 'r') as fd:
lines = fd.readlines()
with open(sys.argv[1], 'w') as fd:
for line in lines:
if '<modules_support>' in line:
fd.write('- This module is `maintained by the Ansible Community <https://docs.ansible.com/ansible/latest/user_guide/modules_support.html#modules-support>`_.\n')
elif '<common_return_values>' in line:
fd.write('Common return values are `documented here <https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#common-return-values>`_, the following are the fields unique to this module:\n')
elif '.. hint::' in line or 'edit this document' in line:
continue
else:
fd.write(line)
if __name__ == '__main__':
main()
| {
"repo_name": "PaloAltoNetworks-BD/ansible-pan",
"path": "docs/tidyrst.py",
"copies": "1",
"size": "1298",
"license": "isc",
"hash": -7300574429589530000,
"line_mean": 34.0810810811,
"line_max": 236,
"alpha_frac": 0.5878274268,
"autogenerated": false,
"ratio": 3.806451612903226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9880055978500355,
"avg_score": 0.0028446122405741186,
"num_lines": 37
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
"""
EXAMPLES = """
"""
RETURN = """
"""
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
try:
assert 'group_names' in variables, "Missing 'group_names' in variables"
assert len(terms) == 1, "Inheritance chain lookup plugin expects 1 term, got %s" % len(terms)
except AssertionError as exc:
raise AnsibleError(str(exc))
_vars = variables or {}
base = terms[0]
r = []
if 'override_' + base in _vars:
prefixes = ['override']
else:
prefixes = ['all', 'group', 'host']
prefixes[2:2] = [n.replace('-', '_') + '_group' for n in _vars['group_names']]
for prefix in prefixes:
_var = prefix + '_' + base
_val = _vars.get(_var, [])
if _val:
display.vvvv('%s = %s' % (_var, _val))
try:
t = self._templar.template(
_vars.get(_var, []), preserve_trailing_newlines=True,
convert_data=True, escape_backslashes=False)
except Exception as exc:
raise AnsibleError("Templating '%s' failed: %s" % (_var, str(exc)))
if t != _val:
display.vvvv('%s -> %s' % (_var, t))
r.extend(t)
# this is actually not invalid if the name is valid, and there's no way
# to default in the playbook, so we'll just return an empty list here
#if not r:
# raise AnsibleError("No inheritance chain variables exist for base '%s'" % base)
return r
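# Illustrative sketch (not part of the original plugin): for a host in groups
# ['webservers'] and a lookup term 'packages' (hypothetical variable base), the
# chain above concatenates, in order, the values of:
#   all_packages, group_packages, webservers_group_packages, host_packages
# unless override_packages is defined, in which case only override_packages is used.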
| {
"repo_name": "galaxyproject/ansible-common-roles",
"path": "lookup_plugins/inheritance_chain.py",
"copies": "1",
"size": "1934",
"license": "mit",
"hash": -8784124468774985000,
"line_mean": 32.3448275862,
"line_max": 105,
"alpha_frac": 0.5496380558,
"autogenerated": false,
"ratio": 4.10615711252654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155795168326539,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
import ovh
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def ovh_api_connect(module):
if not HAS_OVH:
module.fail_json(msg='python-ovh must be installed to use this module')
credentials = ['endpoint', 'application_key',
'application_secret', 'consumer_key']
credentials_in_parameters = [
cred in module.params for cred in credentials]
try:
if all(credentials_in_parameters):
client = ovh.Client(
**{credential: module.params[credential] for credential in credentials})
else:
client = ovh.Client()
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
return client
def ovh_argument_spec():
return dict(
endpoint=dict(required=False, default=None),
application_key=dict(required=False, default=None),
application_secret=dict(required=False, default=None),
consumer_key=dict(required=False, default=None),
)
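# Minimal usage sketch (assumption, not part of this file): a module in the
# collection would import ovh_argument_spec()/ovh_api_connect() from this
# module_utils file, merge the spec into its own, and then talk to the API:
#   from ansible.module_utils.basic import AnsibleModule
#   def main():
#       argument_spec = ovh_argument_spec()
#       argument_spec.update(dict(service_name=dict(required=True)))  # hypothetical option
#       module = AnsibleModule(argument_spec=argument_spec)
#       client = ovh_api_connect(module)
#       me = client.get('/me')  # any python-ovh call; '/me' chosen only for illustration
#       module.exit_json(changed=False, result=me)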
| {
"repo_name": "synthesio/infra-ovh-ansible-module",
"path": "plugins/module_utils/ovh.py",
"copies": "1",
"size": "1171",
"license": "mit",
"hash": 6602414147330253000,
"line_mean": 30.6486486486,
"line_max": 88,
"alpha_frac": 0.6447480786,
"autogenerated": false,
"ratio": 3.9163879598662206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5061136038466221,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# see https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef
from .DAORunner import DAORunner
from .OutputProviders import *
from astwro.config import find_opt_file
import astwro.starlist
class Allstar(DAORunner):
    """ `allstar` runner
    Object of this class maintains a single process of `allstar` and its working directory.
    Instance attributes:
    :var str allstaropt: allstar.opt file to be copied into runner dir
    :var AsOp_opt OPtion_result: options in effect, parsed from the OPTION step before each run
    :var AsOp_result ALlstars_result: results of the ALlstar command
    :var str image: image which will be automatically used if not provided in the `ALlstar` command
:var dict options: options which will be automatically set as OPTION command before every run,
can be either:
dictionary:
>>> dp = Allstar()
>>> dp.options = {'PROFILE ERROR': 5, 'FI': '6.0'}
iterable of tuples:
>>> dp.options = [('PR', 5.0), ('FITTING RADIUS', 6.0)]
"""
def __init__(self, dir=None, image=None, allstaropt=None, options=None, batch=False):
# type: ([str,object], [str], [str], [list,dict], bool) -> Allstar
"""
        :param [str] dir: pathname or TmpDir object - working directory for allstar,
if None temp dir will be used and deleted on `Allstar.close()`
:param [str] image: if provided this file will be used if not provided in `ALlstars` command
setting image property has same effect
:param [str] allstaropt: allstar.opt file, if None build in default file will be used, can be added later
by `Runner.copy_to_runner_dir(file, 'allstar.opt')`
:param [list,dict] options: if provided options will be set on beginning of each process
list of tuples or dict
        :param bool batch: whether Allstar has to work in batch mode.
"""
if allstaropt is not None:
self.allstaropt = allstaropt
else:
self.allstaropt = find_opt_file('allstar.opt', package='pydaophot')
self.image = self.expand_path(image)
self.options = {'WA': 0} # suppress animations
if options:
self.options.update(dict(options))
super(Allstar, self).__init__(dir=dir, batch=batch)
# base implementation of __init__ calls `_reset` also
self._update_executable('allstar')
def _reset(self):
super(Allstar, self)._reset()
self.OPtion_result = None
self.ALlstars_result = None
def __deepcopy__(self, memo):
from copy import deepcopy
new = super(Allstar, self).__deepcopy__(memo)
new.allstaropt = deepcopy(self.allstaropt, memo)
# new.OPtion_result = deepcopy(self.OPtion_result, memo)
# new.ALlstars_result = deepcopy(self.ALlstars_result, memo)
new.image = deepcopy(self.image, memo)
new.options = deepcopy(self.options, memo)
return new
def _pre_run(self, wait):
if not self.ALlstars_result:
raise Allstar.RunnerException('Add ALlstar command before run.', self)
super(Allstar, self)._pre_run(wait)
# set options, and prepare options parser
commands = ''
if self.options: # set options before
options = self.options
if isinstance(options, dict):
options = options.items() # options is dict
# else options is list of pairs
commands += ''.join('%s=%.2f\n' % (k, float(v)) for k, v in options if v is not None)
commands += '\n'
processor = AsOp_opt()
self.OPtion_result = processor
self._insert_processing_step(commands, output_processor=processor, on_beginning=True)
def _init_workdir_files(self, dir):
super(Allstar, self)._init_workdir_files(dir)
self.link_to_runner_dir(self.allstaropt, 'allstar.opt')
def set_options(self, options, value=None):
# type: ([str,dict,list], [str,float]) -> None
"""set option(s) before run.
Options can be either:
dictionary: `dp.OPtion({'GAIN': 9, 'FI': '6.0'})`
iterable of tuples: `dp.OPtion([('GA', 9.0), ('FITTING RADIUS', '6.0')])`
option key, followed by value in 'value' parameter:
`dp.OPtion('GA', 9.0)`
filename string of allstar.opt-formatted file (file will be symlinked as `allstar.opt`):
`dp.OPtion('opts/newallstar.opt')`
        Once set, options will stay set in subsequent runs; set an option to `None` to unset it.
"""
if isinstance(options, str) and value is None: # filename
            # allstar operates in its own temp dir and has a limited buffer for file paths
# so symlink file to its working dir
self.link_to_runner_dir(options, 'allstar.opt')
else:
if self.options is None:
self.options = {}
if value is not None: # single value
options = {options:value}
elif isinstance(options, list):
options = dict(options)
self.options.update(options)
def ALlstar(self, image_file=None, psf_file='i.psf', stars='i.ap', profile_photometry_file='i.als', subtracted_image_file=None):
# type: ([str], str, [str,object], str, str) -> AsOp_result
"""
        Runs (or adds to execution queue in batch mode) allstar profile-fitting photometry.
:param [str] image_file: input image filepath, if None, one set in constructor or 'i.fits' will be used
:param str psf_file: input file with psf from daophot PSF command
:param str stars: input magnitudes file, e.g. from aperture photometry done by :func:`Daophot.PHotometry`.
        :param str profile_photometry_file: output file with profile-fitting photometry results, default: i.als
:param str subtracted_image_file: output file with subtracted FITS image, default: do not generate image
:return: results object also accessible as :var:`Allstar.ALlstars_result` property
:rtype: AsOp_result
"""
self._get_ready_for_commands() # wait for completion before changes in working dir
if not image_file:
image_file = self.image
if not image_file:
image_file = 'i.fits'
l_img, a_img = self._prepare_input_file(image_file)
l_psf, a_psf = self._prepare_input_file(psf_file)
l_pht, a_pht = self._prepare_input_file(stars, astwro.starlist.DAO.XY_FILE)
l_als, a_als = self._prepare_output_file(profile_photometry_file)
l_sub, a_sub = self._prepare_output_file(subtracted_image_file)
commands = '{}\n{}\n{}\n{}\n{}'.format(l_img, l_psf, l_pht, l_als, l_sub)
if l_sub:
            commands += '\n'  # newline only when a subtracted image is requested; otherwise allstar expects EOF (no trailing newline) as the answer
processor = AsOp_result(profile_photometry_file=a_als, subtracted_image_file=a_sub)
self._insert_processing_step(commands, output_processor=processor)
self.ALlstars_result = processor
if not self.batch_mode:
self.run()
return processor
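# Minimal usage sketch (assumption, not part of this module): with an image and
# daophot PSF/aperture outputs in place, a non-batch run could look like
#   als = Allstar(dir='/tmp/work', image='i.fits')        # hypothetical working dir
#   result = als.ALlstar(psf_file='i.psf', stars='i.ap', profile_photometry_file='i.als')
#   # result is the AsOp_result processor; in non-batch mode run() has already been called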
| {
"repo_name": "majkelx/astwro",
"path": "astwro/pydaophot/Allstar.py",
"copies": "1",
"size": "7667",
"license": "mit",
"hash": 2196896722495996700,
"line_mean": 47.8343949045,
"line_max": 132,
"alpha_frac": 0.5888874397,
"autogenerated": false,
"ratio": 3.8722222222222222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4961109661922222,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__all__ = ['DDesc', 'Capabilities']
import abc
from blaze.error import StreamingDimensionError
from blaze.compute.strategy import CKERNEL
class Capabilities:
"""
A container for storing the different capabilities of the data descriptor.
Parameters
----------
immutable : bool
True if the array cannot be updated/enlarged.
deferred : bool
True if the array is an expression of other arrays.
persistent : bool
True if the array persists on files between sessions.
appendable : bool
True if the array can be enlarged efficiently.
remote : bool
True if the array is remote or distributed.
"""
def __init__(self, immutable, deferred, persistent, appendable, remote):
self._caps = ['immutable', 'deferred', 'persistent', 'appendable', 'remote']
self.immutable = immutable
self.deferred = deferred
self.persistent = persistent
self.appendable = appendable
self.remote = remote
def __str__(self):
caps = [attr+': '+str(getattr(self, attr)) for attr in self._caps]
return "capabilities:" + "\n".join(caps)
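# Quick illustration (not part of the original module): an in-memory, writable
# descriptor might advertise
#   caps = Capabilities(immutable=False, deferred=False, persistent=False,
#                       appendable=True, remote=False)
#   str(caps)   # 'capabilities:' followed by one 'flag: value' entry per capability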
class DDesc:
"""
The Blaze data descriptor is an interface which exposes
data to Blaze. The data descriptor doesn't implement math
or any other kind of functions, its sole purpose is providing
single and multi-dimensional data to Blaze via a data shape,
and the indexing/iteration interfaces.
Indexing and python iteration must always return data descriptors,
this is the python interface to the data. A summary of the
data access patterns for a data descriptor dd, in the
0.3 version of blaze are:
- descriptor integer indexing
child_dd = dd[i, j, k]
- slice indexing
child_dd = dd[i:j]
- descriptor outer/leading dimension iteration
for child_dd in dd: do_something(child_dd)
- memory access via dynd array (either using dynd library
to process, or directly depending on the ABI of the dynd
array object, which will be stabilized prior to dynd 1.0)
The descriptor-based indexing methods operate only through the
Python interface, JIT-compiled access should be done through
processing the dynd type and corresponding array metadata.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def dshape(self):
"""
Returns the datashape for the data behind this datadescriptor.
Every data descriptor implementation must provide a dshape.
"""
# TODO: Does dshape make sense for a data descriptor? A data descriptor
# may have a lower-level concept of a data type that corresponds to a
# higher-level data shape. IMHO dshape should be on Array only
raise NotImplementedError
@abc.abstractproperty
def capabilities(self):
"""A container for the different capabilities."""
raise NotImplementedError
def __len__(self):
"""
The default implementation of __len__ is for the
behavior of a streaming dimension, where the size
of the dimension isn't known ahead of time.
If a data descriptor knows its dimension size,
it should implement __len__, and provide the size
as an integer.
"""
raise StreamingDimensionError('Cannot get the length of'
' a streaming dimension')
@abc.abstractmethod
def __iter__(self):
"""
This returns an iterator/generator which iterates over
the outermost/leading dimension of the data. If the
dimension is not also a stream, __len__ should also
be implemented. The iterator must return data
descriptors.
"""
raise NotImplementedError
@abc.abstractmethod
def __getitem__(self, key):
"""
This does integer/slice indexing, producing another
data descriptor.
"""
raise NotImplementedError
#@abc.abstractmethod # XXX should be there
def append(self, values):
"""
This allows appending values in the data descriptor.
"""
        raise NotImplementedError
def getattr(self, name):
raise NotImplementedError('this data descriptor does not support attribute access')
def dynd_arr(self):
"""Concrete data descriptors must provide their array data
as a dynd array, accessible via this method.
"""
if self.is_concrete:
raise NotImplementedError((
'Data descriptor of type %s'
' claims to be concrete, but did not'
' override dynd_arr()') % type(self))
else:
raise TypeError((
'Data descriptor of type %s is not '
'concrete') % type(self))
| {
"repo_name": "talumbau/blaze",
"path": "blaze/datadescriptor/data_descriptor.py",
"copies": "1",
"size": "4936",
"license": "bsd-3-clause",
"hash": -5668507365078139000,
"line_mean": 33.5174825175,
"line_max": 91,
"alpha_frac": 0.6397893031,
"autogenerated": false,
"ratio": 4.829745596868885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5969534899968886,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__all__ = ['IDataDescriptor', 'Capabilities']
import abc
from blaze.error import StreamingDimensionError
from blaze.compute.strategy import CKERNEL
class Capabilities:
"""
A container for storing the different capabilities of the data descriptor.
Parameters
----------
immutable : bool
True if the array cannot be updated/enlarged.
deferred : bool
True if the array is an expression of other arrays.
persistent : bool
True if the array persists on files between sessions.
appendable : bool
True if the array can be enlarged efficiently.
remote : bool
True if the array is remote or distributed.
"""
def __init__(self, immutable, deferred, persistent, appendable, remote):
self._caps = ['immutable', 'deferred', 'persistent', 'appendable', 'remote']
self.immutable = immutable
self.deferred = deferred
self.persistent = persistent
self.appendable = appendable
self.remote = remote
def __str__(self):
caps = [attr+': '+str(getattr(self, attr)) for attr in self._caps]
return "capabilities:" + "\n".join(caps)
class IDataDescriptor:
"""
The Blaze data descriptor is an interface which exposes
data to Blaze. The data descriptor doesn't implement math
or any other kind of functions, its sole purpose is providing
single and multi-dimensional data to Blaze via a data shape,
and the indexing/iteration interfaces.
Indexing and python iteration must always return data descriptors,
this is the python interface to the data. A summary of the
data access patterns for a data descriptor dd, in the
0.3 version of blaze are:
- descriptor integer indexing
child_dd = dd[i, j, k]
- slice indexing
child_dd = dd[i:j]
- descriptor outer/leading dimension iteration
for child_dd in dd: do_something(child_dd)
- memory access via dynd array (either using dynd library
to process, or directly depending on the ABI of the dynd
array object, which will be stabilized prior to dynd 1.0)
The descriptor-based indexing methods operate only through the
Python interface, JIT-compiled access should be done through
processing the dynd type and corresponding array metadata.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def dshape(self):
"""
Returns the datashape for the data behind this datadescriptor.
Every data descriptor implementation must provide a dshape.
"""
# TODO: Does dshape make sense for a data descriptor? A data descriptor
# may have a lower-level concept of a data type that corresponds to a
# higher-level data shape. IMHO dshape should be on Array only
raise NotImplementedError
@abc.abstractproperty
def capabilities(self):
"""A container for the different capabilities."""
raise NotImplementedError
def __len__(self):
"""
The default implementation of __len__ is for the
behavior of a streaming dimension, where the size
of the dimension isn't known ahead of time.
If a data descriptor knows its dimension size,
it should implement __len__, and provide the size
as an integer.
"""
raise StreamingDimensionError('Cannot get the length of'
' a streaming dimension')
@abc.abstractmethod
def __iter__(self):
"""
This returns an iterator/generator which iterates over
the outermost/leading dimension of the data. If the
dimension is not also a stream, __len__ should also
be implemented. The iterator must return data
descriptors.
"""
raise NotImplementedError
@abc.abstractmethod
def __getitem__(self, key):
"""
This does integer/slice indexing, producing another
data descriptor.
"""
raise NotImplementedError
#@abc.abstractmethod # XXX should be there
def append(self, values):
"""
This allows appending values in the data descriptor.
"""
        raise NotImplementedError
def getattr(self, name):
raise NotImplementedError('this data descriptor does not support attribute access')
def dynd_arr(self):
"""Concrete data descriptors must provide their array data
as a dynd array, accessible via this method.
"""
if self.is_concrete:
raise NotImplementedError((
'Data descriptor of type %s'
' claims to be concrete, but did not'
' override dynd_arr()') % type(self))
else:
raise TypeError((
'Data descriptor of type %s is not '
'concrete') % type(self))
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/datadescriptor/data_descriptor.py",
"copies": "1",
"size": "4956",
"license": "bsd-3-clause",
"hash": 8883384596003745000,
"line_mean": 33.6573426573,
"line_max": 91,
"alpha_frac": 0.6412429379,
"autogenerated": false,
"ratio": 4.83984375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005923713358861331,
"num_lines": 143
} |
from __future__ import absolute_import, division, print_function
__all__ = ['JITCKernelData', 'wrap_ckernel_func']
import sys
import ctypes
from dynd import _lowlevel
from dynd._lowlevel import (CKernelPrefixStruct, CKernelPrefixStructPtr,
CKernelPrefixDestructor)
if sys.version_info >= (2, 7):
c_ssize_t = ctypes.c_ssize_t
else:
if ctypes.sizeof(ctypes.c_void_p) == 4:
c_ssize_t = ctypes.c_int32
else:
c_ssize_t = ctypes.c_int64
# Get some ctypes function pointers we need
if sys.platform == 'win32':
_malloc = ctypes.cdll.msvcrt.malloc
_free = ctypes.cdll.msvcrt.free
else:
_malloc = ctypes.pythonapi.malloc
_free = ctypes.pythonapi.free
_malloc.argtypes = (ctypes.c_size_t,)
_malloc.restype = ctypes.c_void_p
_free.argtypes = (ctypes.c_void_p,)
# Convert _free into a CFUNCTYPE so the assignment of it into the struct works
_free_proto = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
_free = _free_proto(ctypes.c_void_p.from_address(ctypes.addressof(_free)).value)
_py_decref = ctypes.pythonapi.Py_DecRef
_py_decref.argtypes = (ctypes.py_object,)
_py_incref = ctypes.pythonapi.Py_IncRef
_py_incref.argtypes = (ctypes.py_object,)
class JITCKernelData(ctypes.Structure):
_fields_ = [('base', CKernelPrefixStruct),
('owner', ctypes.py_object)]
def _jitkerneldata_destructor(jkd_ptr):
jkd = JITCKernelData.from_address(jkd_ptr)
# Free the reference to the owner object
_py_decref(jkd.owner)
jkd.owner = 0
_jitkerneldata_destructor = CKernelPrefixDestructor(_jitkerneldata_destructor)
def wrap_ckernel_func(out_ckb, ckb_offset, func, owner):
"""
This function generates a ckernel inside a ckernel_builder
object from a ctypes function pointer, typically created using a JIT like
Numba or directly using LLVM. The func must have its
argtypes set, and its last parameter must be a
CKernelPrefixStructPtr to be a valid CKernel function.
The owner should be a pointer to an object which
keeps the function pointer alive.
"""
functype = type(func)
# Validate the arguments
if not isinstance(func, ctypes._CFuncPtr):
raise TypeError('Require a ctypes function pointer to wrap')
if func.argtypes is None:
raise TypeError('The argtypes of the ctypes function ' +
'pointer must be set')
if func.argtypes[-1] != CKernelPrefixStructPtr:
raise TypeError('The last argument of the ctypes function ' +
'pointer must be CKernelPrefixStructPtr')
# Allocate the memory for the kernel data
ksize = ctypes.sizeof(JITCKernelData)
ckb_end_offset = ckb_offset + ksize
_lowlevel.ckernel_builder_ensure_capacity_leaf(out_ckb, ckb_end_offset)
# Populate the kernel data with the function
jkd = JITCKernelData.from_address(out_ckb.data + ckb_offset)
# Getting the raw pointer address seems to require these acrobatics
jkd.base.function = ctypes.c_void_p.from_address(ctypes.addressof(func))
jkd.base.destructor = _jitkerneldata_destructor
jkd.owner = ctypes.py_object(owner)
_py_incref(jkd.owner)
# Return the offset to the end of the ckernel
return ckb_end_offset
| {
"repo_name": "talumbau/blaze",
"path": "blaze/compute/ckernel/ckernel.py",
"copies": "6",
"size": "3213",
"license": "bsd-3-clause",
"hash": 8436886716973071000,
"line_mean": 36.3604651163,
"line_max": 80,
"alpha_frac": 0.7030812325,
"autogenerated": false,
"ratio": 3.3964059196617336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0130564046141,
"num_lines": 86
} |
from __future__ import absolute_import, division, print_function
__all__ = ['JITCKernelData', 'wrap_ckernel_func']
import sys
import ctypes
from dynd import nd, ndt, _lowlevel
from dynd._lowlevel import (CKernelPrefixStruct, CKernelPrefixStructPtr,
CKernelPrefixDestructor,
CKernelBuilder,
UnarySingleOperation, UnaryStridedOperation,
ExprSingleOperation, ExprStridedOperation, BinarySinglePredicate)
if sys.version_info >= (2, 7):
c_ssize_t = ctypes.c_ssize_t
else:
if ctypes.sizeof(ctypes.c_void_p) == 4:
c_ssize_t = ctypes.c_int32
else:
c_ssize_t = ctypes.c_int64
# Get some ctypes function pointers we need
if sys.platform == 'win32':
_malloc = ctypes.cdll.msvcrt.malloc
_free = ctypes.cdll.msvcrt.free
else:
_malloc = ctypes.pythonapi.malloc
_free = ctypes.pythonapi.free
_malloc.argtypes = (ctypes.c_size_t,)
_malloc.restype = ctypes.c_void_p
_free.argtypes = (ctypes.c_void_p,)
# Convert _free into a CFUNCTYPE so the assignment of it into the struct works
_free_proto = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
_free = _free_proto(ctypes.c_void_p.from_address(ctypes.addressof(_free)).value)
_py_decref = ctypes.pythonapi.Py_DecRef
_py_decref.argtypes = (ctypes.py_object,)
_py_incref = ctypes.pythonapi.Py_IncRef
_py_incref.argtypes = (ctypes.py_object,)
class JITCKernelData(ctypes.Structure):
_fields_ = [('base', CKernelPrefixStruct),
('owner', ctypes.py_object)]
def _jitkerneldata_destructor(jkd_ptr):
jkd = JITCKernelData.from_address(jkd_ptr)
# Free the reference to the owner object
_py_decref(jkd.owner)
jkd.owner = 0
_jitkerneldata_destructor = CKernelPrefixDestructor(_jitkerneldata_destructor)
def wrap_ckernel_func(out_ckb, ckb_offset, func, owner):
"""
This function generates a ckernel inside a ckernel_builder
object from a ctypes function pointer, typically created using a JIT like
Numba or directly using LLVM. The func must have its
argtypes set, and its last parameter must be a
CKernelPrefixStructPtr to be a valid CKernel function.
The owner should be a pointer to an object which
keeps the function pointer alive.
"""
functype = type(func)
# Validate the arguments
if not isinstance(func, ctypes._CFuncPtr):
raise TypeError('Require a ctypes function pointer to wrap')
if func.argtypes is None:
raise TypeError('The argtypes of the ctypes function ' +
'pointer must be set')
if func.argtypes[-1] != CKernelPrefixStructPtr:
raise TypeError('The last argument of the ctypes function ' +
'pointer must be CKernelPrefixStructPtr')
# Allocate the memory for the kernel data
ksize = ctypes.sizeof(JITCKernelData)
ckb_end_offset = ckb_offset + ksize
_lowlevel.ckernel_builder_ensure_capacity_leaf(out_ckb, ckb_end_offset)
# Populate the kernel data with the function
jkd = JITCKernelData.from_address(out_ckb.data + ckb_offset)
# Getting the raw pointer address seems to require these acrobatics
jkd.base.function = ctypes.c_void_p.from_address(ctypes.addressof(func))
jkd.base.destructor = _jitkerneldata_destructor
jkd.owner = ctypes.py_object(owner)
_py_incref(jkd.owner)
# Return the offset to the end of the ckernel
return ckb_end_offset
| {
"repo_name": "XinSong/blaze",
"path": "blaze/compute/ckernel/ckernel.py",
"copies": "7",
"size": "3373",
"license": "bsd-3-clause",
"hash": 7843156502629303000,
"line_mean": 36.8988764045,
"line_max": 80,
"alpha_frac": 0.7053068485,
"autogenerated": false,
"ratio": 3.434826883910387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013448302264281821,
"num_lines": 89
} |
from __future__ import absolute_import, division, print_function
__all__ = [
'CustomSyntaxError',
'StreamingDimensionError',
'BroadcastError',
'ArrayWriteError'
]
class BlazeException(Exception):
"""Exception that all blaze exceptions derive from"""
#------------------------------------------------------------------------
# Generic Syntax Errors
#------------------------------------------------------------------------
syntax_error = """
File {filename}, line {lineno}
{line}
{pointer}
{error}: {msg}
"""
class CustomSyntaxError(BlazeException):
"""
Makes datashape parse errors look like Python SyntaxError.
"""
def __init__(self, lexpos, filename, text, msg=None):
self.lexpos = lexpos
self.filename = filename
self.text = text
self.msg = msg or 'invalid syntax'
self.lineno = text.count('\n', 0, lexpos) + 1
# Get the extent of the line with the error
linestart = text.rfind('\n', 0, lexpos)
if linestart < 0:
linestart = 0
else:
linestart += 1
lineend = text.find('\n', lexpos)
if lineend < 0:
lineend = len(text)
self.line = text[linestart:lineend]
self.col_offset = lexpos - linestart
def __str__(self):
pointer = ' '*self.col_offset + '^'
return syntax_error.format(
filename=self.filename,
lineno=self.lineno,
line=self.line,
pointer=pointer,
msg=self.msg,
error=self.__class__.__name__,
)
def __repr__(self):
return str(self)
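# Illustrative sketch (not part of the original module): a datashape parser would
# raise this with the offending position so the message mimics a SyntaxError:
#   raise CustomSyntaxError(lexpos=12, filename='<string>',
#                           text='3, int32 -> flot64')   # hypothetical input
#   # -> renders the line with a caret under 'flot64' and reports 'invalid syntax'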
#------------------------------------------------------------------------
# Array-related errors
#------------------------------------------------------------------------
class StreamingDimensionError(BlazeException):
"""
An error for when a streaming dimension is accessed
like a dimension of known size.
"""
pass
class BroadcastError(BlazeException):
"""
An error for when arrays can't be broadcast together.
"""
pass
class ArrayWriteError(BlazeException):
"""
An error for when trying to write to an array which is read only.
"""
pass
| {
"repo_name": "cezary12/blaze",
"path": "blaze/error.py",
"copies": "7",
"size": "2243",
"license": "bsd-3-clause",
"hash": 4503212096037697000,
"line_mean": 24.7816091954,
"line_max": 73,
"alpha_frac": 0.5131520285,
"autogenerated": false,
"ratio": 4.398039215686275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018234764151279577,
"num_lines": 87
} |
from __future__ import absolute_import, division, print_function
__all__ = ['split_array_base', 'add_indexers_to_url', 'slice_as_string',
'index_tuple_as_string']
from pyparsing import (Word, Regex, Optional, ZeroOrMore,
StringStart, StringEnd, alphas, alphanums)
from ..py2help import _strtypes, _inttypes
# Parser to match the Blaze URL syntax
intNumber = Regex(r'[-+]?\b\d+\b')
arrayName = Regex(r'(/(\.session_)?\w*)*[a-zA-Z0-9_]+\b')
bracketsIndexer = (Optional(intNumber) +
Optional(':' + Optional(intNumber)) +
Optional(':' + Optional(intNumber)))
indexerPattern = (('.' + Word(alphas + '_', alphanums + '_')) ^
('[' + bracketsIndexer +
ZeroOrMore(',' + bracketsIndexer) + ']'))
arrayBase = (StringStart() +
arrayName + ZeroOrMore(indexerPattern) +
StringEnd())
def split_array_base(array_base):
pieces = arrayBase.parseString(array_base)
array_name = pieces[0]
indexers = []
i = 1
while i < len(pieces):
# Convert [...] into an int, a slice, or a tuple of int/slice
if pieces[i] == '[':
i += 1
ilist = []
while pieces[i-1] != ']':
if pieces[i] != ':':
first = int(pieces[i])
i += 1
else:
first = None
if pieces[i] in [',', ']']:
i += 1
ilist.append(first)
else:
i += 1
if pieces[i] not in [',', ':', ']']:
second = int(pieces[i])
i += 1
else:
second = None
if pieces[i] in [',', ']']:
i += 1
ilist.append(slice(first, second))
else:
i += 1
if pieces[i] not in [',', ']']:
third = int(pieces[i])
i += 1
else:
third = 1
ilist.append(slice(first, second, third))
i += 2
if len(ilist) == 1:
indexers.append(ilist[0])
else:
indexers.append(tuple(ilist))
elif pieces[i] == '.':
i += 1
else:
indexers.append(pieces[i])
i += 1
return array_name, indexers
def slice_as_interior_string(s):
if type(s) is int:
return str(s)
else:
result = ''
if s.start is not None:
result += str(s.start)
result += ':'
if s.stop is not None:
result += str(s.stop)
if s.step is not None and s.step != 1:
result += ':' + str(s.step)
return result
def slice_as_string(s):
return '[' + slice_as_interior_string(s) + ']'
def index_tuple_as_string(s):
result = '[' + slice_as_interior_string(s[0])
for i in s[1:]:
result += ',' + slice_as_interior_string(i)
result += ']'
return result
def add_indexers_to_url(base_url, indexers):
for idx in indexers:
if isinstance(idx, _strtypes):
base_url += '.' + idx
elif isinstance(idx, _inttypes):
base_url += '[' + str(idx) + ']'
elif isinstance(idx, slice):
base_url += slice_as_string(idx)
elif isinstance(idx, tuple):
base_url += index_tuple_as_string(idx)
else:
raise IndexError('Cannot process index object %r' % idx)
return base_url
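# Worked example (not part of the original module), following the parser above:
#   split_array_base('/catalog/arr[1:3,0].field')
#   -> ('/catalog/arr', [(slice(1, 3, None), 0), 'field'])
#   add_indexers_to_url('http://host/catalog/arr', [(slice(1, 3), 0), 'field'])
#   -> 'http://host/catalog/arr[1:3,0].field'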
| {
"repo_name": "markflorisson/blaze-core",
"path": "blaze/catalog/blaze_url.py",
"copies": "13",
"size": "3712",
"license": "bsd-3-clause",
"hash": 72413625770721360,
"line_mean": 31.5614035088,
"line_max": 72,
"alpha_frac": 0.4471982759,
"autogenerated": false,
"ratio": 4.012972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 114
} |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from __future__ import (absolute_import, division, print_function)
import os
from codecs import open
from setuptools import find_packages, setup
import versioneer
here = os.path.abspath(os.path.dirname(__file__))
# Dependencies.
with open('requirements.txt') as f:
requirements = f.readlines()
install_requires = [t.strip() for t in requirements]
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='odm2api',
version=versioneer.get_version(),
description='Python interface for the Observations Data Model 2 (ODM2)',
long_description=long_description,
url='https://github.com/ODM2/ODM2PythonAPI',
author='ODM2 team-Stephanie Reeder',
author_email='stephanie.reeder@usu.edu',
maintainer='David Valentine',
maintainer_email='david.valentine@gmail.com',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering'
],
keywords='Observations Data Model ODM2',
packages=find_packages(exclude=['samplefiles', 'setup', 'tests*', 'Forms']),
install_requires=install_requires,
extras_require={
'mysql': ['pymysql'],
'postgis': ['psycopg2'],
'sqlite': ['pyspatialite >=3.0.0'],
},
cmdclass=versioneer.get_cmdclass(),
)
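# Usage note (assumption, not stated in this file): the extras_require entries
# above map to standard pip extras, e.g.
#   pip install odm2api[mysql]
#   pip install "odm2api[sqlite]"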
| {
"repo_name": "emiliom/ODM2PythonAPI",
"path": "setup.py",
"copies": "2",
"size": "1786",
"license": "bsd-3-clause",
"hash": -929170618330019100,
"line_mean": 30.3333333333,
"line_max": 80,
"alpha_frac": 0.6662933931,
"autogenerated": false,
"ratio": 3.690082644628099,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021659085986571366,
"num_lines": 57
} |
from __future__ import absolute_import, division, print_function
'''
A tiny python 2/3 compatibility layer implementing only a minimal subset as needed by muninn
Inspired by six, future, and jinja2
'''
import sys
import operator
PY3 = sys.version_info[0] == 3
if PY3:
long = int
string_types = (str, )
def is_python2_unicode(x):
return False
def dictkeys(d):
return list(d.keys())
def dictvalues(d):
return list(d.values())
def path_utf8(path):
return path.encode('utf-8')
def encode(s):
return s.encode('utf-8')
def decode(s):
return s.decode('utf-8')
itervalues = operator.methodcaller("values")
imap = map
izip = zip
from urllib.parse import urlparse as urlparse_mod
urlparse = urlparse_mod
input = input
else:
long = long
string_types = (basestring, )
def is_python2_unicode(x):
return type(x) is unicode
def path_utf8(path):
return path.decode(sys.getfilesystemencoding()).encode('utf-8')
def decode(s):
return s
def encode(s):
return s
dictkeys = operator.methodcaller("keys")
dictvalues = operator.methodcaller("values")
itervalues = operator.methodcaller("itervalues")
import itertools
imap = itertools.imap
izip = itertools.izip
from urlparse import urlparse as urlparse_mod
urlparse = urlparse_mod
input = raw_input
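# Usage sketch (not part of the shim itself): callers import these names instead
# of branching on sys.version_info, e.g.
#
#   >>> from muninn._compat import string_types, dictkeys, urlparse
#   >>> isinstance(u'file:///tmp/x', string_types)
#   True
#   >>> urlparse('file:///tmp/x').scheme
#   'file'
#   >>> dictkeys({'a': 1})   # a concrete list on both Python 2 and 3
#   ['a']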
| {
"repo_name": "stcorp/muninn",
"path": "muninn/_compat.py",
"copies": "1",
"size": "1446",
"license": "bsd-3-clause",
"hash": 7164729426748800000,
"line_mean": 18.28,
"line_max": 90,
"alpha_frac": 0.6369294606,
"autogenerated": false,
"ratio": 3.845744680851064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49826741414510634,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__author__ = 'jmeline'
from datetime import datetime
from odm2api import serviceBase
from odm2api.models import (Actions, Results)
# ################################################################################
# Annotations
# ################################################################################
class UpdateODM2(serviceBase):
def update(self, value):
self._session.add(value)
self._session.commit()
return value
# ################################################################################
# Core
# ################################################################################
def updateResultValidDateTime(self, resultId, dateTime):
# check type of "validdatetime'
# if not datetime do this:
# dt = dateTime.to_datetime()
# else dt = dateTime
if (type(dateTime) != datetime):
dt = dateTime.to_datetime()
else:
dt = dateTime
q = self._session.query(Results).filter(Results.ResultID == int(resultId))
q.update({'ValidDateTime': dt})
self._session.commit()
def updateResult(self, resultID=None, valuecount=None, result=None):
if resultID:
q = self._session.query(Results).filter(Results.ResultID == int(resultID))
if valuecount:
q.update({'ValueCount': valuecount})
if result:
self._session.add(result)
self._session.commit()
def updateAction(self, actionID=None, begin=None, end=None, action=None):
if actionID:
q = self._session.query(Actions).filter(Actions.ActionID == int(actionID))
# if (type(begin) != datetime):
# begin = begin.to_datetime()
# if (type(end) != datetime):
# end = end.to_datetime()
if begin:
q.update({'BeginDateTime': begin})
if end:
q.update({'EndDateTime': end})
elif action:
self._session.add(action)
self._session.commit()
# ################################################################################
# Data Quality
# ################################################################################
# ################################################################################
# Equipment
# ################################################################################
# ################################################################################
# Extension Properties
# ################################################################################
# ################################################################################
# External Identifiers
# ################################################################################
# ################################################################################
# Lab Analyses
# ################################################################################
# ################################################################################
# Provenance
# ################################################################################
# ################################################################################
# Results
# ################################################################################
# ################################################################################
# Sampling Features
# ################################################################################
# ################################################################################
# Sensors
# ################################################################################
# ################################################################################
# ODM2
# ################################################################################
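# Usage sketch (connection details are illustrative; construction mirrors the
# read/create services used elsewhere in this package):
#
#   from odm2api.ODMconnection import dbconnection
#   session_factory = dbconnection.createConnection('sqlite', '/path/to/ODM2.sqlite', 2.0)
#   update = UpdateODM2(session_factory)
#   update.updateResult(resultID=1, valuecount=100)
#   update.updateAction(actionID=1, end=datetime(2020, 1, 2))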
| {
"repo_name": "emiliom/ODM2PythonAPI",
"path": "odm2api/services/updateService.py",
"copies": "2",
"size": "3956",
"license": "bsd-3-clause",
"hash": -3962433863547732500,
"line_mean": 41.5376344086,
"line_max": 86,
"alpha_frac": 0.2919615774,
"autogenerated": false,
"ratio": 6.65993265993266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005035315874519929,
"num_lines": 93
} |
from __future__ import absolute_import, division, print_function
__authors__ = ["Russell Hewett, Stuart Mumford"]
__email__ = "stuart@mumford.me.uk"
import os
import glob
from collections import OrderedDict
import numpy as np
import astropy.io.fits
import sunpy
from sunpy.map.mapbase import GenericMap, MAP_CLASSES
from sunpy.map.header import MapMeta
from sunpy.map.compositemap import CompositeMap
from sunpy.map.mapcube import MapCube
from sunpy.io.file_tools import read_file
from sunpy.io.header import FileHeader
from sunpy.util.net import download_file
from sunpy.util import expand_list
from sunpy.util.datatype_factory_base import BasicRegistrationFactory
from sunpy.util.datatype_factory_base import NoMatchError
from sunpy.util.datatype_factory_base import MultipleMatchError
from sunpy.util.datatype_factory_base import ValidationFunctionError
from sunpy.extern import six
from sunpy.extern.six.moves.urllib.request import urlopen
# Make a mock DatabaseEntry class if sqlalchemy is not installed
try:
from sunpy.database.tables import DatabaseEntry
except ImportError:
class DatabaseEntry(object):
pass
__all__ = ['Map', 'MapFactory']
class MapFactory(BasicRegistrationFactory):
"""
Map(*args, **kwargs)
Map factory class. Used to create a variety of Map objects. Valid map types
are specified by registering them with the factory.
Examples
--------
>>> import sunpy.map
>>> sunpy.data.download_sample_data(overwrite=False) # doctest: +SKIP
>>> import sunpy.data.sample
>>> mymap = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
The SunPy Map factory accepts a wide variety of inputs for creating maps
* Preloaded tuples of (data, header) pairs
>>> mymap = sunpy.map.Map((data, header)) # doctest: +SKIP
    where headers can be any object based on `dict` or `collections.OrderedDict`, including `sunpy.io.header.FileHeader` or `sunpy.map.header.MapMeta` instances.
* data, header pairs, not in tuples
>>> mymap = sunpy.map.Map(data, header) # doctest: +SKIP
* File names
>>> mymap = sunpy.map.Map('file1.fits') # doctest: +SKIP
* All fits files in a directory by giving a directory
>>> mymap = sunpy.map.Map('local_dir/sub_dir') # doctest: +SKIP
* Some regex globs
>>> mymap = sunpy.map.Map('eit_*.fits') # doctest: +SKIP
* URLs
>>> mymap = sunpy.map.Map(url_str) # doctest: +SKIP
* DatabaseEntry
>>> mymap = sunpy.map.Map(db_result) # doctest: +SKIP
* Lists of any of the above
>>> mymap = sunpy.map.Map(['file1.fits', 'file2.fits', 'file3.fits', 'directory1/']) # doctest: +SKIP
* Any mixture of the above not in a list
>>> mymap = sunpy.map.Map((data, header), data2, header2, 'file1.fits', url_str, 'eit_*.fits') # doctest: +SKIP
"""
def _read_file(self, fname, **kwargs):
""" Read in a file name and return the list of (data, meta) pairs in
that file. """
# File gets read here. This needs to be generic enough to seamlessly
#call a fits file or a jpeg2k file, etc
pairs = read_file(fname, **kwargs)
new_pairs = []
for pair in pairs:
filedata, filemeta = pair
assert isinstance(filemeta, FileHeader)
#This tests that the data is more than 1D
if len(np.shape(filedata)) > 1:
data = filedata
meta = MapMeta(filemeta)
new_pairs.append((data, meta))
return new_pairs
def _validate_meta(self, meta):
"""
Validate a meta argument.
"""
if isinstance(meta, astropy.io.fits.header.Header):
return True
elif isinstance(meta, dict):
return True
else:
return False
def _parse_args(self, *args, **kwargs):
"""
Parses an args list for data-header pairs. args can contain any
mixture of the following entries:
* tuples of data,header
* data, header not in a tuple
* filename, which will be read
* directory, from which all files will be read
* glob, from which all files will be read
* url, which will be downloaded and read
* lists containing any of the above.
Example
-------
self._parse_args(data, header,
(data, header),
['file1', 'file2', 'file3'],
'file4',
'directory1',
'*.fits')
"""
data_header_pairs = list()
already_maps = list()
# Account for nested lists of items
args = expand_list(args)
# For each of the arguments, handle each of the cases
i = 0
while i < len(args):
arg = args[i]
# Data-header pair in a tuple
if ((type(arg) in [tuple, list]) and
len(arg) == 2 and
isinstance(arg[0], np.ndarray) and
self._validate_meta(arg[1])):
                arg = (arg[0], OrderedDict(arg[1]))
data_header_pairs.append(arg)
# Data-header pair not in a tuple
elif (isinstance(arg, np.ndarray) and
self._validate_meta(args[i+1])):
pair = (args[i], OrderedDict(args[i+1]))
data_header_pairs.append(pair)
i += 1 # an extra increment to account for the data-header pairing
# File name
elif (isinstance(arg,six.string_types) and
os.path.isfile(os.path.expanduser(arg))):
path = os.path.expanduser(arg)
pairs = self._read_file(path, **kwargs)
data_header_pairs += pairs
# Directory
elif (isinstance(arg,six.string_types) and
os.path.isdir(os.path.expanduser(arg))):
path = os.path.expanduser(arg)
files = [os.path.join(path, elem) for elem in os.listdir(path)]
for afile in files:
data_header_pairs += self._read_file(afile, **kwargs)
# Glob
elif (isinstance(arg,six.string_types) and '*' in arg):
files = glob.glob( os.path.expanduser(arg) )
for afile in files:
data_header_pairs += self._read_file(afile, **kwargs)
# Already a Map
elif isinstance(arg, GenericMap):
already_maps.append(arg)
# A URL
elif (isinstance(arg,six.string_types) and
_is_url(arg)):
default_dir = sunpy.config.get("downloads", "download_dir")
url = arg
path = download_file(url, default_dir)
pairs = self._read_file(path, **kwargs)
data_header_pairs += pairs
# A database Entry
elif isinstance(arg, DatabaseEntry):
data_header_pairs += self._read_file(arg.path, **kwargs)
else:
raise ValueError("File not found or invalid input")
i += 1
#TODO:
# In the end, if there are already maps it should be put in the same
# order as the input, currently they are not.
return data_header_pairs, already_maps
def __call__(self, *args, **kwargs):
""" Method for running the factory. Takes arbitrary arguments and
keyword arguments and passes them to a sequence of pre-registered types
to determine which is the correct Map-type to build.
Arguments args and kwargs are passed through to the validation
function and to the constructor for the final type. For Map types,
validation function must take a data-header pair as an argument.
Parameters
----------
composite : boolean, optional
Indicates if collection of maps should be returned as a CompositeMap
cube : boolean, optional
Indicates if collection of maps should be returned as a MapCube
silence_errors : boolean, optional
If set, ignore data-header pairs which cause an exception.
Notes
-----
Extra keyword arguments are passed through to `sunpy.io.read_file` such
as `memmap` for FITS files.
"""
# Hack to get around Python 2.x not backporting PEP 3102.
composite = kwargs.pop('composite', False)
cube = kwargs.pop('cube', False)
silence_errors = kwargs.pop('silence_errors', False)
data_header_pairs, already_maps = self._parse_args(*args, **kwargs)
new_maps = list()
# Loop over each registered type and check to see if WidgetType
# matches the arguments. If it does, use that type.
for pair in data_header_pairs:
data, header = pair
meta = MapMeta(header)
try:
new_map = self._check_registered_widgets(data, meta, **kwargs)
except (NoMatchError, MultipleMatchError, ValidationFunctionError):
if not silence_errors:
raise
except:
raise
new_maps.append(new_map)
new_maps += already_maps
# If the list is meant to be a cube, instantiate a map cube
if cube:
return MapCube(new_maps, **kwargs)
# If the list is meant to be a composite map, instantiate one
if composite:
return CompositeMap(new_maps, **kwargs)
if len(new_maps) == 1:
return new_maps[0]
return new_maps
def _check_registered_widgets(self, data, meta, **kwargs):
candidate_widget_types = list()
for key in self.registry:
# Call the registered validation function for each registered class
if self.registry[key](data, meta, **kwargs):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if n_matches == 0:
if self.default_widget_type is None:
raise NoMatchError("No types match specified arguments and no default is set.")
else:
candidate_widget_types = [self.default_widget_type]
elif n_matches > 1:
raise MultipleMatchError("Too many candidate types identified ({0}). Specify enough keywords to guarantee unique type identification.".format(n_matches))
# Only one is found
WidgetType = candidate_widget_types[0]
return WidgetType(data, meta, **kwargs)
def _is_url(arg):
try:
urlopen(arg)
except:
return False
return True
class InvalidMapInput(ValueError):
"""Exception to raise when input variable is not a Map instance and does
not point to a valid Map input file."""
pass
class InvalidMapType(ValueError):
"""Exception to raise when an invalid type of map is requested with Map
"""
pass
class NoMapsFound(ValueError):
"""Exception to raise when input does not point to any valid maps or files
"""
pass
Map = MapFactory(default_widget_type=GenericMap,
additional_validation_functions=['is_datasource_for'])
Map.registry = MAP_CLASSES
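# Registration sketch (hypothetical instrument class, not part of sunpy): a new
# map source only needs an `is_datasource_for` classmethod with the
# (data, header) signature checked by _check_registered_widgets; adding it to
# Map.registry lets Map(data, header) resolve to it automatically.
#
#   class MyInstrumentMap(GenericMap):
#       @classmethod
#       def is_datasource_for(cls, data, header, **kwargs):
#           return header.get('instrume', '').startswith('MYINST')
#
#   Map.registry[MyInstrumentMap] = MyInstrumentMap.is_datasource_for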
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/map/map_factory.py",
"copies": "1",
"size": "11293",
"license": "bsd-2-clause",
"hash": 3409632983953087500,
"line_mean": 31.6387283237,
"line_max": 166,
"alpha_frac": 0.5949703356,
"autogenerated": false,
"ratio": 4.161016949152542,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017647869919617807,
"num_lines": 346
} |
from __future__ import (absolute_import, division, print_function)
__author__ = 'stephanie'
#import matplotlib.pyplot as plt
from odm2api.ODMconnection import dbconnection
from odm2api.ODM2.services.readService import *
from odm2api.ODM2.services import CreateODM2
# Create a connection to the ODM2 database
# ----------------------------------------
#connect to database
# createconnection (dbtype, servername, dbname, username, password)
# session_factory = dbconnection.createConnection('connection type: sqlite|mysql|mssql|postgresql', '/your/path/to/db/goes/here', 2.0)#sqlite
# session_factory = dbconnection.createConnection('postgresql', 'localhost', 'odm2', 'ODM', 'odm')
# session_factory = dbconnection.createConnection('mysql', 'localhost', 'odm2', 'ODM', 'odm')#mysql
session_factory= dbconnection.createConnection('mssql', "(local)", "ODM2", "ODM", "odm")#win MSSQL
# session_factory= dbconnection.createConnection('mssql', "arroyoodm2", "", "ODM", "odm")#mac/linux MSSQL
# session_factory = dbconnection.createConnection('sqlite', 'path/to/ODM2.sqlite', 2.0)
#_session = session_factory.getSession()
read = ReadODM2(session_factory)
create = CreateODM2(session_factory)
# Run some basic sample queries.
# ------------------------------
# Get all of the variables from the database and print their names to the console
allVars = read.getVariables()
print ("\n-------- Information about Variables ---------")
for x in allVars:
print(x.VariableCode + ": " + x.VariableNameCV)
# Get all of the people from the database
allPeople = read.getPeople()
print ("\n-------- Information about People ---------")
for x in allPeople:
print(x.PersonFirstName + " " + x.PersonLastName)
try:
print("\n-------- Information about an Affiliation ---------")
allaff = read.getAffiliations()
for x in allaff:
print(x.PersonObj.PersonFirstName + ": " + str(x.OrganizationID))
except Exception as e:
print("Unable to demo getAllAffiliations", e)
# Get all of the SamplingFeatures from the database that are Sites
try:
print ("\n-------- Information about Sites ---------")
siteFeatures = read.getSamplingFeatures(type= 'site')
# siteFeatures = read.getSamplingFeatures(type='Site')
numSites = len(siteFeatures)
print ("Successful query")
for x in siteFeatures:
print(x.SamplingFeatureCode + ": " + x.SamplingFeatureTypeCV )
except Exception as e:
print("Unable to demo getSamplingFeaturesByType", e)
# Now get the SamplingFeature object for a SamplingFeature code
try:
sf = read.getSamplingFeatures(codes=['USU-LBR-Mendon'])[0]
print(sf)
print("\n-------- Information about an individual SamplingFeature ---------")
print("The following are some of the attributes of a SamplingFeature retrieved using getSamplingFeatureByCode(): \n")
print("SamplingFeatureCode: " + sf.SamplingFeatureCode)
print("SamplingFeatureName: " + sf.SamplingFeatureName)
print("SamplingFeatureDescription: %s" % sf.SamplingFeatureDescription)
print("SamplingFeatureGeotypeCV: %s" % sf.SamplingFeatureGeotypeCV)
print("SamplingFeatureGeometry: %s" % sf.FeatureGeometry)
print("Elevation_m: %s" % str(sf.Elevation_m))
except Exception as e:
print("Unable to demo getSamplingFeatureByCode: ", e)
#add sampling feature
print("\n------------ Create Sampling Feature --------- \n")
try:
    from odm2api.ODM2.models import Sites
session = session_factory.getSession()
newsf = Sites(FeatureGeometryWKT = "POINT(-111.946 41.718)", Elevation_m=100, ElevationDatumCV=sf.ElevationDatumCV,
SamplingFeatureCode= "TestSF",SamplingFeatureDescription = "this is a test in sample.py",
SamplingFeatureGeotypeCV= "Point", SamplingFeatureTypeCV=sf.SamplingFeatureTypeCV,SamplingFeatureUUID= sf.SamplingFeatureUUID+"2",
SiteTypeCV="cave", Latitude= "100", Longitude= "-100", SpatialReferenceID= 0)
c=create.createSamplingFeature(newsf)
#session.commit()
print("new sampling feature added to database", c)
except Exception as e :
print("error adding a sampling feature: " + str(e))
# Drill down and get objects linked by foreign keys
print("\n------------ Foreign Key Example --------- \n",)
try:
# Call getResults, but return only the first result
firstResult = read.getResults()[0]
print("The FeatureAction object for the Result is: ", firstResult.FeatureActionObj)
print("The Action object for the Result is: ", firstResult.FeatureActionObj.ActionObj)
print ("\nThe following are some of the attributes for the Action that created the Result: \n" +
"ActionTypeCV: " + firstResult.FeatureActionObj.ActionObj.ActionTypeCV + "\n" +
"ActionDescription: " + firstResult.FeatureActionObj.ActionObj.ActionDescription + "\n" +
"BeginDateTime: " + str(firstResult.FeatureActionObj.ActionObj.BeginDateTime) + "\n" +
"EndDateTime: " + str(firstResult.FeatureActionObj.ActionObj.EndDateTime) + "\n" +
"MethodName: " + firstResult.FeatureActionObj.ActionObj.MethodObj.MethodName + "\n" +
"MethodDescription: " + firstResult.FeatureActionObj.ActionObj.MethodObj.MethodDescription)
except Exception as e:
print("Unable to demo Foreign Key Example: ", e)
# Now get a particular Result using a ResultID
print("\n------- Example of Retrieving Attributes of a Result -------")
try:
tsResult = read.getResults(ids = [1])[0]
print (
"The following are some of the attributes for the TimeSeriesResult retrieved using getResults(ids=[1]): \n" +
"ResultTypeCV: " + tsResult.ResultTypeCV + "\n" +
# Get the ProcessingLevel from the TimeSeriesResult's ProcessingLevel object
"ProcessingLevel: " + tsResult.ProcessingLevelObj.Definition + "\n" +
"SampledMedium: " + tsResult.SampledMediumCV + "\n" +
# Get the variable information from the TimeSeriesResult's Variable object
"Variable: " + tsResult.VariableObj.VariableCode + ": " + tsResult.VariableObj.VariableNameCV + "\n"
#"AggregationStatistic: " + tsResult.AggregationStatisticCV + "\n" +
# Get the site information by drilling down
"SamplingFeature: " + tsResult.FeatureActionObj.SamplingFeatureObj.SamplingFeatureCode + " - " +
tsResult.FeatureActionObj.SamplingFeatureObj.SamplingFeatureName)
except Exception as e:
print("Unable to demo Example of retrieving Attributes of a time Series Result: ", e)
# Get the values for a particular TimeSeriesResult
print("\n-------- Example of Retrieving Time Series Result Values ---------")
tsValues = read.getResultValues(resultids = [1])  # Return type is a pandas DataFrame
# Print a few Time Series Values to the console
# tsValues.set_index('ValueDateTime', inplace=True)
try:
print(tsValues.head())
except Exception as e:
print(e)
# Plot the time series
try:
    import matplotlib.pyplot as plt
    plt.figure()
ax=tsValues.plot(x='ValueDateTime', y='DataValue')
plt.show()
except Exception as e:
print("Unable to demo plotting of tsValues: ", e)
| {
"repo_name": "emiliom/ODM2PythonAPI",
"path": "Examples/Sample.py",
"copies": "1",
"size": "7067",
"license": "bsd-3-clause",
"hash": -6069408164856595000,
"line_mean": 41.8303030303,
"line_max": 141,
"alpha_frac": 0.6984576199,
"autogenerated": false,
"ratio": 3.661658031088083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4860115650988083,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__author__ = 'Timothy D. Morton <tim.morton@gmail.com>'
"""
Defines objects useful for describing probability distributions.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
import numpy.random as rand
from scipy.special import erf
from scipy.optimize import leastsq
import pandas as pd
from plotutils import setfig
from .kde import KDE
#figure this generic loading thing out; draft stage currently
def load_distribution(filename,path=''):
fns = pd.read_hdf(filename,path)
store = pd.HDFStore(filename)
if '{}/samples'.format(path) in store:
samples = pd.read_hdf(filename,path+'/samples')
samples = np.array(samples)
minval = fns['vals'].iloc[0]
maxval = fns['vals'].iloc[-1]
pdf = interpolate(fns['vals'],fns['pdf'],s=0)
cdf = interpolate(fns['vals'],fns['cdf'],s=0)
attrs = store.get_storer('{}/fns'.format(path)).attrs
keywords = attrs.keywords
t = attrs.disttype
store.close()
return t.__init__()
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
        of the CDF. Defaults are -np.inf and +np.inf. If these are not explicitly
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
ppf = pctile
def save_hdf(self,filename,path='',res=1000,logspace=False):
"""Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
        values at a specified resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
"""
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
def __call__(self,x):
"""
        Evaluates the pdf. Forces zero outside of (self.minval, self.maxval). Returns a scalar for scalar input, otherwise an array.
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
            Probability density (or re-normalized density if self.norm was explicitly
            provided).
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
"""
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
        npts : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
"""
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
        N = int(N)
        res = int(res)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
            vals = np.logspace(np.log10(minval),np.log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds]
def rvs(self,*args,**kwargs):
return self.resample(*args,**kwargs)
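# Usage sketch (relies only on the constructor arguments documented above): any
# non-negative callable with finite bounds can be wrapped directly.
#
#   tri = Distribution(lambda x: np.maximum(1 - np.abs(x), 0),
#                      name='triangle', minval=-1, maxval=1)
#   tri(0.0)            # ~1.0, the normalized density at the mode
#   tri.pctile(0.5)     # ~0.0, the median
#   tri.resample(1000)  # draws via the tabulated inverse CDF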
class Distribution_FromH5(Distribution):
"""Creates a Distribution object from one saved to an HDF file.
File must have a `DataFrame` saved under [path]/fns in
the .h5 file, containing 'vals', 'pdf', and 'cdf' columns.
If samples are saved in the HDF storer, then they will be restored
to this object; so will any saved keyword attributes.
These appropriate .h5 files will be created by a call to the `save_hdf`
method of the generic `Distribution` class.
Parameters
----------
filename : string
.h5 file where the distribution is saved.
path : string, optional
Path within the .h5 file where the distribution is saved. By
default this will be the root level, but can be anywhere.
kwargs
Keyword arguments are passed to the `Distribution` constructor.
"""
def __init__(self,filename,path='',**kwargs):
store = pd.HDFStore(filename,'r')
fns = store[path+'/fns']
if '{}/samples'.format(path) in store:
samples = store[path+'/samples']
self.samples = np.array(samples)
minval = fns['vals'].iloc[0]
maxval = fns['vals'].iloc[-1]
pdf = interpolate(fns['vals'],fns['pdf'],s=0,k=1)
#check to see if tabulated CDF is monotonically increasing
d_cdf = fns['cdf'][1:] - fns['cdf'][:-1]
if np.any(d_cdf < 0):
logging.warning('tabulated CDF in {} is not strictly increasing. Recalculating CDF from PDF'.format(filename))
cdf = None #in this case, just recalc cdf from pdf
else:
cdf = interpolate(fns['vals'],fns['cdf'],s=0,k=1)
Distribution.__init__(self,pdf,cdf,minval=minval,maxval=maxval,
**kwargs)
store = pd.HDFStore(filename,'r')
try:
keywords = store.get_storer('{}/fns'.format(path)).attrs.keywords
            for kw,val in keywords.items():
setattr(self,kw,val)
except AttributeError:
logging.warning('saved distribution {} does not have keywords or disttype saved; perhaps this distribution was written with an older version.'.format(filename))
store.close()
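# Persistence sketch (round trip through the tabulated HDF5 layout written by
# Distribution.save_hdf; the file name and path are illustrative):
#
#   dist.save_hdf('distributions.h5', path='stellar/mass')
#   restored = Distribution_FromH5('distributions.h5', path='stellar/mass')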
class Empirical_Distribution(Distribution):
"""Generates a Distribution object given a tabulated PDF.
Parameters
----------
xs : array-like
x-values at which the PDF is evaluated
pdf : array-like
Values of pdf at provided x-values.
smooth : int or float
Smoothing parameter used by the interpolation.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,xs,pdf,smooth=0,**kwargs):
pdf /= np.trapz(pdf,xs)
fn = interpolate(xs,pdf,s=smooth)
keywords = {'smooth':smooth}
Distribution.__init__(self,fn,minval=xs.min(),maxval=xs.max(),
keywords=keywords,**kwargs)
class Gaussian_Distribution(Distribution):
"""Generates a normal distribution with given mu, sigma.
***It's probably better to use scipy.stats.norm rather than this
if you care about numerical precision/speed and don't care about the
plotting bells/whistles etc. the `Distribution` class provides.***
Parameters
----------
mu : float
Mean of normal distribution.
sig : float
Width of normal distribution.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,mu,sig,**kwargs):
self.mu = mu
self.sig = sig
def pdf(x):
return 1./np.sqrt(2*np.pi*sig**2)*np.exp(-(x-mu)**2/(2*sig**2))
def cdf(x):
return 0.5*(1 + erf((x-mu)/np.sqrt(2*sig**2)))
if 'minval' not in kwargs:
kwargs['minval'] = mu - 10*sig
if 'maxval' not in kwargs:
kwargs['maxval'] = mu + 10*sig
keywords = {'mu':self.mu,'sig':self.sig}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.2f +/- %.2f' % (self.name,self.mu,self.sig)
def resample(self,N,**kwargs):
return rand.normal(size=int(N))*self.sig + self.mu
class Hist_Distribution(Distribution):
"""Generates a distribution from a histogram of provided samples.
Uses `np.histogram` to create a histogram using the bins keyword,
then interpolates this histogram to create the pdf to pass to the
`Distribution` constructor.
Parameters
----------
samples : array-like
The samples used to create the distribution
bins : int or array-like, optional
        Keyword passed to `np.histogram`. If an integer, this will be
the number of bins, if array-like, then this defines bin edges.
equibin : bool, optional
If true and ``bins`` is an integer ``N``, then the bins will be
found by splitting the data into ``N`` equal-sized groups.
smooth : int or float
Smoothing parameter used by the interpolation function.
order : int
Order of the spline to be used for interpolation. Default is
for linear interpolation.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
self.samples = samples
if type(bins)==type(10) and equibin:
N = len(samples)//bins
sortsamples = np.sort(samples)
bins = sortsamples[0::N]
if bins[-1] != sortsamples[-1]:
bins = np.concatenate([bins,np.array([sortsamples[-1]])])
hist,bins = np.histogram(samples,bins=bins,density=True)
self.bins = bins
bins = (bins[1:] + bins[:-1])/2.
pdf_initial = interpolate(bins,hist,s=smooth,k=order)
def pdf(x):
x = np.atleast_1d(x)
y = pdf_initial(x)
w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
y[w] = 0
return y
cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
k=order)
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
keywords = {'bins':bins,'smooth':smooth,'order':order}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def plothist(self,fig=None,**kwargs):
"""Plots a histogram of samples using provided bins.
Parameters
----------
fig : None or int
Parameter passed to `setfig`.
kwargs
Keyword arguments passed to `plt.hist`.
"""
setfig(fig)
plt.hist(self.samples,bins=self.bins,**kwargs)
def resample(self,N):
"""Returns a bootstrap resampling of provided samples.
Parameters
----------
N : int
Number of samples.
"""
inds = rand.randint(len(self.samples),size=N)
return self.samples[inds]
def save_hdf(self,filename,path='',**kwargs):
Distribution.save_hdf(self,filename,path=path,**kwargs)
class Box_Distribution(Distribution):
"""Simple distribution uniform between provided lower and upper limits.
Parameters
----------
lo,hi : float
Lower/upper limits of the distribution.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,lo,hi,**kwargs):
self.lo = lo
self.hi = hi
def pdf(x):
return 1./(hi-lo) + 0*x
def cdf(x):
x = np.atleast_1d(x)
y = (x - lo) / (hi - lo)
y[x < lo] = 0
y[x > hi] = 1
return y
Distribution.__init__(self,pdf,cdf,minval=lo,maxval=hi,**kwargs)
def __str__(self):
return '%.1f < %s < %.1f' % (self.lo,self.name,self.hi)
def resample(self,N):
"""Returns a random sampling.
"""
return rand.random(size=N)*(self.maxval - self.minval) + self.minval
############## Double LorGauss ###########
def double_lorgauss(x,p):
"""Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian.
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Input parameters: mu (mode of distribution),
sig1 (LH Gaussian width),
sig2 (RH Gaussian width),
gam1 (LH Lorentzian width),
gam2 (RH Lorentzian width),
G1 (LH Gaussian "strength"),
G2 (RH Gaussian "strength").
Returns
-------
values : float or array-like
Double LorGauss distribution evaluated at input(s). If single value provided,
single value returned.
"""
mu,sig1,sig2,gam1,gam2,G1,G2 = p
gam1 = float(gam1)
gam2 = float(gam2)
G1 = abs(G1)
G2 = abs(G2)
sig1 = abs(sig1)
sig2 = abs(sig2)
gam1 = abs(gam1)
    gam2 = abs(gam2)
L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
(gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
(gam2/gam1)*(4-G1-G2))
L1 = 4 - G1 - G2 - L2
#print G1,G2,L1,L2
y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
lo = (x < mu)
hi = (x >= mu)
return y1*lo + y2*hi
def fit_double_lorgauss(bins,h,Ntry=5):
"""Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram.
Uses a grid of starting guesses to try to avoid local minima.
Parameters
----------
bins, h : array-like
Bins and heights of a histogram, as returned by, e.g., `np.histogram`.
Ntry : int, optional
Spacing of grid for starting guesses. Will try `Ntry**2` different
initial values of the "Gaussian strength" parameters `G1` and `G2`.
Returns
-------
parameters : tuple
Parameters of best-fit "double LorGauss" distribution.
Raises
------
ImportError
If the lmfit module is not available.
"""
try:
from lmfit import minimize, Parameters, Parameter, report_fit
except ImportError:
raise ImportError('you need lmfit to use this function.')
#make sure histogram is normalized
h /= np.trapz(h,bins)
#zero-pad the ends of the distribution to keep fits positive
N = len(bins)
dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - N//10*dbin,bins.min(),N//10),
                              bins,
                              np.linspace(bins.max(),bins.max() + N//10*dbin,N//10)))
    newh = np.concatenate((np.zeros(N//10),h,np.zeros(N//10)))
mu0 = bins[np.argmax(newh)]
sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])
def set_params(G1,G2):
params = Parameters()
params.add('mu',value=mu0)
params.add('sig1',value=sig0)
params.add('sig2',value=sig0)
params.add('gam1',value=sig0/10)
params.add('gam2',value=sig0/10)
params.add('G1',value=G1)
params.add('G2',value=G2)
return params
sum_devsq_best = np.inf
outkeep = None
for G1 in np.linspace(0.1,1.9,Ntry):
for G2 in np.linspace(0.1,1.9,Ntry):
params = set_params(G1,G2)
def residual(ps):
pars = (params['mu'].value,
params['sig1'].value,
params['sig2'].value,
params['gam1'].value,
params['gam2'].value,
params['G1'].value,
params['G2'].value)
hmodel = double_lorgauss(newbins,pars)
return newh-hmodel
out = minimize(residual,params)
pars = (out.params['mu'].value,out.params['sig1'].value,
out.params['sig2'].value,out.params['gam1'].value,
out.params['gam2'].value,out.params['G1'].value,
out.params['G2'].value)
sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
#print 'devs = %.1f; initial guesses for G1, G2; %.1f, %.1f' % (sum_devsq,G1, G2)
if sum_devsq < sum_devsq_best:
sum_devsq_best = sum_devsq
outkeep = out
return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value),
abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value),
abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value),
abs(outkeep.params['G2'].value))
class DoubleLorGauss_Distribution(Distribution):
"""Defines a "double LorGauss" distribution according to the provided parameters.
Parameters
----------
mu,sig1,sig2,gam1,gam2,G1,G2 : float
Parameters of `double_lorgauss` function.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,mu,sig1,sig2,gam1,gam2,G1,G2,**kwargs):
self.mu = mu
self.sig1 = sig1
self.sig2 = sig2
self.gam1 = gam1
self.gam2 = gam2
self.G1 = G1
#self.L1 = L1
self.G2 = G2
#self.L2 = L2
def pdf(x):
return double_lorgauss(x,(self.mu,self.sig1,self.sig2,
self.gam1,self.gam2,
self.G1,self.G2,))
keywords = {'mu':mu,'sig1':sig1,
'sig2':sig2,'gam1':gam1,'gam2':gam2,
'G1':G1,'G2':G2}
Distribution.__init__(self,pdf,keywords=keywords,**kwargs)
######## DoubleGauss #########
def doublegauss(x,p):
"""Evaluates normalized two-sided Gaussian distribution
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Parameters of distribution: (mu: mode of distribution,
sig1: LH width,
sig2: RH width)
Returns
-------
value : float or array-like
Distribution evaluated at input value(s). If single value provided,
single value returned.
"""
mu,sig1,sig2 = p
x = np.atleast_1d(x)
A = 1./(np.sqrt(2*np.pi)*(sig1+sig2)/2.)
ylo = A*np.exp(-(x-mu)**2/(2*sig1**2))
yhi = A*np.exp(-(x-mu)**2/(2*sig2**2))
y = x*0
wlo = np.where(x < mu)
whi = np.where(x >= mu)
y[wlo] = ylo[wlo]
y[whi] = yhi[whi]
if np.size(x)==1:
return y[0]
else:
return y
def doublegauss_cdf(x,p):
"""Cumulative distribution function for two-sided Gaussian
Parameters
----------
x : float
Input values at which to calculate CDF.
p : array-like
Parameters of distribution: (mu: mode of distribution,
sig1: LH width,
sig2: RH width)
"""
x = np.atleast_1d(x)
mu,sig1,sig2 = p
sig1 = np.absolute(sig1)
sig2 = np.absolute(sig2)
ylo = float(sig1)/(sig1 + sig2)*(1 + erf((x-mu)/np.sqrt(2*sig1**2)))
yhi = float(sig1)/(sig1 + sig2) + float(sig2)/(sig1+sig2)*(erf((x-mu)/np.sqrt(2*sig2**2)))
lo = x < mu
hi = x >= mu
return ylo*lo + yhi*hi
def fit_doublegauss_samples(samples,**kwargs):
"""Fits a two-sided Gaussian to a set of samples.
Calculates 0.16, 0.5, and 0.84 quantiles and passes these to
`fit_doublegauss` for fitting.
Parameters
----------
samples : array-like
Samples to which to fit the Gaussian.
kwargs
Keyword arguments passed to `fit_doublegauss`.
"""
sorted_samples = np.sort(samples)
N = len(samples)
    med = sorted_samples[N//2]
siglo = med - sorted_samples[int(0.16*N)]
sighi = sorted_samples[int(0.84*N)] - med
return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
"""Fits a two-sided Gaussian distribution to match a given confidence interval.
The center of the distribution may be either the median or the mode.
Parameters
----------
med : float
        The center of the distribution to fit. By default this
        will be the mode unless the `median` keyword is set to True.
siglo : float
Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is
the "lower error bar."
sighi : float
Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is
the "upper error bar."
interval : float, optional
The confidence interval enclosed by the provided error bars. Default
is 0.683 (1-sigma).
p0 : array-like, optional
Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`).
median : bool, optional
Whether to treat the `med` parameter as the median or mode
(default will be mode).
return_distribution: bool, optional
If `True`, then function will return a `DoubleGauss_Distribution` object.
Otherwise, will return just the parameters.
"""
if median:
q1 = 0.5 - (interval/2)
q2 = 0.5 + (interval/2)
targetvals = np.array([med-siglo,med,med+sighi])
qvals = np.array([q1,0.5,q2])
def objfn(pars):
logging.debug('{}'.format(pars))
logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))
return doublegauss_cdf(targetvals,pars) - qvals
if p0 is None:
p0 = [med,siglo,sighi]
pfit,success = leastsq(objfn,p0)
else:
q1 = 0.5 - (interval/2)
q2 = 0.5 + (interval/2)
targetvals = np.array([med-siglo,med+sighi])
qvals = np.array([q1,q2])
def objfn(pars):
params = (med,pars[0],pars[1])
return doublegauss_cdf(targetvals,params) - qvals
if p0 is None:
p0 = [siglo,sighi]
pfit,success = leastsq(objfn,p0)
pfit = (med,pfit[0],pfit[1])
if return_distribution:
dist = DoubleGauss_Distribution(*pfit)
return dist
else:
return pfit
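# Example sketch: turn an asymmetric error bar such as 5.0 +0.3/-0.2 into a
# Distribution object you can resample (mode-centered by default; pass
# median=True to center on the median instead).
#
#   dist = fit_doublegauss(5.0, 0.2, 0.3)
#   dist.cdf(5.3) - dist.cdf(4.8)   # ~0.683, the requested interval
#   draws = dist.resample(10000)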
class DoubleGauss_Distribution(Distribution):
"""A Distribution oject representing a two-sided Gaussian distribution
This can be used to represent a slightly asymmetric distribution,
and consists of two half-Normal distributions patched together at the
mode, and normalized appropriately. The pdf and cdf are according to
    the `doublegauss` and `doublegauss_cdf` functions, respectively.
Parameters
----------
mu : float
The mode of the distribution.
siglo : float
Width of lower half-Gaussian.
sighi : float
Width of upper half-Gaussian.
kwargs
Keyword arguments are passed to `Distribution` constructor.
"""
def __init__(self,mu,siglo,sighi,**kwargs):
self.mu = mu
self.siglo = float(siglo)
self.sighi = float(sighi)
def pdf(x):
return doublegauss(x,(mu,siglo,sighi))
def cdf(x):
return doublegauss_cdf(x,(mu,siglo,sighi))
if 'minval' not in kwargs:
kwargs['minval'] = mu - 5*siglo
if 'maxval' not in kwargs:
kwargs['maxval'] = mu + 5*sighi
keywords = {'mu':mu,'siglo':siglo,'sighi':sighi}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,self.mu,self.sighi,self.siglo)
def resample(self,N,**kwargs):
"""Random resampling of the doublegauss distribution
"""
lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
u = rand.random(size=N)
hi = (u < float(self.sighi)/(self.sighi + self.siglo))
lo = (u >= float(self.sighi)/(self.sighi + self.siglo))
vals = np.zeros(N)
vals[hi] = hivals[hi]
vals[lo] = lovals[lo]
return vals
def powerlawfn(alpha,minval,maxval):
C = powerlawnorm(alpha,minval,maxval)
def fn(inpx):
x = np.atleast_1d(inpx)
y = C*x**(alpha)
y[(x < minval) | (x > maxval)] = 0
return y
return fn
def powerlawnorm(alpha,minval,maxval):
if np.size(alpha)==1:
if alpha == -1:
C = 1/np.log(maxval/minval)
else:
C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
else:
C = np.zeros(np.size(alpha))
w = np.where(alpha==-1)
        if len(w[0]) > 0:
C[w] = 1./np.log(maxval/minval)*np.ones(len(w[0]))
nw = np.where(alpha != -1)
C[nw] = (1+alpha[nw])/(maxval**(1+alpha[nw])-minval**(1+alpha[nw]))
else:
C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
return C
class PowerLaw_Distribution(Distribution):
def __init__(self,alpha,minval,maxval,**kwargs):
self.alpha = alpha
pdf = powerlawfn(alpha,minval,maxval)
        Distribution.__init__(self,pdf,minval=minval,maxval=maxval,**kwargs)
######## KDE ###########
class KDE_Distribution(Distribution):
def __init__(self,samples,adaptive=True,draw_direct=True,bandwidth=None,**kwargs):
self.samples = samples
self.bandwidth = bandwidth
self.kde = KDE(samples,adaptive=adaptive,draw_direct=draw_direct,
bandwidth=bandwidth)
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
keywords = {'adaptive':adaptive,'draw_direct':draw_direct,
'bandwidth':bandwidth}
Distribution.__init__(self,self.kde,keywords=keywords,**kwargs)
def save_hdf(self,filename,path='',**kwargs):
Distribution.save_hdf(self,filename,path=path,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def resample(self,N,**kwargs):
return self.kde.resample(N,**kwargs)
class KDE_Distribution_Fromtxt(KDE_Distribution):
def __init__(self,filename,**kwargs):
samples = np.loadtxt(filename)
KDE_Distribution.__init__(self,samples,**kwargs)
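# Usage sketch of the intended call pattern (the smoothing itself is delegated
# to the KDE helper imported at the top of this module):
#
#   post = KDE_Distribution(np.random.normal(10, 2, size=5000), name='x')
#   post.pctile(0.5)    # ~10
#   post.resample(100)  # draws through the underlying KDE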
| {
"repo_name": "timothydmorton/simpledist",
"path": "simpledist/distributions.py",
"copies": "1",
"size": "34971",
"license": "mit",
"hash": 4189991960774240000,
"line_mean": 31.8366197183,
"line_max": 172,
"alpha_frac": 0.5666123359,
"autogenerated": false,
"ratio": 3.7826933477555436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48493056836555437,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
Build a ply lexer, but without the implicit magic and global
state, just load prebuilt ply parser and lexers at roughly the
same cost of loading a Python module.
"""
import functools
from ply.lex import Lexer, LexerReflect
from ply.yacc import ParserReflect, LRTable, LRParser
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
if args not in cache:
cache[args] = obj(*args, **kwargs)
return cache[args]
return memoizer
# lexer is passed as argument to ensure that memoization is
# unique for parser/lexer pair.
@memoize
def yaccfrom(module, tabmodule, lexer):
# Get the module dictionary used for the parser
_items = [(k,getattr(module,k)) for k in dir(module)]
pdict = dict(_items)
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict)
pinfo.get_all()
# Read the tables
lr = LRTable()
lr.read_table(tabmodule)
lr.bind_callables(pinfo.pdict)
return LRParser(lr,pinfo.error_func)
@memoize
def lexfrom(module, lexmodule):
lexobj = Lexer()
lexobj.lexoptimize = 1
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict)
linfo.get_all()
lexobj.readtab(lexmodule,ldict)
return lexobj
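# Usage sketch (module names are hypothetical; any ply grammar module plus its
# pre-generated table modules will do):
#
#   import my_grammar    # defines tokens, t_* rules and p_* rules
#   import my_lextab     # written out by a previous optimized lex build
#   import my_parsetab   # written out by a previous yacc build
#
#   lexer = lexfrom(my_grammar, my_lextab)
#   parser = yaccfrom(my_grammar, my_parsetab, lexer)
#   tree = parser.parse('1 + 2', lexer=lexer)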
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/plyhacks.py",
"copies": "9",
"size": "1446",
"license": "bsd-3-clause",
"hash": 789769418755892500,
"line_mean": 25.7777777778,
"line_max": 64,
"alpha_frac": 0.6894882434,
"autogenerated": false,
"ratio": 3.5182481751824817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8707736418582482,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
"""
C++ code printer
"""
from itertools import chain
from sympy.codegen.ast import Type, none
from .ccode import C89CodePrinter, C99CodePrinter
# from http://en.cppreference.com/w/cpp/keyword
reserved = {
'C++98': [
'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor', 'bool', 'break',
        'case', 'catch', 'char', 'class', 'compl', 'const', 'const_cast',
'continue', 'default', 'delete', 'do', 'double', 'dynamic_cast',
'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float',
'for', 'friend', 'goto', 'if', 'inline', 'int', 'long', 'mutable',
'namespace', 'new', 'not', 'not_eq', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'return', 'short', 'signed', 'sizeof', 'static', 'static_cast',
'struct', 'switch', 'template', 'this', 'throw', 'true', 'try',
'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using',
'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq'
]
}
reserved['C++11'] = reserved['C++98'][:] + [
'alignas', 'alignof', 'char16_t', 'char32_t', 'constexpr', 'decltype',
'noexcept', 'nullptr', 'static_assert', 'thread_local'
]
reserved['C++17'] = reserved['C++11'][:]
reserved['C++17'].remove('register')
# TM TS: atomic_cancel, atomic_commit, atomic_noexcept, synchronized
# concepts TS: concept, requires
# module TS: import, module
_math_functions = {
'C++98': {
'Mod': 'fmod',
'ceiling': 'ceil',
},
'C++11': {
'gamma': 'tgamma',
},
'C++17': {
'beta': 'beta',
'Ei': 'expint',
'zeta': 'riemann_zeta',
}
}
# from http://en.cppreference.com/w/cpp/header/cmath
for k in ('Abs', 'exp', 'log', 'log10', 'sqrt', 'sin', 'cos', 'tan', # 'Pow'
'asin', 'acos', 'atan', 'atan2', 'sinh', 'cosh', 'tanh', 'floor'):
_math_functions['C++98'][k] = k.lower()
for k in ('asinh', 'acosh', 'atanh', 'erf', 'erfc'):
_math_functions['C++11'][k] = k.lower()
def _attach_print_method(cls, sympy_name, func_name):
meth_name = '_print_%s' % sympy_name
if hasattr(cls, meth_name):
raise ValueError("Edit method (or subclass) instead of overwriting.")
def _print_method(self, expr):
return '{0}{1}({2})'.format(self._ns, func_name, ', '.join(map(self._print, expr.args)))
    _print_method.__doc__ = "Prints code for %s" % sympy_name
setattr(cls, meth_name, _print_method)
def _attach_print_methods(cls, cont):
for sympy_name, cxx_name in cont[cls.standard].items():
_attach_print_method(cls, sympy_name, cxx_name)
class _CXXCodePrinterBase(object):
printmethod = "_cxxcode"
language = 'C++'
_ns = 'std::' # namespace
def __init__(self, settings=None):
super(_CXXCodePrinterBase, self).__init__(settings or {})
def _print_Max(self, expr):
from sympy import Max
if len(expr.args) == 1:
return self._print(expr.args[0])
return "%smax(%s, %s)" % (self._ns, expr.args[0], self._print(Max(*expr.args[1:])))
def _print_Min(self, expr):
from sympy import Min
if len(expr.args) == 1:
return self._print(expr.args[0])
return "%smin(%s, %s)" % (self._ns, expr.args[0], self._print(Min(*expr.args[1:])))
def _print_using(self, expr):
if expr.alias == none:
return 'using %s' % expr.type
else:
raise ValueError("C++98 does not support type aliases")
class CXX98CodePrinter(_CXXCodePrinterBase, C89CodePrinter):
standard = 'C++98'
reserved_words = set(reserved['C++98'])
# _attach_print_methods(CXX98CodePrinter, _math_functions)
class CXX11CodePrinter(_CXXCodePrinterBase, C99CodePrinter):
standard = 'C++11'
reserved_words = set(reserved['C++11'])
type_mappings = dict(chain(
CXX98CodePrinter.type_mappings.items(),
{
Type('int8'): ('int8_t', {'cstdint'}),
Type('int16'): ('int16_t', {'cstdint'}),
Type('int32'): ('int32_t', {'cstdint'}),
Type('int64'): ('int64_t', {'cstdint'}),
Type('uint8'): ('uint8_t', {'cstdint'}),
Type('uint16'): ('uint16_t', {'cstdint'}),
Type('uint32'): ('uint32_t', {'cstdint'}),
Type('uint64'): ('uint64_t', {'cstdint'}),
Type('complex64'): ('std::complex<float>', {'complex'}),
Type('complex128'): ('std::complex<double>', {'complex'}),
Type('bool'): ('bool', None),
}.items()
))
def _print_using(self, expr):
if expr.alias == none:
return super(CXX11CodePrinter, self)._print_using(expr)
else:
return 'using %(alias)s = %(type)s' % expr.kwargs(apply=self._print)
# _attach_print_methods(CXX11CodePrinter, _math_functions)
class CXX17CodePrinter(_CXXCodePrinterBase, C99CodePrinter):
standard = 'C++17'
reserved_words = set(reserved['C++17'])
_kf = dict(C99CodePrinter._kf, **_math_functions['C++17'])
def _print_beta(self, expr):
return self._print_math_func(expr)
def _print_Ei(self, expr):
return self._print_math_func(expr)
def _print_zeta(self, expr):
return self._print_math_func(expr)
# _attach_print_methods(CXX17CodePrinter, _math_functions)
cxx_code_printers = {
'c++98': CXX98CodePrinter,
'c++11': CXX11CodePrinter,
'c++17': CXX17CodePrinter
}
def cxxcode(expr, assign_to=None, standard='c++11', **settings):
""" C++ equivalent of :func:`~.ccode`. """
return cxx_code_printers[standard.lower()](settings).doprint(expr, assign_to)
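# Hedged usage sketch (editorial addition, not part of the original module):
# shows how the standard-specific printers above are selected. The rendered
# strings in the comments are expectations, not verified output; they may vary
# slightly with the SymPy version and argument ordering.
if __name__ == '__main__':
    from sympy import symbols, Max
    x, y = symbols('x y')
    # Max is handled by _CXXCodePrinterBase._print_Max for every standard.
    print(cxxcode(Max(x, y), standard='c++98'))             # e.g. std::max(x, y)
    # assign_to is forwarded to the underlying C-code printer.
    print(cxxcode(x + y, standard='c++17', assign_to='z'))  # e.g. z = x + y;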
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/printing/cxxcode.py",
"copies": "1",
"size": "5694",
"license": "bsd-3-clause",
"hash": 5624514200370543000,
"line_mean": 32.8928571429,
"line_max": 96,
"alpha_frac": 0.5709518792,
"autogenerated": false,
"ratio": 3.149336283185841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42202881623858407,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
class _slice(object):
""" A hashable slice object
>>> _slice(0, 10, None)
0:10
"""
def __init__(self, start, stop, step):
self.start = start
self.stop = stop
self.step = step
def __hash__(self):
return hash((slice, self.start, self.stop, self.step))
def __str__(self):
s = ''
if self.start is not None:
s = s + str(self.start)
s = s + ':'
if self.stop is not None:
s = s + str(self.stop)
if self.step is not None:
s = s + ':' + str(self.step)
return s
def __eq__(self, other):
return (type(self), self.start, self.stop, self.step) == \
(type(other), other.start, other.stop, other.step)
def as_slice(self):
return slice(self.start, self.stop, self.step)
__repr__ = __str__
class hashable_list(tuple):
def __str__(self):
return str(list(self))
def hashable_index(index):
""" Convert slice-thing into something hashable
>>> hashable_index(1)
1
>>> isinstance(hash(hashable_index((1, slice(10)))), int)
True
"""
if type(index) is tuple: # can't do isinstance due to hashable_list
return tuple(map(hashable_index, index))
elif isinstance(index, list):
return hashable_list(index)
elif isinstance(index, slice):
return _slice(index.start, index.stop, index.step)
return index
def replace_slices(index):
if isinstance(index, hashable_list):
return list(index)
elif isinstance(index, _slice):
return index.as_slice()
elif isinstance(index, tuple):
return tuple(map(replace_slices, index))
return index
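# Hedged usage sketch (editorial addition): demonstrates the round trip the
# expression machinery relies on -- an index is made hashable (e.g. for use as
# a cache key) and then converted back before being applied to data.
if __name__ == '__main__':
    idx = (0, slice(1, 10, 2), [1, 2, 3])
    hidx = hashable_index(idx)
    assert isinstance(hash(hidx), int)   # now usable as a dict key
    assert replace_slices(hidx) == idx   # and recoverable unchanged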
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/utils.py",
"copies": "1",
"size": "1782",
"license": "bsd-3-clause",
"hash": 4376416656756942000,
"line_mean": 24.0985915493,
"line_max": 72,
"alpha_frac": 0.5735129068,
"autogenerated": false,
"ratio": 3.7125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47860129067999996,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from warnings import warn
import types
import copy
import traceback
import inspect
from future.builtins import zip
from . import (UnrecognizedFormatError, InvalidRegistrationError,
DuplicateRegistrationError, ArgumentOverrideWarning,
FormatIdentificationWarning)
from .util import open_file, open_files
_formats = {}
_sniffers = {}
_aliases = {}
_empty_file_format = '<emptyfile>'
# We create a class and instantiate it dynamically so that exceptions are more
# obvious and so that only one object exists without copying this line.
FileSentinel = type('FileSentinel', (object, ), {})()
def _override_kwargs(kw, fmt_kw, warn_user):
for key in kw:
if key in fmt_kw and fmt_kw[key] != kw[key] and warn_user:
warn('Best guess was: %s=%s, continuing with user supplied: %s' % (
key, str(fmt_kw[key]), str(kw[key])
), ArgumentOverrideWarning)
fmt_kw[key] = kw[key]
return fmt_kw
def register_sniffer(format):
"""Return a decorator for a sniffer function.
A decorator factory for sniffer functions. Sniffers may only be registered
to simple formats. Sniffers for compound formats are automatically
generated from their component simple formats.
A sniffer function should have at least the following signature:
``<format_name>_sniffer(fh)``. `fh` is **always** an open filehandle.
This decorator provides the ability to use filepaths in the same argument
position as `fh`. They will automatically be opened and closed.
**The sniffer must not close the filehandle**, cleanup will be
handled external to the sniffer and is not its concern.
`**kwargs` are not passed to a sniffer, and a sniffer must not use them.
The job of a sniffer is to determine if a file appears to be in the given
format and to 'sniff' out any kwargs that would be of use to a reader
function.
The sniffer **must** return a tuple of (True, <kwargs dict>) if it believes
`fh` is a given `format`. Otherwise it should return (False, {}).
.. note:: Failure to adhere to the above interface specified for a sniffer
will result in unintended side-effects.
The sniffer may determine membership of a file in as many or as few
lines of the file as it deems necessary.
Parameters
----------
format : str
A format name which a decorated sniffer will be bound to.
Returns
-------
function
A decorator to be used on a sniffer. The decorator will raise a
``skbio.io.DuplicateRegistrationError`` if there already exists a
*sniffer* bound to the `format`.
See Also
--------
skbio.io.sniff
"""
def decorator(sniffer):
if format in _sniffers:
raise DuplicateRegistrationError(msg="'%s' already has a sniffer."
% format)
def wrapped_sniffer(fp, mode='U', **kwargs):
with open_file(fp, mode) as fh:
# The reason we do a copy is because we need the sniffer to not
                # mutate the original file while guessing the format. The
# naive solution would be to seek to 0 at the end, but that
# would break an explicit offset provided by the user. Instead
# we create a shallow copy which works out of the box for
# file-like object, but does not work for real files. Instead
# the name attribute is reused in open for a new filehandle.
# Using seek and tell is not viable because in real files tell
# reflects the position of the read-ahead buffer and not the
# true offset of the iterator.
if hasattr(fh, 'name'):
cfh = open(fh.name, fh.mode)
else:
cfh = copy.copy(fh)
cfh.seek(0)
try:
return sniffer(cfh, **kwargs)
except Exception:
warn("'%s' has encountered a problem.\n"
"Please send the following to our issue tracker at\n"
"https://github.com/biocore/scikit-bio/issues\n\n"
"%s" % (sniffer.__name__, traceback.format_exc()),
FormatIdentificationWarning)
return False, {}
finally:
cfh.close()
wrapped_sniffer.__doc__ = sniffer.__doc__
wrapped_sniffer.__name__ = sniffer.__name__
_sniffers[format] = wrapped_sniffer
return wrapped_sniffer
return decorator
def register_reader(format, cls=None):
"""Return a decorator for a reader function.
A decorator factory for reader functions.
A reader function should have at least the following signature:
``<format_name>_to_<class_name_or_generator>(fh)``. `fh` is **always** an
open filehandle. This decorator provides the ability to use filepaths in
the same argument position as `fh`. They will automatically be opened and
closed.
**The reader must not close the filehandle**, cleanup will be
handled external to the reader and is not its concern. This is true even
in the case of generators.
Any additional `**kwargs` will be passed to the reader and may
be used if necessary.
The reader **must** return an instance of `cls` if `cls` is not None.
Otherwise the reader must return a generator. The generator need not deal
with closing the `fh`. That is already handled by this decorator.
.. note:: Failure to adhere to the above interface specified for a reader
will result in unintended side-effects.
Parameters
----------
format : str
A format name which a decorated reader will be bound to.
cls : type, optional
The class which a decorated reader will be bound to. When `cls` is None
the reader will be bound as returning a generator.
Default is None.
Returns
-------
function
A decorator to be used on a reader. The decorator will raise a
``skbio.io.DuplicateRegistrationError`` if there already exists a
*reader* bound to the same permutation of `fmt` and `cls`.
See Also
--------
skbio.io.read
"""
def decorator(reader):
format_class = _formats.setdefault(format, {}).setdefault(cls, {})
if 'reader' in format_class:
raise DuplicateRegistrationError('reader', format, cls)
file_args = []
reader_spec = inspect.getargspec(reader)
if reader_spec.defaults is not None:
# Concept from http://stackoverflow.com/a/12627202/579416
for key, default in zip(
reader_spec.args[-len(reader_spec.defaults):],
reader_spec.defaults):
if default is FileSentinel:
file_args.append(key)
# We wrap the reader so that basic file handling can be managed
# externally from the business logic.
if cls is None:
def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
file_keys = []
files = [fp]
for file_arg in file_args:
if file_arg in kwargs:
if kwargs[file_arg] is not None:
file_keys.append(file_arg)
files.append(kwargs[file_arg])
else:
kwargs[file_arg] = None
with open_files(files, mode) as fhs:
try:
for key, fh in zip(file_keys, fhs[1:]):
kwargs[key] = fh
generator = reader(fhs[0], **kwargs)
if not isinstance(generator, types.GeneratorType):
# Raise an exception to be handled next line,
# because although reader executed without error,
# it is not a generator.
raise Exception()
# If an exception is thrown at this point, it cannot
                    # be a generator. If there was a `yield` statement, then
# Python would have returned a generator regardless of the
# content. This does not preclude the generator from
# throwing exceptions.
except Exception:
raise InvalidRegistrationError("'%s' is not a "
"generator." %
reader.__name__)
while True:
yield next(generator)
else:
# When an object is instantiated we don't need to worry about the
# original position at every step, only at the end.
def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
file_keys = []
files = [fp]
for file_arg in file_args:
if file_arg in kwargs:
if kwargs[file_arg] is not None:
file_keys.append(file_arg)
files.append(kwargs[file_arg])
else:
kwargs[file_arg] = None
with open_files(files, mode) as fhs:
for key, fh in zip(file_keys, fhs[1:]):
kwargs[key] = fh
return reader(fhs[0], **kwargs)
wrapped_reader.__doc__ = reader.__doc__
wrapped_reader.__name__ = reader.__name__
format_class['reader'] = wrapped_reader
return wrapped_reader
return decorator
def register_writer(format, cls=None):
"""Return a decorator for a writer function.
A decorator factory for writer functions.
A writer function should have at least the following signature:
``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always**
an open filehandle. This decorator provides the ability to use filepaths in
the same argument position as `fh`. They will automatically be opened and
closed.
**The writer must not close the filehandle**, cleanup will be
    handled external to the writer and is not its concern.
Any additional `**kwargs` will be passed to the writer and may be used if
necessary.
The writer must not return a value. Instead it should only mutate the `fh`
    in a way consistent with its purpose.
If the writer accepts a generator, it should exhaust the generator to
ensure that the potentially open filehandle backing said generator is
closed.
.. note:: Failure to adhere to the above interface specified for a writer
will result in unintended side-effects.
Parameters
----------
format : str
A format name which a decorated writer will be bound to.
cls : type, optional
The class which a decorated writer will be bound to. If `cls` is None
the writer will be bound as expecting a generator.
Default is None.
Returns
-------
function
A decorator to be used on a writer. The decorator will raise a
``skbio.io.DuplicateRegistrationError`` if there already exists a
*writer* bound to the same permutation of `fmt` and `cls`.
See Also
--------
skbio.io.write
skbio.io.get_writer
"""
def decorator(writer):
format_class = _formats.setdefault(format, {}).setdefault(cls, {})
if 'writer' in format_class:
raise DuplicateRegistrationError('writer', format, cls)
file_args = []
writer_spec = inspect.getargspec(writer)
if writer_spec.defaults is not None:
# Concept from http://stackoverflow.com/a/12627202/579416
for key, default in zip(
writer_spec.args[-len(writer_spec.defaults):],
writer_spec.defaults):
if default is FileSentinel:
file_args.append(key)
# We wrap the writer so that basic file handling can be managed
# externally from the business logic.
def wrapped_writer(obj, fp, mode='w', **kwargs):
file_keys = []
files = [fp]
for file_arg in file_args:
if file_arg in kwargs:
if kwargs[file_arg] is not None:
file_keys.append(file_arg)
files.append(kwargs[file_arg])
else:
kwargs[file_arg] = None
with open_files(files, mode) as fhs:
for key, fh in zip(file_keys, fhs[1:]):
kwargs[key] = fh
writer(obj, fhs[0], **kwargs)
wrapped_writer.__doc__ = writer.__doc__
wrapped_writer.__name__ = writer.__name__
format_class['writer'] = wrapped_writer
return wrapped_writer
return decorator
def list_read_formats(cls):
"""Return a list of available read formats for a given `cls` type.
Parameters
----------
cls : type
The class which will be used to determine what read formats exist for
an instance of `cls`.
Returns
-------
list
A list of available read formats for an instance of `cls`. List may be
empty.
See Also
--------
skbio.io.register_reader
"""
return _rw_list_formats('reader', cls)
def list_write_formats(cls):
"""Return a list of available write formats for a given `cls` instance.
Parameters
----------
cls : type
The class which will be used to determine what write formats exist for
an instance of `cls`.
Returns
-------
list
A list of available write formats for an instance of `cls`. List may be
empty.
See Also
--------
skbio.io.register_writer
"""
return _rw_list_formats('writer', cls)
def _rw_list_formats(name, cls):
formats = []
for fmt in _formats:
if cls in _formats[fmt] and name in _formats[fmt][cls]:
formats.append(fmt)
return formats
def get_sniffer(format):
"""Return a sniffer for a format.
Parameters
----------
format : str
A format string which has a registered sniffer.
Returns
-------
function or None
Returns a sniffer function if one exists for the given `fmt`.
Otherwise it will return None.
See Also
--------
skbio.io.register_sniffer
"""
return _sniffers.get(format, None)
def get_reader(format, cls=None):
"""Return a reader for a format.
Parameters
----------
format : str
A registered format string.
cls : type, optional
The class which the reader will return an instance of. If `cls` is
None, the reader will return a generator.
Default is None.
Returns
-------
function or None
Returns a reader function if one exists for a given `fmt` and `cls`.
Otherwise it will return None.
See Also
--------
skbio.io.register_reader
"""
return _rw_getter('reader', format, cls)
def get_writer(format, cls=None):
"""Return a writer for a format.
Parameters
----------
format : str
A registered format string.
cls : type, optional
The class which the writer will expect an instance of. If `cls` is
None, the writer will expect a generator that is identical to what
is returned by ``get_reader(<some_format>, None)``.
Default is None.
Returns
-------
function or None
Returns a writer function if one exists for a given `fmt` and `cls`.
Otherwise it will return None.
See Also
--------
skbio.io.register_writer
skbio.io.get_reader
"""
return _rw_getter('writer', format, cls)
def _rw_getter(name, fmt, cls):
if fmt in _formats:
if cls in _formats[fmt] and name in _formats[fmt][cls]:
return _formats[fmt][cls][name]
return None
def sniff(fp, cls=None, mode='U'):
"""Attempt to guess the format of a file and return format str and kwargs.
Parameters
----------
fp : filepath or filehandle
The provided file to guess the format of. Filepaths are automatically
closed; filehandles are the responsibility of the caller.
cls : type, optional
A provided class that restricts the search for the format. Only formats
which have a registered reader or writer for the given `cls` will be
tested.
Default is None.
Returns
-------
(str, kwargs)
A format name and kwargs for the corresponding reader.
Raises
------
UnrecognizedFormatError
This occurs when the format is not 'claimed' by any registered sniffer
or when the format is ambiguous and has been 'claimed' by more than one
sniffer.
See Also
--------
skbio.io.register_sniffer
"""
possibles = []
for fmt in _sniffers:
if cls is not None and fmt != _empty_file_format and (
fmt not in _formats or cls not in _formats[fmt]):
continue
format_sniffer = _sniffers[fmt]
is_format, fmt_kwargs = format_sniffer(fp, mode=mode)
if is_format:
possibles.append(fmt)
kwargs = fmt_kwargs
if not possibles:
raise UnrecognizedFormatError("Cannot guess the format for %s."
% str(fp))
if len(possibles) > 1:
raise UnrecognizedFormatError("File format is ambiguous, may be"
" one of %s." % str(possibles))
return possibles[0], kwargs
def read(fp, format=None, into=None, verify=True, mode='U', **kwargs):
"""Read a supported skbio file format into an instance or a generator.
This function is able to reference and execute all *registered* read
operations in skbio.
Parameters
----------
fp : filepath or filehandle
The location to read the given `format` `into`. Filepaths are
automatically closed when read; filehandles are the responsibility
of the caller. In the case of a generator, a filepath will be closed
when ``StopIteration`` is raised; filehandles are still the
responsibility of the caller.
format : str, optional
The format must be a format name with a reader for the given
`into` class. If a `format` is not provided or is None, all
        registered sniffers for the provided `into` class will be evaluated to
attempt to guess the format.
Default is None.
into : type, optional
A class which has a registered reader for a given `format`. If `into`
is not provided or is None, read will return a generator.
Default is None.
verify : bool, optional
Whether or not to confirm the format of a file if `format` is provided.
Will raise a ``skbio.io.FormatIdentificationWarning`` if the sniffer of
`format` returns False.
Default is True.
mode : str, optional
The read mode. This is passed to `open(fp, mode)` internally.
Default is 'U'
kwargs : dict, optional
Will be passed directly to the appropriate reader.
Returns
-------
object or generator
If `into` is not None, an instance of the `into` class will be
provided with internal state consistent with the provided file.
If `into` is None, a generator will be returned.
Raises
------
ValueError
Raised when `format` and `into` are both None.
skbio.io.UnrecognizedFormatError
Raised when a reader could not be found for a given `format` or the
format could not be guessed.
skbio.io.FormatIdentificationWarning
Raised when `verify` is True and the sniffer of a `format` provided a
kwarg value that did not match the user's kwarg value.
See Also
--------
skbio.io.register_reader
skbio.io.register_sniffer
"""
if format is None and into is None:
raise ValueError("`format` and `into` cannot both be None.")
if format is None:
format, fmt_kwargs = sniff(fp, cls=into, mode=mode)
kwargs = _override_kwargs(kwargs, fmt_kwargs, verify)
elif verify:
sniffer = get_sniffer(format)
if sniffer is not None:
is_format, fmt_kwargs = sniffer(fp)
if not is_format:
warn("%s could not be positively identified as %s file." %
(str(fp), format),
FormatIdentificationWarning)
else:
kwargs = _override_kwargs(kwargs, fmt_kwargs, True)
reader = get_reader(format, into)
if reader is None:
raise UnrecognizedFormatError("Cannot read %s into %s, no reader "
"found." % (format, into.__name__
if into is not None
else 'generator'))
return reader(fp, mode=mode, **kwargs)
def write(obj, format, into, mode='w', **kwargs):
"""Write a supported skbio file format from an instance or a generator.
This function is able to reference and execute all *registered* write
operations in skbio.
Parameters
----------
obj : object
The object must have a registered writer for a provided `format`.
format : str
The format must be a registered format name with a writer for the given
`obj`.
into : filepath or filehandle
The location to write the given `format` from `obj` into. Filepaths are
automatically closed when written; filehandles are the responsibility
of the caller.
mode : str, optional
The write mode. This is passed to `open(fp, mode)` internally.
Default is 'w'.
kwargs : dict, optional
Will be passed directly to the appropriate writer.
Raises
------
skbio.io.UnrecognizedFormatError
Raised when a writer could not be found for the given `format` and
`obj`.
See Also
--------
skbio.io.register_writer
"""
cls = None
if not isinstance(obj, types.GeneratorType):
cls = obj.__class__
writer = get_writer(format, cls)
if writer is None:
raise UnrecognizedFormatError("Cannot write %s into %s, no %s writer "
"found." % (format, str(into),
'generator' if cls is None
else str(cls)))
writer(obj, into, mode=mode, **kwargs)
# This is meant to be a handy indicator to the user that they have done
# something wrong.
@register_sniffer(_empty_file_format)
def empty_file_sniffer(fh):
for line in fh:
if line.strip():
return False, {}
return True, {}
def initialize_oop_interface():
classes = set()
# Find each potential class
for fmt in _formats:
for cls in _formats[fmt]:
classes.add(cls)
# Add readers and writers for each class
for cls in classes:
if cls is not None:
_apply_read(cls)
_apply_write(cls)
def _apply_read(cls):
"""Add read method if any formats have a registered reader for `cls`."""
skbio_io_read = globals()['read']
read_formats = list_read_formats(cls)
if read_formats:
@classmethod
def read(cls, fp, format=None, **kwargs):
return skbio_io_read(fp, into=cls, format=format, **kwargs)
read.__func__.__doc__ = _read_docstring % (
cls.__name__,
_formats_for_docs(read_formats),
cls.__name__,
cls.__name__,
cls.__name__,
_import_paths(read_formats)
)
cls.read = read
def _apply_write(cls):
"""Add write method if any formats have a registered writer for `cls`."""
skbio_io_write = globals()['write']
write_formats = list_write_formats(cls)
if write_formats:
if not hasattr(cls, 'default_write_format'):
raise NotImplementedError(
"Classes with registered writers must provide a "
"`default_write_format`. Please add `default_write_format` to"
" '%s'." % cls.__name__)
def write(self, fp, format=cls.default_write_format, **kwargs):
skbio_io_write(self, into=fp, format=format, **kwargs)
write.__doc__ = _write_docstring % (
cls.__name__,
_formats_for_docs(write_formats),
cls.__name__,
cls.default_write_format,
_import_paths(write_formats)
)
cls.write = write
def _import_paths(formats):
lines = []
for fmt in formats:
lines.append("skbio.io." + fmt)
return '\n'.join(lines)
def _formats_for_docs(formats):
lines = []
for fmt in formats:
lines.append("- ``'%s'`` (:mod:`skbio.io.%s`)" % (fmt, fmt))
return '\n'.join(lines)
_read_docstring = """Create a new ``%s`` instance from a file.
This is a convenience method for :mod:`skbio.io.read`. For more
information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.
Supported file formats include:
%s
Parameters
----------
fp : filepath or filehandle
The location to read the given `format`. Filepaths are
automatically closed when read; filehandles are the
responsibility of the caller.
format : str, optional
The format must be a format name with a reader for ``%s``.
If a `format` is not provided or is None, it will attempt to
guess the format.
kwargs : dict, optional
Keyword arguments passed to :mod:`skbio.io.read` and the file
format reader for ``%s``.
Returns
-------
%s
A new instance.
See Also
--------
write
skbio.io.read
%s
"""
_write_docstring = """Write an instance of ``%s`` to a file.
This is a convenience method for :mod:`skbio.io.write`. For more
information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.
Supported file formats include:
%s
Parameters
----------
fp : filepath or filehandle
The location to write the given `format` into. Filepaths are
automatically closed when written; filehandles are the
responsibility of the caller.
format : str
The format must be a registered format name with a writer for
``%s``.
Default is `'%s'`.
kwargs : dict, optional
Keyword arguments passed to :mod:`skbio.io.write` and the
file format writer.
See Also
--------
read
skbio.io.write
%s
"""
| {
"repo_name": "Kleptobismol/scikit-bio",
"path": "skbio/io/_registry.py",
"copies": "1",
"size": "27288",
"license": "bsd-3-clause",
"hash": -2246576074933449500,
"line_mean": 32.3594132029,
"line_max": 79,
"alpha_frac": 0.5878774553,
"autogenerated": false,
"ratio": 4.38996138996139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00004155406854544674,
"num_lines": 818
} |
from __future__ import (absolute_import, division, print_function)
""" DbWrench_DDL_postprocess.py
Emilio Mayorga (UW/APL)
8/15-18/2014
Take the DDL SQL output from DbWrench for PostgreSQL, and apply changes in order to
generate a new, blank ODM2 database following ODM2 conventions. Specifically:
1. All entity names will be lowercase
2. All entities will be under a single schema
3. The field samplingfeatures.featuregeometry will be a PostGIS geometry field constrained
   to be 2D, but otherwise free to store any projection (e.g., epsg:4326) and to
accept any geometry type (point, line, polygon, and collections thereof [multi-polygon, etc])
- Assumes that the source DDL SQL file is in the same directory as the script
- This DDL must be run on a pre-existing, "empty" database. All permissions (roles) settings
must be set up by the database administrator.
------------------------------------
Note: developed and tested on Linux only.
------------------------------------
See this great online tool (there are others), to test regex, export to code, etc
http://regex101.com
"""
import re
# =============== USER (run-time) CHANGES =================
# DDL input file name
ddlfile = 'ODM2_DDL_for_PostgreSQL9.3PostGIS2.1.sql'
# DDL output file name
ddlppfile = 'ODM2_DDL_for_PostgreSQL9.3PostGIS2.1_postprocessed.sql'
newschemaname = 'odm2'
odmversion = '2.0'
odm2infodct = {'schema': newschemaname, 'version': odmversion}
# =========================================================
pre_block = """ /* Post-processed DDL based on DbWrench export. 2014-8-18 10pm PDT */
-- IF THIS DDL SCRIPT IS TO *CREATE* THE DATABASE ITSELF,
-- WILL NEED TO FIRST KNOW THE DATABASE NAME AND ROLES TO BE USED.
/* Add single base schema for all odm2 entities */
CREATE SCHEMA %(schema)s;
COMMENT ON SCHEMA %(schema)s IS 'Schema holding all ODM2 (%(version)s) entities (tables, etc).';
""" % odm2infodct
post_block = """/* ** Set up samplingfeatures.featuregeometry as a heterogeneous, 2D PostGIS geom field. */
ALTER TABLE %(schema)s.samplingfeatures ALTER COLUMN featuregeometry TYPE geometry;
ALTER TABLE %(schema)s.samplingfeatures ADD CONSTRAINT
enforce_dims_featuregeometry CHECK (st_ndims(featuregeometry) = 2);
CREATE INDEX idx_samplingfeature_featuregeom ON %(schema)s.samplingfeatures USING gist (featuregeometry);
-- Populate and tweak geometry_columns
SELECT Populate_Geometry_Columns();
-- But it assigned a POINT type to %(schema)s.samplingfeatures. Need instead to use the generic
-- 'geometries', to accept any type (point, line, polygon, and collections thereof [multi-polygon, etc])
UPDATE public.geometry_columns SET
type = 'GEOMETRY' WHERE f_table_schema = '%(schema)s' AND f_table_name = 'samplingfeatures';
""" % odm2infodct
# Relies on these assumptions:
# 1. All schema names start with the prefix "ODM2"
# 2. No entity other than schemas starts with the prefix "ODM2"
p = re.compile(r'(ODM2\w*?)(?=\.)')
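# Hedged sanity check (editorial addition): illustrates the intended rewrite on
# a made-up statement; the table name is only an example, not taken from the
# DDL file itself.
assert re.sub(p, newschemaname, 'CREATE TABLE ODM2Core.Results ();') == \
    'CREATE TABLE odm2.Results ();'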
ddl_pp_lines = []
with open(ddlfile, 'r') as ddl_f:
for ddl_ln in ddl_f.readlines():
ddl_ln.replace('"', '') # remove double quotes, if present
if 'schema' in ddl_ln.lower():
# Skip the line, so it won't be written out
# Assumes all schema statements are found as single lines
continue
else:
ddl_pp_lines.append(re.sub(p, newschemaname, ddl_ln))
# Write out new, post-processed DDL file
# Insert pre and post blocks, and the modified DDL lines in between
ddl_ppf = open(ddlppfile, 'w')
ddl_ppf.write(pre_block)
ddl_ppf.write('/* ================================================================\n')
ddl_ppf.write(' ================================================================ */\n\n')
ddl_ppf.writelines(ddl_pp_lines)
ddl_ppf.write('\n/* ================================================================\n')
ddl_ppf.write(' ================================================================ */\n\n')
ddl_ppf.write(post_block)
ddl_ppf.close()
| {
"repo_name": "ODM2/ODM2PythonAPI",
"path": "tests/schemas/postgresql/olderversions/DbWrench_DDL_postprocess.py",
"copies": "2",
"size": "3956",
"license": "bsd-3-clause",
"hash": 3075974794805874700,
"line_mean": 42.9555555556,
"line_max": 107,
"alpha_frac": 0.6468655207,
"autogenerated": false,
"ratio": 3.6293577981651377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026449126961677677,
"num_lines": 90
} |
from __future__ import absolute_import, division, print_function
# Define acceptable line styles
VALID_LINESTYLES = ['solid', 'dashed', 'dash-dot', 'dotted', 'none']
GREY = '#373737'
GRAY = GREY
BLUE = "#1F78B4"
GREEN = "#33A02C"
RED = "#E31A1C"
ORANGE = "#FF7F00"
PURPLE = "#6A3D9A"
YELLOW = "#FFFF99"
BROWN = "#8C510A"
PINK = "#FB9A99"
LIGHT_BLUE = "#A6CEE3"
LIGHT_GREEN = "#B2DF8A"
LIGHT_RED = "#FB9A99"
LIGHT_ORANGE = "#FDBF6F"
LIGHT_PURPLE = "#CAB2D6"
COLORS = [RED, GREEN, BLUE, BROWN, ORANGE, PURPLE, PINK]
__all__ = ['VisualAttributes']
class VisualAttributes(object):
'''
This class is used to define visual attributes for any kind of objects
The essential attributes of a VisualAttributes instance are:
:param color: A matplotlib color string
:param alpha: Opacity (0-1)
:param linewidth: The linewidth (float or int)
:param linestyle: The linestyle (``'solid' | 'dashed' | 'dash-dot' | 'dotted' | 'none'``)
:param marker: The matplotlib marker shape (``'o' | 's' | '^' | etc``)
:param markersize: The size of the marker (int)
'''
def __init__(self, parent=None, washout=False, color=GREY):
# Color can be specified using Matplotlib notation. Specifically, it
# can be:
# * A string with a common color (e.g. 'black', 'red', 'orange')
        # * A string containing a float in the range [0:1] for a shade of
        #   gray ('0.0' = black, '1.0' = white)
        # * A tuple of three floats in the range [0:1] for (R, G, B)
# * An HTML hexadecimal string (e.g. '#eeefff')
self.color = color
self.alpha = .5
# Line width in points (float or int)
self.linewidth = 1
# Line style, which can be one of 'solid', 'dashed', 'dash-dot',
# 'dotted', or 'none'
self.linestyle = 'solid'
self.marker = 'o'
self.markersize = 3
self.parent = parent
self._atts = ['color', 'alpha', 'linewidth', 'linestyle', 'marker',
'markersize']
def __eq__(self, other):
if not isinstance(other, VisualAttributes):
return False
return all(getattr(self, a) == getattr(other, a) for a in self._atts)
def set(self, other):
"""
Update this instance's properties based on another VisualAttributes instance.
"""
for att in self._atts:
setattr(self, att, getattr(other, att))
def copy(self, new_parent=None):
"""
Create a new instance with the same visual properties
"""
result = VisualAttributes()
result.set(self)
if new_parent is not None:
result.parent = new_parent
return result
def __setattr__(self, attribute, value):
# Check that line style is valid
if attribute == 'linestyle' and value not in VALID_LINESTYLES:
raise Exception("Line style should be one of %s" %
'/'.join(VALID_LINESTYLES))
# Check that line width is valid
if attribute == 'linewidth':
if type(value) not in [float, int]:
raise Exception("Line width should be a float or an int")
if value < 0:
raise Exception("Line width should be positive")
# Check that the attribute exists (don't allow new attributes)
allowed = set(['color', 'linewidth', 'linestyle',
'alpha', 'parent', 'marker', 'markersize', '_atts'])
if attribute not in allowed:
raise Exception("Attribute %s does not exist" % attribute)
changed = getattr(self, attribute, None) != value
object.__setattr__(self, attribute, value)
# if parent has a broadcast method, broadcast the change
if (changed and hasattr(self, 'parent') and
hasattr(self.parent, 'broadcast') and
attribute != 'parent'):
self.parent.broadcast('style')
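# Hedged usage sketch (editorial addition): exercises the validation and the
# copy()/set() helpers defined above. The attribute values are arbitrary
# examples, not glue defaults.
if __name__ == '__main__':
    style = VisualAttributes(color=RED)
    clone = style.copy()
    assert clone == style                 # compares the attributes in _atts
    try:
        style.linestyle = 'wavy'          # rejected: not in VALID_LINESTYLES
    except Exception as exc:
        print('rejected invalid linestyle:', exc)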
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/visual.py",
"copies": "1",
"size": "4094",
"license": "bsd-3-clause",
"hash": 6483695411819964000,
"line_mean": 32.5573770492,
"line_max": 93,
"alpha_frac": 0.5832926234,
"autogenerated": false,
"ratio": 3.7388127853881277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48221054087881277,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from six import string_types
from tensorflow.python.keras.utils import tf_utils as keras_tf_utils
# By importing `distributions` as `tfd`, docstrings will show
# `tfd.Distribution`. We import `bijectors` the same way, for consistency.
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import layers as tfl
from tensorflow_probability.python.internal import \
distribution_util as dist_util
from tensorflow_probability.python.layers.distribution_layer import _event_size
from tensorflow_probability.python.layers.internal import \
distribution_tensor_coercible as dtc
from odin.bay.distributions import NegativeBinomialDisp, ZeroInflated
__all__ = [
'DistributionLambda', 'MultivariateNormalLayer', 'BernoulliLayer',
'DeterministicLayer', 'VectorDeterministicLayer', 'OneHotCategoricalLayer',
'GammaLayer', 'DirichletLayer', 'GaussianLayer', 'NormalLayer',
'LogNormalLayer', 'LogisticLayer', 'ZIBernoulliLayer',
'update_convert_to_tensor_fn'
]
DistributionLambda = tfl.DistributionLambda
BernoulliLayer = tfl.IndependentBernoulli
LogisticLayer = tfl.IndependentLogistic
# ===========================================================================
# Helper
# ===========================================================================
def update_convert_to_tensor_fn(dist, fn):
assert isinstance(dist, dtc._TensorCoercible), \
"dist must be output from tfd.DistributionLambda"
assert callable(fn), "fn must be callable"
if isinstance(fn, property):
fn = fn.fget
dist._concrete_value = None
dist._convert_to_tensor_fn = fn
return dist
def _preprocess_eventshape(params, event_shape, n_dims=1):
if isinstance(event_shape, string_types):
if event_shape.lower().strip() == 'auto':
event_shape = params.shape[-n_dims:]
else:
raise ValueError("Not support for event_shape='%s'" % event_shape)
return event_shape
# ===========================================================================
# Simple distribution
# ===========================================================================
class DeterministicLayer(DistributionLambda):
"""
```none
pmf(x; loc) = 1, if x == loc, else 0
cdf(x; loc) = 1, if x >= loc, else 0
```
"""
def __init__(self,
event_shape=(),
convert_to_tensor_fn=tfd.Distribution.sample,
activity_regularizer=None,
validate_args=False,
**kwargs):
super(DeterministicLayer,
self).__init__(lambda t: type(self).new(t, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'DeterministicLayer', [params]):
params = tf.convert_to_tensor(value=params, name='params')
return tfd.Deterministic(loc=params, validate_args=validate_args)
@staticmethod
def params_size(event_size, name=None):
""" The number of `params` needed to create a single distribution. """
return event_size
class VectorDeterministicLayer(DistributionLambda):
"""
```none
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise.
```
"""
def __init__(self,
event_shape=(),
convert_to_tensor_fn=tfd.Distribution.sample,
activity_regularizer=None,
validate_args=False,
**kwargs):
super(VectorDeterministicLayer,
self).__init__(lambda t: type(self).new(t, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'VectorDeterministicLayer', [params]):
params = tf.convert_to_tensor(value=params, name='params')
return tfd.VectorDeterministic(loc=params, validate_args=validate_args)
@staticmethod
def params_size(event_size, name=None):
""" The number of `params` needed to create a single distribution. """
return event_size
class OneHotCategoricalLayer(DistributionLambda):
""" A `d`-variate OneHotCategorical Keras layer from `d` params.
Parameters
----------
convert_to_tensor_fn: callable
that takes a `tfd.Distribution` instance and returns a
`tf.Tensor`-like object. For examples, see `class` docstring.
Default value: `tfd.Distribution.sample`.
sample_dtype: `dtype`
Type of samples produced by this distribution.
Default value: `None` (i.e., previous layer's `dtype`).
validate_args: `bool` (default `False`)
When `True` distribution parameters are checked for validity
despite possibly degrading runtime performance.
When `False` invalid inputs may silently render incorrect outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
Note
----
  If the input is given as probability values, it is clipped to the range
  [1e-8, 1 - 1e-8].
"""
def __init__(self,
event_shape=(),
convert_to_tensor_fn=tfd.Distribution.sample,
probs_input=False,
sample_dtype=None,
activity_regularizer=None,
validate_args=False,
**kwargs):
super(OneHotCategoricalLayer, self).__init__(
lambda t: type(self).new(t, probs_input, sample_dtype, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params, probs_input=False, dtype=None, validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'OneHotCategoricalLayer', [params]):
params = tf.convert_to_tensor(value=params, name='params')
return tfd.OneHotCategorical(
logits=params if not probs_input else None,
probs=tf.clip_by_value(params, 1e-8, 1 -
1e-8) if probs_input else None,
dtype=dtype or params.dtype.base_dtype,
validate_args=validate_args)
@staticmethod
def params_size(event_size, name=None):
"""The number of `params` needed to create a single distribution."""
return event_size
class DirichletLayer(DistributionLambda):
"""
Parameters
----------
pre_softplus : bool (default: False)
applying softplus activation on the parameters before parameterizing
clip_for_stable : bool (default: True)
clipping the concentration into range [1e-3, 1e3] for stability
"""
def __init__(self,
event_shape='auto',
pre_softplus=False,
clip_for_stable=True,
convert_to_tensor_fn=tfd.Distribution.sample,
activity_regularizer=None,
validate_args=False,
**kwargs):
super(DirichletLayer,
self).__init__(lambda t: type(self).new(
t, event_shape, pre_softplus, clip_for_stable, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape='auto',
pre_softplus=False,
clip_for_stable=True,
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
event_shape = _preprocess_eventshape(params, event_shape)
with tf.compat.v1.name_scope(name, 'DirichletLayer', [params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
# Clips the Dirichlet parameters to the numerically stable KL region
if pre_softplus:
params = tf.nn.softplus(params)
if clip_for_stable:
params = tf.clip_by_value(params, 1e-3, 1e3)
return tfd.Independent(
tfd.Dirichlet(concentration=tf.reshape(params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'Dirichlet_params_size', [event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return _event_size(event_shape, name=name or 'Dirichlet_params_size')
class GaussianLayer(DistributionLambda):
"""An independent normal Keras layer.
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
softplus_scale : bool
if True, `scale = softplus(params) + softplus_inverse(1.0)`
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
softplus_scale=True,
convert_to_tensor_fn=tfd.Distribution.sample,
activity_regularizer=None,
validate_args=False,
**kwargs):
super(GaussianLayer, self).__init__(
lambda t: type(self).new(t, event_shape, softplus_scale, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape=(),
softplus_scale=True,
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'NormalLayer', [params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, scale_params = tf.split(params, 2, axis=-1)
if softplus_scale:
scale_params = tf.math.softplus(
scale_params) + tfp.math.softplus_inverse(1.0)
return tfd.Independent(
tfd.Normal(loc=tf.reshape(loc_params, output_shape),
scale=tf.reshape(scale_params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'Normal_params_size', [event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 2 * _event_size(event_shape, name=name or 'Normal_params_size')
class LogNormalLayer(DistributionLambda):
"""An independent LogNormal Keras layer.
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
softplus_scale : bool
if True, `scale = softplus(params) + softplus_inverse(1.0)`
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
softplus_scale=True,
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
super(LogNormalLayer, self).__init__(
lambda t: type(self).new(t, event_shape, softplus_scale, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape=(),
softplus_scale=True,
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'LogNormalLayer', [params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, scale_params = tf.split(params, 2, axis=-1)
if softplus_scale:
scale_params = tf.math.softplus(
scale_params) + tfp.math.softplus_inverse(1.0)
return tfd.Independent(
tfd.LogNormal(loc=tf.reshape(loc_params, output_shape),
scale=tf.reshape(scale_params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'LogNormal_params_size', [event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 2 * _event_size(event_shape, name=name or 'LogNormal_params_size')
class GammaLayer(DistributionLambda):
"""An independent Gamma Keras layer.
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
super(GammaLayer, self).__init__(
lambda t: type(self).new(t, event_shape, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'GammaLayer', [params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
concentration_params, rate_params = tf.split(params, 2, axis=-1)
return tfd.Independent(
tfd.Gamma(concentration=tf.reshape(concentration_params,
output_shape),
rate=tf.reshape(rate_params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'Gamma_params_size', [event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 2 * _event_size(event_shape, name=name or 'Gamma_params_size')
# ===========================================================================
# Multivariate distribution
# ===========================================================================
class MultivariateNormalLayer(DistributionLambda):
"""A `d`-variate Multivariate Normal distribution Keras layer:
  Different covariance modes:
- tril (lower triangle): `d + d * (d + 1) // 2` params.
- diag (diagonal) : `d + d` params.
- full (full) : `d + d * d` params.
Typical choices for `convert_to_tensor_fn` include:
- `tfd.Distribution.sample`
- `tfd.Distribution.mean`
- `tfd.Distribution.mode`
- `lambda s: s.mean() + 0.1 * s.stddev()`
Parameters
----------
event_size: Scalar `int` representing the size of single draw from this
distribution.
covariance_type : {'diag', 'tril', 'full'}
softplus_scale : bool
if True, `scale = softplus(params) + softplus_inverse(1.0)`
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object. For examples, see
`class` docstring.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_size,
covariance_type='diag',
softplus_scale=True,
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
super(MultivariateNormalLayer,
self).__init__(lambda t: type(self).new(
t, event_size, covariance_type, softplus_scale, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_size,
covariance_type,
softplus_scale,
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
covariance_type = str(covariance_type).lower().strip()
assert covariance_type in ('full', 'tril', 'diag'), \
"No support for given covariance_type: '%s'" % covariance_type
if bool(softplus_scale):
scale_fn = lambda x: tf.math.softplus(x) + tfp.math.softplus_inverse(1.0)
else:
scale_fn = lambda x: x
with tf.compat.v1.name_scope(name, 'MultivariateNormalLayer',
[params, event_size]):
params = tf.convert_to_tensor(value=params, name='params')
if covariance_type == 'tril':
scale_tril = tfb.ScaleTriL(diag_shift=np.array(
1e-5, params.dtype.as_numpy_dtype()),
validate_args=validate_args)
return tfd.MultivariateNormalTriL(
loc=params[..., :event_size],
scale_tril=scale_tril(scale_fn(params[..., event_size:])),
validate_args=validate_args)
elif covariance_type == 'diag':
return tfd.MultivariateNormalDiag(loc=params[..., :event_size],
scale_diag=scale_fn(
params[..., event_size:]),
validate_args=validate_args)
elif covariance_type == 'full':
return tfd.MultivariateNormalFullCovariance(
loc=params[..., :event_size],
covariance_matrix=tf.reshape(scale_fn(params[..., event_size:]),
(event_size, event_size)),
validate_args=validate_args)
@staticmethod
def params_size(event_size, covariance_type='diag', name=None):
"""The number of `params` needed to create a single distribution."""
covariance_type = str(covariance_type).lower().strip()
assert covariance_type in ('full', 'tril', 'diag'), \
"No support for given covariance_type: '%s'" % covariance_type
with tf.compat.v1.name_scope(name, 'MultivariateNormal_params_size',
[event_size]):
if covariance_type == 'tril':
return event_size + event_size * (event_size + 1) // 2
elif covariance_type == 'diag':
return event_size + event_size
elif covariance_type == 'full':
return event_size + event_size * event_size
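# Hedged worked example (editorial addition): for event_size d = 3 the three
# covariance modes above need 3 + 3 = 6 ('diag'), 3 + 3 * 4 // 2 = 9 ('tril')
# and 3 + 3 * 3 = 12 ('full') parameters, matching MultivariateNormalLayer's
# class docstring.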
class ZIBernoulliLayer(DistributionLambda):
"""A Independent zero-inflated bernoulli keras layer
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
  given_logits : boolean
    whether the Bernoulli parameters are given as logits (True) or as
    probabilities (False)
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
given_logits=True,
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
super(ZIBernoulliLayer, self).__init__(
lambda t: type(self).new(t, event_shape, given_logits, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape=(),
given_logits=True,
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'ZIBernoulliLayer',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
(bernoulli_params, rate_params) = tf.split(params, 2, axis=-1)
bernoulli_params = tf.reshape(bernoulli_params, output_shape)
bern = tfd.Bernoulli(logits=bernoulli_params if given_logits else None,
probs=bernoulli_params if not given_logits else None,
validate_args=validate_args)
zibern = ZeroInflated(count_distribution=bern,
logits=tf.reshape(rate_params, output_shape),
validate_args=validate_args)
return tfd.Independent(
zibern,
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'ZeroInflatedBernoulli_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 2 * _event_size(event_shape,
name=name or 'ZeroInflatedBernoulli_params_size')
# ===========================================================================
# Shortcut
# ===========================================================================
NormalLayer = GaussianLayer
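# Hedged usage sketch (editorial addition): builds a distribution directly via
# the new()/params_size() contract documented above. It assumes the TF 1.x-style
# TensorFlow + TensorFlow Probability versions this module targets, with eager
# execution available; the printed shapes are expectations, not verified output.
if __name__ == '__main__':
  event_shape = (4,)
  n_params = GaussianLayer.params_size(event_shape)  # 2 * 4 = 8 (loc + scale)
  params = tf.zeros([3, 8])                          # parameters for a batch of 3
  dist = GaussianLayer.new(params, event_shape=event_shape)
  print(n_params, dist.batch_shape, dist.event_shape)  # expect 8, (3,), (4,)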
| {
"repo_name": "imito/odin",
"path": "odin/bay/distribution_layers/basic.py",
"copies": "1",
"size": "26273",
"license": "mit",
"hash": -2009399128191258600,
"line_mean": 37.8653846154,
"line_max": 80,
"alpha_frac": 0.6005404788,
"autogenerated": false,
"ratio": 4.122548250431508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5223088729231508,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = 0 # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python"]
# Description should be a one-liner:
description = "brain_diffusion: a tool for calculating and analyzing MSDs from trajectory datasets"
# Long description will go up on the pypi page
long_description = """
brain_diffusion
========
Brain_diffusion is a tool for calculating and analyzing MSDs from trajectory
datasets.
It calculates MSDs from input trajectories, performs averaging over large
datasets, and provides visualization tools. It also includes templates for
parallel computing.
To get started using these components in your own software, please go to the
repository README_.
.. _README: https://github.com/ccurtis7/brain_diffusion/blob/master/README.md
License
=======
``brain_diffusion`` is licensed under the terms of the BSD 2-Clause license. See
the file "LICENSE" for information on the history of this software, terms &
conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Chad Curtis, The University of Washington.
"""
NAME = "brain_diffusion"
MAINTAINER = "Chad Curtis"
MAINTAINER_EMAIL = "ccurtis7@uw.edu"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "https://github.com/ccurtis7/brain_diffusion"
DOWNLOAD_URL = ""
LICENSE = "BSD"
AUTHOR = "Chad Curtis"
AUTHOR_EMAIL = "ccurtis7@uw.edu"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
REQUIRES = ["numpy", "scipy", "matplotlib"]
| {
"repo_name": "ccurtis7/brain-diffusion",
"path": "brain_diffusion/version.py",
"copies": "1",
"size": "2314",
"license": "bsd-2-clause",
"hash": 3468396652527934500,
"line_mean": 31.1388888889,
"line_max": 99,
"alpha_frac": 0.7199654278,
"autogenerated": false,
"ratio": 3.554531490015361,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4774496917815361,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "Keratin: tools for machine learning from biomedical images"
# Long description will go up on the pypi page
long_description = """
Keratin
========
Tools for machine learning from biomedical images
For more details, see repository README_.
.. _README: https://github.com/uw-biomedical-ml/keratin/blob/master/README.md
License
=======
``keratin`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Ariel Rokem, The University of Washington
eScience Institute.
"""
NAME = "keratin"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/uw-biomedical-ml/keratin"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem"
AUTHOR_EMAIL = "arokem@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {}
| {
"repo_name": "uw-biomedical-ml/keratin",
"path": "keratin/version.py",
"copies": "1",
"size": "2098",
"license": "mit",
"hash": 979547059690466300,
"line_mean": 30.3134328358,
"line_max": 77,
"alpha_frac": 0.6963775024,
"autogenerated": false,
"ratio": 3.4966666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4693044169066667,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 2
_version_micro = 1 # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python"]
# Description should be a one-liner:
description = "popylar: record software use with Google Analytics"
# Long description will go up on the pypi page
long_description = """
Popylar
========
Record software use with Google Analytics.
License
=======
``popylar`` is licensed under the terms of the BSD license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2016--, Ariel Rokem, The University of Washington
eScience Institute.
"""
NAME = "popylar"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/popylar/popylar"
DOWNLOAD_URL = ""
LICENSE = "BSD"
AUTHOR = "Ariel Rokem"
AUTHOR_EMAIL = "arokem@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
REQUIRES = ["requests"]
| {
"repo_name": "popylar/popylar",
"path": "popylar/version.py",
"copies": "1",
"size": "1853",
"license": "bsd-2-clause",
"hash": -6254377337654638000,
"line_mean": 28.8870967742,
"line_max": 77,
"alpha_frac": 0.6999460335,
"autogenerated": false,
"ratio": 3.457089552238806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9657035585738807,
"avg_score": 0,
"num_lines": 62
} |
from __future__ import absolute_import, division, print_function
from abc import ABCMeta, abstractmethod
import logging
from .data_factories import load_data
from .util import CallbackMixin
MAX_UNDO = 50
"""
The classes in this module allow user actions to be stored as commands,
which can be undone/redone
All UI frontends should map interactions to command objects, instead
of directly performing an action.
Commands have access to two sources of data: the first are the
keyword arguments passed to the constructor. These are stored as
attributes of self. The second is a session object passed to all
Command.do and Command.undo calls.
"""
class Command(object):
"""
A class to encapsulate (and possibly undo) state changes
Subclasses of this abstract base class must implement the
`do` and `undo` methods.
Both `do` and `undo` receive a single input argument named
`session` -- this is whatever object is passed to the constructor
of :class:`glue.core.command.CommandStack`. This object is used
to store and retrieve resources needed by each command. The
Glue application itself uses a :class:`~glue.core.session.Session`
instance for this.
Each class should also override the class-level kwargs list,
to list the required keyword arguments that should be passed to the
command constructor. The base class will check that these
keywords are indeed provided. Commands should not take
non-keyword arguments in the constructor method
"""
__metaclass__ = ABCMeta
kwargs = []
def __init__(self, **kwargs):
kwargs = kwargs.copy()
for k in self.kwargs:
if k not in kwargs:
raise RuntimeError("Required keyword %s not passed to %s" %
(k, type(self)))
setattr(self, k, kwargs.pop(k))
self.extra = kwargs
@abstractmethod
def do(self, session):
"""
Execute the command
:param session: An object used to store and fetch resources
needed by a Command.
"""
pass
@abstractmethod
def undo(self, session):
pass
@property
def label(self):
return type(self).__name__
class CommandStack(CallbackMixin):
"""
The command stack collects commands,
and saves them to enable undoing/redoing
After instantiation, something can be assigned to
the session property. This is passed as the sole argument
of all Command (un)do methods.
"""
def __init__(self):
super(CommandStack, self).__init__()
self._session = None
self._command_stack = []
self._undo_stack = []
@property
def session(self):
return self._session
@session.setter
def session(self, value):
self._session = value
@property
def undo_label(self):
""" Brief label for the command reversed by an undo """
if len(self._command_stack) == 0:
return ''
cmd = self._command_stack[-1]
return cmd.label
@property
def redo_label(self):
""" Brief label for the command executed on a redo"""
if len(self._undo_stack) == 0:
return ''
cmd = self._undo_stack[-1]
return cmd.label
def do(self, cmd):
"""
Execute and log a new command
:rtype: The return value of cmd.do()
"""
logging.getLogger(__name__).debug("Do %s", cmd)
self._command_stack.append(cmd)
result = cmd.do(self._session)
self._command_stack = self._command_stack[-MAX_UNDO:]
self._undo_stack = []
self.notify('do')
return result
def undo(self):
"""
Undo the previous command
:raises: IndexError, if there are no objects to undo
"""
try:
c = self._command_stack.pop()
logging.getLogger(__name__).debug("Undo %s", c)
except IndexError:
raise IndexError("No commands to undo")
self._undo_stack.append(c)
c.undo(self._session)
self.notify('undo')
def redo(self):
"""
Redo the previously-undone command
:raises: IndexError, if there are no undone actions
"""
try:
c = self._undo_stack.pop()
logging.getLogger(__name__).debug("Undo %s", c)
except IndexError:
raise IndexError("No commands to redo")
result = c.do(self._session)
self._command_stack.append(c)
self.notify('redo')
return result
def can_undo_redo(self):
"""
Return whether undo and redo options are possible
:rtype: (bool, bool) - Whether undo and redo are possible, respectively
"""
return len(self._command_stack) > 0, len(self._undo_stack) > 0
class LoadData(Command):
kwargs = ['path', 'factory']
label = 'load data'
def do(self, session):
return load_data(self.path, self.factory)
def undo(self, session):
pass
class AddData(Command):
kwargs = ['data']
label = 'add data'
def do(self, session):
session.data_collection.append(self.data)
def undo(self, session):
session.data_collection.remove(self.data)
class RemoveData(Command):
kwargs = ['data']
label = 'remove data'
def do(self, session):
session.data_collection.remove(self.data)
def undo(self, session):
session.data_collection.append(self.data)
class NewDataViewer(Command):
"""Add a new data viewer to the application
:param viewer: The class of viewer to create
:param data: The data object to initialize the viewer with, or None
    :type data: :class:`~glue.core.data.Data` or None
"""
kwargs = ['viewer', 'data']
label = 'new data viewer'
def do(self, session):
v = session.application.new_data_viewer(self.viewer, self.data)
self.created = v
return v
def undo(self, session):
self.created.close(warn=False)
class AddLayer(Command):
"""Add a new layer to a viewer
:param layer: The layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
:param viewer: The viewer to add the layer to
"""
kwargs = ['layer', 'viewer']
label = 'add layer'
def do(self, session):
self.viewer.add_layer(self.layer)
def undo(self, session):
self.viewer.remove_layer(self.layer)
class ApplyROI(Command):
"""
Apply an ROI to a client, updating subset states
:param client: Client to work on
:type client: :class:`~glue.core.client.Client`
:param roi: Roi to apply
:type roi: :class:`~glue.core.roi.Roi`
"""
kwargs = ['client', 'roi']
label = 'apply ROI'
def do(self, session):
self.old_states = {}
for data in self.client.data:
for subset in data.subsets:
self.old_states[subset] = subset.subset_state
self.client.apply_roi(self.roi)
def undo(self, session):
for data in self.client.data:
for subset in data.subsets:
if subset not in self.old_states:
subset.delete()
for k, v in self.old_states.items():
k.subset_state = v
class LinkData(Command):
pass
class SetViewState(Command):
pass
class NewTab(Command):
pass
class CloseTab(Command):
pass
class NewSubset(Command):
pass
class CopySubset(Command):
pass
class PasteSubset(Command):
pass
class SpecialPasteSubset(Command):
pass
class DeleteSubset(Command):
pass
class SetStyle(Command):
pass
class SetLabel(Command):
pass
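# A minimal sketch of the command pattern documented above, using a toy
# command and a plain dict as the session object; SetValue and
# _demo_command_stack are illustrative names only, and the sketch assumes no
# callbacks are registered on the stack.
class SetValue(Command):
    kwargs = ['key', 'value']
    label = 'set value'
    def do(self, session):
        self.previous = session.get(self.key)
        session[self.key] = self.value
    def undo(self, session):
        session[self.key] = self.previous
def _demo_command_stack():
    stack = CommandStack()
    stack.session = {}
    stack.do(SetValue(key='color', value='red'))
    assert stack.session['color'] == 'red'
    stack.undo()
    assert stack.session.get('color') is None
    stack.redo()
    return stack.session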
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/command.py",
"copies": "1",
"size": "7758",
"license": "bsd-3-clause",
"hash": -7749255635346583000,
"line_mean": 23.785942492,
"line_max": 83,
"alpha_frac": 0.6142046919,
"autogenerated": false,
"ratio": 4.063907805133578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000038034383082306405,
"num_lines": 313
} |
from __future__ import absolute_import, division, print_function
from abc import ABCMeta, abstractproperty, abstractmethod
import numpy as np
from glue.external import six
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import MatplotlibLayerArtist, ChangedTrigger
__all__ = ['HistogramLayerArtist']
@six.add_metaclass(ABCMeta)
class HistogramLayerBase(object):
lo = abstractproperty() # lo-cutoff for bin counting
hi = abstractproperty() # hi-cutoff for bin counting
nbins = abstractproperty() # number of bins
xlog = abstractproperty() # whether to space bins logarithmically
@abstractmethod
def get_data(self):
"""
Return array of bin counts
"""
pass
class HistogramLayerArtist(MatplotlibLayerArtist, HistogramLayerBase):
_property_set = MatplotlibLayerArtist._property_set + 'lo hi nbins xlog'.split()
lo = ChangedTrigger(0)
hi = ChangedTrigger(1)
nbins = ChangedTrigger(10)
xlog = ChangedTrigger(False)
att = ChangedTrigger()
def __init__(self, layer, axes):
super(HistogramLayerArtist, self).__init__(layer, axes)
self.ylog = False
self.cumulative = False
self.normed = False
self.y = np.array([])
self.x = np.array([])
self._y = np.array([])
self._scale_state = None
def get_data(self):
return self.x, self.y
def clear(self):
super(HistogramLayerArtist, self).clear()
self.x = np.array([])
self.y = np.array([])
self._y = np.array([])
def _calculate_histogram(self):
"""Recalculate the histogram, creating new patches"""
self.clear()
try:
data = self.layer[self.att].ravel()
if not np.isfinite(data).any():
return False
except IncompatibleAttribute as exc:
self.disable_invalid_attributes(*exc.args)
return False
if data.size == 0:
return
if self.lo > np.nanmax(data) or self.hi < np.nanmin(data):
return
if self.xlog:
data = np.log10(data)
rng = [np.log10(self.lo), np.log10(self.hi)]
else:
rng = self.lo, self.hi
nbinpatch = self._axes.hist(data,
bins=int(self.nbins),
range=rng)
self._y, self.x, self.artists = nbinpatch
return True
def _scale_histogram(self):
"""Modify height of bins to match ylog, cumulative, and norm"""
if self.x.size == 0:
return
y = self._y.astype(np.float)
dx = self.x[1] - self.x[0]
if self.normed:
div = y.sum() * dx
if div == 0:
div = 1
y /= div
if self.cumulative:
y = y.cumsum()
y /= y.max()
self.y = y
bottom = 0 if not self.ylog else 1e-100
for a, y in zip(self.artists, y):
a.set_height(y)
x, y = a.get_xy()
a.set_xy((x, bottom))
def _check_scale_histogram(self):
"""
If needed, rescale histogram to match cumulative/log/normed state.
"""
state = (self.normed, self.ylog, self.cumulative)
if state == self._scale_state:
return
self._scale_state = state
self._scale_histogram()
def update(self, view=None):
"""Sync plot.
        The _changed flag tracks whether the histogram needs to be
recalculated. If not, the properties of the existing
artists are updated
"""
self._check_subset_state_changed()
if self._changed:
if not self._calculate_histogram():
return
self._changed = False
self._scale_state = None
self._check_scale_histogram()
self._sync_style()
def _sync_style(self):
"""Update visual properties"""
style = self.layer.style
for artist in self.artists:
artist.set_facecolor(style.color)
artist.set_alpha(style.alpha)
artist.set_zorder(self.zorder)
artist.set_visible(self.visible and self.enabled)
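# A minimal, glue-independent sketch of the rescaling math in _scale_histogram
# above: normalization divides counts by (sum * bin width) and cumulative mode
# rescales the running sum to end at 1; the toy counts and edges are
# hypothetical stand-ins for self._y and self.x.
def _example_scale_histogram(normed=True, cumulative=False):
    import numpy as np
    counts = np.array([2.0, 5.0, 3.0])
    edges = np.array([0.0, 1.0, 2.0, 3.0])
    y = counts.astype(float)
    dx = edges[1] - edges[0]
    if normed:
        div = y.sum() * dx
        y = y / (div if div != 0 else 1)
    if cumulative:
        y = y.cumsum()
        y = y / y.max()
    return y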
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/histogram/layer_artist.py",
"copies": "2",
"size": "4273",
"license": "bsd-3-clause",
"hash": 3081000652849544700,
"line_mean": 29.0915492958,
"line_max": 84,
"alpha_frac": 0.5642405804,
"autogenerated": false,
"ratio": 3.9748837209302326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5539124301330232,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from abc import ABCMeta, abstractproperty, abstractmethod
import numpy as np
from glue.external import six
from glue.core.subset import Subset
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import MatplotlibLayerArtist, ChangedTrigger
__all__ = ['ScatterLayerArtist']
@six.add_metaclass(ABCMeta)
class ScatterLayerBase(object):
# which ComponentID to assign to X axis
xatt = abstractproperty()
# which ComponentID to assign to Y axis
yatt = abstractproperty()
@abstractmethod
def get_data(self):
"""
Returns
-------
array
The scatterpoint data as an (N, 2) array
"""
pass
class ScatterLayerArtist(MatplotlibLayerArtist, ScatterLayerBase):
xatt = ChangedTrigger()
yatt = ChangedTrigger()
_property_set = MatplotlibLayerArtist._property_set + ['xatt', 'yatt']
def __init__(self, layer, ax):
super(ScatterLayerArtist, self).__init__(layer, ax)
self.emphasis = None # an optional SubsetState of emphasized points
def _recalc(self):
self.clear()
assert len(self.artists) == 0
try:
x = self.layer[self.xatt].ravel()
y = self.layer[self.yatt].ravel()
except IncompatibleAttribute as exc:
self.disable_invalid_attributes(*exc.args)
return False
self.artists = self._axes.plot(x, y)
return True
def update(self, view=None, transpose=False):
self._check_subset_state_changed()
if self._changed: # erase and make a new artist
if not self._recalc(): # no need to update style
return
self._changed = False
has_emph = False
if self.emphasis is not None:
try:
s = Subset(self.layer.data)
s.subset_state = self.emphasis
if hasattr(self.layer, 'subset_state'):
s.subset_state &= self.layer.subset_state
x = s[self.xatt].ravel()
y = s[self.yatt].ravel()
self.artists.extend(self._axes.plot(x, y))
has_emph = True
except IncompatibleAttribute:
pass
self._sync_style()
if has_emph:
self.artists[-1].set_mec('green')
self.artists[-1].set_mew(2)
self.artists[-1].set_alpha(1)
def get_data(self):
try:
return self.layer[self.xatt].ravel(), self.layer[self.yatt].ravel()
except IncompatibleAttribute:
return np.array([]), np.array([])
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/scatter/layer_artist.py",
"copies": "2",
"size": "2696",
"license": "bsd-3-clause",
"hash": 4832079636540062000,
"line_mean": 28.6263736264,
"line_max": 79,
"alpha_frac": 0.5901335312,
"autogenerated": false,
"ratio": 3.9705449189985274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5560678450198527,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .__about__ import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__"
]
def binary_search(data, target, lo=0, hi=None):
    """
    Perform binary search on sorted list data for target. Returns int
    representing position of target in data, or -1 if target is not present.
    """
    hi = hi if hi is not None else len(data)
    if lo >= hi or hi > len(data) or target > data[-1] or target < data[0]:
        return -1
    mid = (lo + hi) // 2
    if data[mid] > target:
        return binary_search(data, target, lo=lo, hi=mid)
    elif data[mid] < target:
        return binary_search(data, target, lo=(mid + 1), hi=hi)
    else:
        return mid
def reverse_list_inplace(data):
    """
    Reverses a list in place without a temporary placeholder list. Returns
    the reversed list.
    """
    data.reverse()
    return data
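# A small sketch of how the helpers above behave on a toy sorted list; the
# values are arbitrary and only illustrate the expected return values.
def _demo_solutions():
    data = [1, 3, 5, 7, 9]
    assert binary_search(data, 7) == 3
    assert binary_search(data, 4) == -1
    assert reverse_list_inplace([1, 2, 3]) == [3, 2, 1]
    return data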
| {
"repo_name": "fly/solutions",
"path": "solutions/__init__.py",
"copies": "2",
"size": "1066",
"license": "bsd-3-clause",
"hash": -8064506460446911000,
"line_mean": 28.6111111111,
"line_max": 78,
"alpha_frac": 0.574108818,
"autogenerated": false,
"ratio": 3.638225255972696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5212334073972696,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from addie.utilities.file_handler import FileHandler
from qtpy.QtCore import Qt
class ExportTable(object):
current_path = ''
column_label = []
data = []
output_text = []
def __init__(self, parent=None, filename=''):
self.parent = parent
self.filename = filename
self.table_ui = self.parent.ui.h3_table
def run(self):
self.collect_data()
self.format_data()
self.export_data()
def collect_data(self):
nbr_row = self.table_ui.rowCount()
# collect current folder
_path = self.parent.current_folder
self.current_path = "current_folder: %s" %_path
return
#FIXME
_full_column_label = []
nbr_column = self.table_ui.columnCount()
for j in range(nbr_column):
_column_label = str(self.parent.ui.table.horizontalHeaderItem(j).text())
_full_column_label.append(_column_label)
self.column_label = _full_column_label
_data = []
for i in range(nbr_row):
_row = []
# select flag
_select_flag = self.retrieve_flag_state(row = i, column = 0)
_row.append(_select_flag)
# name
_name_item = self.get_item_value(i, 1)
_row.append(_name_item)
# runs
_run_item = self.get_item_value(i, 2)
_row.append(_run_item)
# sample formula
_sample_formula = self.get_item_value(i, 3)
_row.append(_sample_formula)
# mass density
_mass_density = self.get_item_value(i, 4)
_row.append(_mass_density)
# radius
_radius = self.get_item_value(i, 5)
_row.append(_radius)
# packing fraction
_packing_fraction = self.get_item_value(i, 6)
_row.append(_packing_fraction)
# sample shape
_sample_shape = self.retrieve_sample_shape(row = i, column = 7)
_row.append(_sample_shape)
# do abs corr?
_do_corr = self.retrieve_abs_corr_state(row = i, column = 8)
_row.append(_do_corr)
_data.append(_row)
self.data = _data
def get_item_value(self, row, column):
if self.parent.ui.table.item(row, column) is None:
return ''
return str(self.parent.ui.table.item(row, column).text())
def format_data(self):
_current_path = self.current_path
_column_label = self.column_label
_data = self.data
output_text = []
output_text.append("#" + _current_path)
_title = "|".join(_column_label)
output_text.append("#" + _title)
for _row in _data:
_formatted_row = "|".join(_row)
output_text.append(_formatted_row)
self.output_text = output_text
def export_data(self):
_filename = self.filename
if _filename == '':
return
_output_text = self.output_text
_o_file = FileHandler(filename = _filename)
_o_file.create_ascii(contain = _output_text)
def retrieve_abs_corr_state(self, row=0, column=8):
if self.parent.ui.table.cellWidget(row, 8) is None:
return "False"
_widget = self.parent.ui.table.cellWidget(row, 8).children()[1]
if _widget.checkState() == Qt.Checked:
return 'True'
else:
return 'False'
def retrieve_sample_shape(self, row=0, column=7):
_widget = self.parent.ui.table.cellWidget(row, column)
if _widget is None:
return 'Cylinder'
_selected_index = _widget.currentIndex()
_sample_shape = str(_widget.itemText(_selected_index))
return _sample_shape
def retrieve_flag_state(self, row=0, column=0):
_widget = self.parent.ui.table.cellWidget(row, column).children()[1]
if _widget is None:
return "False"
if _widget.checkState() == Qt.Checked:
return "True"
else:
return "False"
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/export_table.py",
"copies": "1",
"size": "4150",
"license": "mit",
"hash": -6889428934039647000,
"line_mean": 28.8561151079,
"line_max": 84,
"alpha_frac": 0.5501204819,
"autogenerated": false,
"ratio": 3.7119856887298748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9750066367645429,
"avg_score": 0.0024079605968889465,
"num_lines": 139
} |
from __future__ import (absolute_import, division, print_function)
from addie.utilities.list_runs_parser import ListRunsParser
def get_list_of_runs_found_and_not_found(str_runs="",
oncat_result={},
check_not_found=True):
"""This method compare the list of runs from the string passed in, to the
output produced by oncat. If a run is in the two inputs, it means it has
been found, if not, it hasn't been found"""
if str_runs:
o_parser = ListRunsParser(current_runs=str_runs)
list_of_runs = o_parser.list_current_runs
else:
check_not_found = False
list_of_runs_found = []
for _json in oncat_result:
_run_number = _json['indexed']['run_number']
list_of_runs_found.append("{}".format(_run_number))
if check_not_found:
list_of_runs_not_found = set(list_of_runs) - set(list_of_runs_found)
else:
list_of_runs_not_found = []
return {'not_found': list_of_runs_not_found,
'found': list_of_runs_found}
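# A minimal sketch of the comparison above with a hypothetical oncat payload;
# the 'indexed'/'run_number' nesting mirrors the loop above, the run numbers
# are made up, and it is assumed that ListRunsParser accepts a range string
# such as "1234-1236".
def _example_runs_found():
    fake_oncat_result = [{'indexed': {'run_number': 1234}},
                         {'indexed': {'run_number': 1236}}]
    return get_list_of_runs_found_and_not_found(str_runs="1234-1236",
                                                oncat_result=fake_oncat_result)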
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/utilities.py",
"copies": "1",
"size": "1088",
"license": "mit",
"hash": 1966877095336831000,
"line_mean": 35.2666666667,
"line_max": 77,
"alpha_frac": 0.5983455882,
"autogenerated": false,
"ratio": 3.543973941368078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9642319529568077,
"avg_score": 0,
"num_lines": 30
} |
from __future__ import absolute_import, division, print_function
from appr.api.gevent_app import GeventApp
from appr.commands.command_base import CommandBase
class RunServerCmd(CommandBase):
name = 'run-server'
help_message = 'Run the registry server (with gunicorn)'
parse_unknown = False
def __init__(self, options, unknown=None):
super(RunServerCmd, self).__init__(options)
self.options = options
self.status = {}
def _call(self):
GeventApp(self.options).run()
@classmethod
def _add_arguments(cls, parser):
parser.add_argument("-p", "--port", nargs="?", default=5000, type=int,
help="server port listen")
parser.add_argument("-b", "--bind", nargs="?", default="0.0.0.0",
help="server bind address")
parser.add_argument("--db-class", nargs="?", default="filesystem",
help="db class for storage")
def _render_dict(self):
return self.status
def _render_console(self):
return self.status['result']
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/runserver.py",
"copies": "2",
"size": "1097",
"license": "apache-2.0",
"hash": -2947900130490309600,
"line_mean": 32.2424242424,
"line_max": 78,
"alpha_frac": 0.5970829535,
"autogenerated": false,
"ratio": 4.033088235294118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5630171188794117,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from appr.auth import ApprAuth
from appr.commands.command_base import CommandBase, PackageSplit
class LogoutCmd(CommandBase):
name = 'logout'
help_message = "logout"
def __init__(self, options):
super(LogoutCmd, self).__init__(options)
self.status = None
self.registry_host = options.registry_host
self.package_parts = options.package_parts
pname = self.package_parts.get('package', None)
namespace = self.package_parts.get('namespace', None)
self.package = None
if pname:
self.package = "%s/%s" % (namespace, pname)
elif namespace:
self.package = namespace
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
parser.add_argument('registry', nargs='?', default=None, action=PackageSplit,
help="registry url: quay.io[/namespace][/repo]\n" +
"If namespace and/or repo are passed, creds only logout for them")
def _call(self):
client = self.RegistryClient(self.registry_host)
ApprAuth().delete_token(client.host, scope=self.package)
self.status = "Logout complete"
if self.registry_host != '*':
self.status += " from %s" % self.registry_host
def _render_dict(self):
return {"status": self.status, 'host': self.registry_host, "scope": self.package}
def _render_console(self):
return " >>> %s" % self.status
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/logout.py",
"copies": "2",
"size": "1567",
"license": "apache-2.0",
"hash": -7046009653481206000,
"line_mean": 36.3095238095,
"line_max": 94,
"alpha_frac": 0.6158264199,
"autogenerated": false,
"ratio": 3.9771573604060912,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5592983780306091,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from appr.commands.command_base import CommandBase
from appr.display import print_channels
class ChannelCmd(CommandBase):
name = 'channel'
help_message = "Manage package channels"
def __init__(self, options):
super(ChannelCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.delete = options.delete
self.channel = options.channel
self.remove = options.remove_release
self.add = options.set_release
self.version = options.version
self.version_parts = options.version_parts
self.status = None
self.channels = {}
self.ssl_verify = options.cacert or not options.insecure
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
parser.add_argument("-c", "--channel", default=None, help="channel name")
parser.add_argument("--set-release", default=False, action='store_true',
help="Add release to the channel")
parser.add_argument("--delete", default=False, action='store_true',
help="delete the channel")
parser.add_argument("--remove-release", default=False, action='store_true',
help="Remove a release from the channel")
def _call(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
package = self.package
name = self.channel
if self.delete is True:
self.channels = client.delete_channel(package, name)
self.status = ">>> Channel '%s' on '%s' deleted" % (name, package)
elif self.add:
self.channels = client.create_channel_release(package, name, self.version)
self.status = ">>> Release '%s' added on '%s'" % (self.version, name)
elif self.remove:
self.channels = client.delete_channel_release(package, name, self.version)
self.status = ">>> Release '%s' removed from '%s'" % (self.version, name)
else:
self.channels = client.show_channels(package, name)
if name is not None:
self.channels = [self.channels]
self.status = print_channels(self.channels)
def _render_dict(self):
return self.channels
def _render_console(self):
return "%s" % self.status
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/channel.py",
"copies": "2",
"size": "2579",
"license": "apache-2.0",
"hash": -5383369982921837000,
"line_mean": 40.5967741935,
"line_max": 89,
"alpha_frac": 0.6157425359,
"autogenerated": false,
"ratio": 4.166397415185783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5782139951085784,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from appr.exception import PackageReleaseNotFound, raise_channel_not_found
class ChannelBase(object):
def __init__(self, name, package, current=None):
self.package = package
self.name = name
self.current = current
def exists(self):
return self._exists()
@classmethod
def get(cls, name, package):
raise NotImplementedError
def current_release(self):
return self.current
def add_release(self, release, package_class):
if self._check_release(release, package_class) is False:
raise PackageReleaseNotFound("Release %s doesn't exist for package %s" %
(release, self.package), {
"package": self.package,
"release": release})
self.current = release
return self.save()
def remove_release(self, release):
if not self.exists():
raise_channel_not_found(self.package, self.name)
return self._remove_release(release)
def _check_release(self, release_name, package_class):
release = package_class.get_release(self.package, release_name)
if release is None or str(release) != release_name:
return False
else:
return True
def to_dict(self):
releases = self.releases()
return ({"releases": releases, "name": self.name, "current": self.current_release()})
def __repr__(self):
return "%s(%s, %s)" % (self.__class__, self.name, self.package)
@classmethod
def all(cls, package):
raise NotImplementedError
def releases(self):
""" Returns the list of releases """
raise NotImplementedError
def _add_release(self, release):
raise NotImplementedError
def _remove_release(self, release):
raise NotImplementedError
def _exists(self):
""" Check if the channel is saved already """
raise NotImplementedError
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
@classmethod
def dump_all(cls, package_class=None):
""" produce a dict with all packages """
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/models/channel_base.py",
"copies": "2",
"size": "2317",
"license": "apache-2.0",
"hash": 4612868594318555000,
"line_mean": 29.4868421053,
"line_max": 93,
"alpha_frac": 0.5986189038,
"autogenerated": false,
"ratio": 4.661971830985915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002947763651933338,
"num_lines": 76
} |
from __future__ import absolute_import, division, print_function
from appr.models.channel_base import ChannelBase
from appr.models.kv.models_index_base import ModelsIndexBase
class ChannelKvBase(ChannelBase):
index_class = ModelsIndexBase
@property
def index(self):
return self.index_class(self.package)
@classmethod
def all(cls, package):
index = cls.index_class(package)
result = []
for channel_data in index.channels():
channel = cls(channel_data['name'], package, channel_data['current'])
result.append(channel)
return result
@classmethod
def get(cls, name, package):
index = cls.index_class(package)
channel_dict = index.channel(name)
return cls(name, package, channel_dict['current'])
def releases(self):
return self.index.channel_releases(self.name)
def _remove_release(self, release):
return self.index.delete_channel_release(self.name, release)
def _exists(self):
return self.index.ischannel_exists(self.name)
def save(self):
return self.index.add_channel(self.name, self.current)
def delete(self):
return self.index.delete_channel(self.name)
@classmethod
def dump_all(cls, package_class=None):
index = cls.index_class()
result = []
for package_name in index.package_names():
packageindex = cls.index_class(package_name)
result += packageindex.channels()
return result
| {
"repo_name": "app-registry/appr",
"path": "appr/models/kv/channel_kv_base.py",
"copies": "2",
"size": "1530",
"license": "apache-2.0",
"hash": -3751629563330509000,
"line_mean": 29,
"line_max": 81,
"alpha_frac": 0.6503267974,
"autogenerated": false,
"ratio": 3.9947780678851177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023912003825920613,
"num_lines": 51
} |
from __future__ import absolute_import, division, print_function
from argparse import ArgumentParser
from collections import Counter
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
def print_nearest_words(args):
word = args.word.lower().strip()
# Load the word vectors
embeddings_index = {}
f = open(args.vectors)
for line in f:
values = line.split(' ')
w = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[w] = coefs
f.close()
    # w here is the last word read from the vectors file; it is used only to
    # initialize the query vector with the right shape
    w_v = np.zeros_like(embeddings_index[w])
for w in word.split():
if w not in embeddings_index.keys():
continue
w_v += embeddings_index[w]
# Get the similarity scores
score_dict = {}
for w in embeddings_index.keys():
if word == w:
continue
score = cosine_similarity(w_v.reshape(1, -1), embeddings_index[w].reshape(1, -1))[0][0]
score_dict[w] = score
closest = Counter(score_dict).most_common(args.num_words)
close_words = []
for word, score in closest:
if args.verbose:
print(score, word)
else:
close_words.append(word)
if not args.verbose:
print(', '.join(close_words))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--vectors', default='vectors.txt', help='Word vector file')
parser.add_argument('--vocab', default='vocab.txt', help='Vocab file')
parser.add_argument('--word', default='dollar', help='Input word')
parser.add_argument('--verbose', type=bool, default=False, help='Print score')
parser.add_argument('--num_words', type=int, default=5, help='Number of closest words to print')
args = parser.parse_args()
print_nearest_words(args)
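# Each line of --vectors is expected to hold a word followed by its
# space-separated float components (the parsing loop above splits on ' ').
# The hypothetical helper below writes a tiny file in that format, after which
#   python test_word_vectors.py --vectors toy_vectors.txt --word b --num_words 2
# would print the remaining words ranked by cosine similarity.
def _write_toy_vectors(path='toy_vectors.txt'):
    rows = {'a': [1.0, 0.0], 'b': [0.9, 0.1], 'c': [0.0, 1.0]}
    with open(path, 'w') as f:
        for w, coefs in rows.items():
            f.write('{} {}\n'.format(w, ' '.join(str(c) for c in coefs)))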
| {
"repo_name": "hardikp/fnlp",
"path": "test_word_vectors.py",
"copies": "1",
"size": "1800",
"license": "mit",
"hash": -3391694802570671000,
"line_mean": 29,
"line_max": 100,
"alpha_frac": 0.6227777778,
"autogenerated": false,
"ratio": 3.71900826446281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9838108495977884,
"avg_score": 0.0007355092569852308,
"num_lines": 60
} |
from __future__ import absolute_import, division, print_function
from argparse import ArgumentParser
from subprocess import Popen
import os
DEPENDENCIES = [
('develop', [
# Install development packages
'-rdevelop-requirements.txt',
'-rtests/develop-requirements.txt',
# Install extra packages
'-rrequirements.txt',
'-rtest-requirements.txt'
]),
('pip', [
'-rrequirements.txt',
'-rtest-requirements.txt'
]),
('travis', [
'--editable=git+https://github.com/OpenEntityMap/oem-framework.git@{BRANCH}#egg=oem-framework',
'--editable=git+https://github.com/OpenEntityMap/oem-core.git@{BRANCH}#egg=oem-core',
'--editable=git+https://github.com/OpenEntityMap/oem-format-json.git@{BRANCH}#egg=oem-format-json',
'--editable=git+https://github.com/OpenEntityMap/oem-storage-file.git@{BRANCH}#egg=oem-storage-file',
'-rrequirements.txt',
'-rtest-requirements.txt'
])
]
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('env')
args = parser.parse_args()
# Retrieve branch
branch = os.environ.get('CURRENT_BRANCH') or 'master'
# Install environment dependencies
env_parts = args.env.split('-')
for key, dependencies in DEPENDENCIES:
if key not in env_parts:
continue
for dep in dependencies:
dep = dep.replace('{BRANCH}', branch)
# Install dependency
print('Installing dependency: %r' % (dep,))
process = Popen(['pip', 'install', dep])
process.wait()
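# Example invocation, using the keys defined in DEPENDENCIES above:
#   CURRENT_BRANCH=develop python scripts/install_dependencies.py travis
# The positional 'env' argument is split on '-' and every matching key's
# requirement list is pip-installed, with {BRANCH} replaced by the
# CURRENT_BRANCH environment variable (or 'master' when it is unset).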
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "scripts/install_dependencies.py",
"copies": "1",
"size": "1629",
"license": "bsd-3-clause",
"hash": 4670344135389564,
"line_mean": 28.0892857143,
"line_max": 109,
"alpha_frac": 0.612645795,
"autogenerated": false,
"ratio": 3.8329411764705883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49455869714705886,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from astropy.wcs import WCS
from qtpy.QtWidgets import QMessageBox
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core import command
from glue.viewers.matplotlib.qt.data_viewer import MatplotlibDataViewer
from glue.viewers.scatter.qt.layer_style_editor import ScatterLayerStyleEditor
from glue.viewers.scatter.layer_artist import ScatterLayerArtist
from glue.viewers.image.qt.layer_style_editor import ImageLayerStyleEditor
from glue.viewers.image.qt.layer_style_editor_subset import ImageLayerSubsetStyleEditor
from glue.viewers.image.layer_artist import ImageLayerArtist, ImageSubsetLayerArtist
from glue.viewers.image.qt.options_widget import ImageOptionsWidget
from glue.viewers.image.state import ImageViewerState
from glue.viewers.image.compat import update_image_viewer_state
from glue.external.echo import delay_callback
from glue.external.modest_image import imshow
from glue.viewers.image.composite_array import CompositeArray
# Import the mouse mode to make sure it gets registered
from glue.viewers.image.contrast_mouse_mode import ContrastBiasMode # noqa
__all__ = ['ImageViewer']
IDENTITY_WCS = WCS(naxis=2)
IDENTITY_WCS.wcs.ctype = ["X", "Y"]
IDENTITY_WCS.wcs.crval = [0., 0.]
IDENTITY_WCS.wcs.crpix = [1., 1.]
IDENTITY_WCS.wcs.cdelt = [1., 1.]
class ImageViewer(MatplotlibDataViewer):
LABEL = '2D Image'
_toolbar_cls = MatplotlibViewerToolbar
_layer_style_widget_cls = {ImageLayerArtist: ImageLayerStyleEditor,
ImageSubsetLayerArtist: ImageLayerSubsetStyleEditor,
ScatterLayerArtist: ScatterLayerStyleEditor}
_state_cls = ImageViewerState
_options_cls = ImageOptionsWidget
allow_duplicate_data = True
# NOTE: _data_artist_cls and _subset_artist_cls are not defined - instead
# we override get_data_layer_artist and get_subset_layer_artist for
# more advanced logic.
tools = ['select:rectangle', 'select:xrange',
'select:yrange', 'select:circle',
'select:polygon', 'image:contrast_bias']
def __init__(self, session, parent=None, state=None):
super(ImageViewer, self).__init__(session, parent=parent, wcs=True, state=state)
self.axes.set_adjustable('datalim')
self.state.add_callback('x_att', self._set_wcs)
self.state.add_callback('y_att', self._set_wcs)
self.state.add_callback('slices', self._on_slice_change)
self.state.add_callback('reference_data', self._set_wcs)
self.axes._composite = CompositeArray()
self.axes._composite_image = imshow(self.axes, self.axes._composite,
origin='lower', interpolation='nearest')
self._set_wcs()
def close(self, **kwargs):
super(ImageViewer, self).close(**kwargs)
if self.axes._composite_image is not None:
self.axes._composite_image.remove()
self.axes._composite_image = None
def _update_axes(self, *args):
if self.state.x_att_world is not None:
self.axes.set_xlabel(self.state.x_att_world.label)
if self.state.y_att_world is not None:
self.axes.set_ylabel(self.state.y_att_world.label)
self.axes.figure.canvas.draw()
def add_data(self, data):
result = super(ImageViewer, self).add_data(data)
        # If this is the first layer (or the first after all layers were
        # removed), set the WCS for the axes.
if len(self.layers) == 1:
self._set_wcs()
return result
def _on_slice_change(self, event=None):
if self.state.x_att is None or self.state.y_att is None or self.state.reference_data is None:
return
coords = self.state.reference_data.coords
ix = self.state.x_att.axis
iy = self.state.y_att.axis
x_dep = list(coords.dependent_axes(ix))
y_dep = list(coords.dependent_axes(iy))
if ix in x_dep:
x_dep.remove(ix)
if iy in x_dep:
x_dep.remove(iy)
if ix in y_dep:
y_dep.remove(ix)
if iy in y_dep:
y_dep.remove(iy)
if x_dep or y_dep:
self._set_wcs(event=event, relim=False)
def _set_wcs(self, event=None, relim=True):
if self.state.x_att is None or self.state.y_att is None or self.state.reference_data is None:
return
ref_coords = self.state.reference_data.coords
if hasattr(ref_coords, 'wcs'):
self.axes.reset_wcs(slices=self.state.wcsaxes_slice, wcs=ref_coords.wcs)
elif hasattr(ref_coords, 'wcsaxes_dict'):
self.axes.reset_wcs(slices=self.state.wcsaxes_slice, **ref_coords.wcsaxes_dict)
else:
self.axes.reset_wcs(IDENTITY_WCS)
self._update_appearance_from_settings()
self._update_axes()
if relim:
self.state.reset_limits()
# TODO: move some of the ROI stuff to state class?
def apply_roi(self, roi):
if len(self.layers) > 0:
subset_state = self._roi_to_subset_state(roi)
cmd = command.ApplySubsetState(data_collection=self._data,
subset_state=subset_state)
self._session.command_stack.do(cmd)
else:
# Make sure we force a redraw to get rid of the ROI
self.axes.figure.canvas.draw()
def _roi_to_subset_state(self, roi):
if self.state.x_att is None or self.state.y_att is None or self.state.reference_data is None:
return
# TODO Does subset get applied to all data or just visible data?
x_comp = self.state.x_att.parent.get_component(self.state.x_att)
y_comp = self.state.y_att.parent.get_component(self.state.y_att)
return x_comp.subset_from_roi(self.state.x_att, roi,
other_comp=y_comp,
other_att=self.state.y_att,
coord='x')
def _scatter_artist(self, axes, state, layer=None, layer_state=None):
if len(self._layer_artist_container) == 0:
QMessageBox.critical(self, "Error", "Can only add a scatter plot "
"overlay once an image is present",
buttons=QMessageBox.Ok)
return None
return ScatterLayerArtist(axes, state, layer=layer, layer_state=None)
def get_data_layer_artist(self, layer=None, layer_state=None):
if layer.ndim == 1:
cls = self._scatter_artist
else:
cls = ImageLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
def get_subset_layer_artist(self, layer=None, layer_state=None):
if layer.ndim == 1:
cls = self._scatter_artist
else:
cls = ImageSubsetLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
@staticmethod
def update_viewer_state(rec, context):
return update_image_viewer_state(rec, context)
def show_crosshairs(self, x, y):
if getattr(self, '_crosshairs', None) is not None:
self._crosshairs.remove()
self._crosshairs, = self.axes.plot([x], [y], '+', ms=12,
mfc='none', mec='#d32d26',
mew=1, zorder=100)
self.axes.figure.canvas.draw()
def hide_crosshairs(self):
if getattr(self, '_crosshairs', None) is not None:
self._crosshairs.remove()
self._crosshairs = None
self.axes.figure.canvas.draw()
def update_aspect(self, aspect=None):
super(ImageViewer, self).update_aspect(aspect=aspect)
if self.state.reference_data is not None and self.state.x_att is not None and self.state.y_att is not None:
nx = self.state.reference_data.shape[self.state.x_att.axis]
ny = self.state.reference_data.shape[self.state.y_att.axis]
self.axes.set_xlim(-0.5, nx - 0.5)
self.axes.set_ylim(-0.5, ny - 0.5)
self.axes.figure.canvas.draw()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/image/qt/data_viewer.py",
"copies": "1",
"size": "8334",
"license": "bsd-3-clause",
"hash": -9016623576057325000,
"line_mean": 38.4976303318,
"line_max": 115,
"alpha_frac": 0.6253899688,
"autogenerated": false,
"ratio": 3.592241379310345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4717631348110345,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from attr import Attribute
from attr._make import NOTHING, make_class
def simple_class(cmp=False, repr=False, hash=False):
"""
Return a new simple class.
"""
return make_class(
"C", ["a", "b"],
cmp=cmp, repr=repr, hash=hash, init=True,
)
def simple_attr(name, default=NOTHING, validator=None, repr=True,
cmp=True, hash=True, init=True):
"""
Return an attribute with a name and no other bells and whistles.
"""
return Attribute(
name=name, default=default, validator=validator, repr=repr,
cmp=cmp, hash=hash, init=init
)
class TestSimpleClass(object):
"""
Tests for the testing helper function `make_class`.
"""
def test_returns_class(self):
"""
Returns a class object.
"""
assert type is simple_class().__class__
    def test_returns_distinct_classes(self):
"""
Each call returns a completely new class.
"""
assert simple_class() is not simple_class()
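# A small sketch of how the helpers above can be used; the attribute values
# are arbitrary.
def _demo_helpers():
    C = simple_class(cmp=True)
    instance = C(a=1, b=2)
    assert instance == C(a=1, b=2)
    attr = simple_attr("x", default=42)
    assert attr.name == "x" and attr.default == 42
    return instance, attr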
| {
"repo_name": "cyli/attrs",
"path": "tests/__init__.py",
"copies": "1",
"size": "1088",
"license": "mit",
"hash": 4652770657683999000,
"line_mean": 24.9047619048,
"line_max": 68,
"alpha_frac": 0.6084558824,
"autogenerated": false,
"ratio": 3.8718861209964412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49803420033964413,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .auth import SymantecAuth
from .order import (
Order, GetOrderByPartnerOrderID, GetOrdersByDateRange,
GetModifiedOrders, ModifyOrder, ChangeApproverEmail, Reissue, Revoke,
GetQuickApproverList, ValidateOrderParameters
)
from .email import ResendEmail
from .session import SymantecSession
class Symantec(object):
order_class = Order
get_order_by_partner_order_id_class = GetOrderByPartnerOrderID
get_orders_by_date_range_class = GetOrdersByDateRange
get_modified_orders_class = GetModifiedOrders
get_quick_approver_list_class = GetQuickApproverList
modify_order_class = ModifyOrder
validate_order_parameters_class = ValidateOrderParameters
change_approver_email_class = ChangeApproverEmail
reissue_class = Reissue
revoke_class = Revoke
resend_email_class = ResendEmail
def __init__(self, username, password,
url="https://api.geotrust.com/webtrust/partner"):
self.url = url
self.session = SymantecSession()
self.session.auth = SymantecAuth(username, password)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
self.session.close()
def submit(self, obj):
resp = self.session.post(self.url, obj.serialize())
resp.raise_for_status()
return obj.response(resp.content)
def order(self, **kwargs):
obj = self.order_class(**kwargs)
return self.submit(obj)
def get_order_by_partner_order_id(self, **kwargs):
return self.submit(self.get_order_by_partner_order_id_class(**kwargs))
def get_orders_by_date_range(self, **kwargs):
return self.submit(self.get_orders_by_date_range_class(**kwargs))
def get_modified_orders(self, **kwargs):
return self.submit(self.get_modified_orders_class(**kwargs))
def modify_order(self, **kwargs):
return self.submit(self.modify_order_class(**kwargs))
def change_approver_email(self, **kwargs):
return self.submit(self.change_approver_email_class(**kwargs))
def reissue(self, **kwargs):
return self.submit(self.reissue_class(**kwargs))
def revoke(self, **kwargs):
return self.submit(self.revoke_class(**kwargs))
def resend_email(self, **kwargs):
obj = self.resend_email_class(**kwargs)
return self.submit(obj)
def validate_order_parameters(self, **kwargs):
return self.submit(self.validate_order_parameters_class(**kwargs))
def get_quick_approver_list(self, **kwargs):
return self.submit(self.get_quick_approver_list_class(**kwargs))
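# A hedged usage sketch: the exact keyword arguments for each operation are
# defined by the command classes in symantecssl.order, so the partner_order_id
# keyword below is illustrative only, not a confirmed parameter name.
def _example_order_lookup(username, password, partner_order_id):
    with Symantec(username, password) as api:
        return api.get_order_by_partner_order_id(
            partner_order_id=partner_order_id)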
| {
"repo_name": "jmvrbanac/symantecssl",
"path": "symantecssl/core.py",
"copies": "1",
"size": "2696",
"license": "apache-2.0",
"hash": 6684378858191363000,
"line_mean": 32.2839506173,
"line_max": 78,
"alpha_frac": 0.6828635015,
"autogenerated": false,
"ratio": 3.6334231805929917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9816286682092992,
"avg_score": 0,
"num_lines": 81
} |
from __future__ import (absolute_import, division, print_function)
from bcube_owslib.etree import etree
from bcube_owslib.util import nspath, testXMLValue, openURL
from bcube_owslib.util import xml_to_dict as _xml_to_dict
from datetime import datetime
from dateutil import parser
namespaces = {
'wml1.1':'{http://www.cuahsi.org/waterML/1.1/}',
'wml1.0':'{http://www.cuahsi.org/waterML/1.0/}',
'xsi':'{http://www.w3.org/2001/XMLSchema-instance',
'xsd':'{http://www.w3.org/2001/XMLSchema'
}
def ns(namespace):
return namespaces.get(namespace)
class XMLParser(object):
"""
    Convenience class; provides some useful shortcut methods to make retrieving xml elements from etree
a little easier.
"""
def __init__(self,xml_root,namespace):
try:
self._root = etree.parse(xml_root)
except:
self._root = xml_root
if not namespace in namespaces:
raise ValueError('Unsupported namespace passed in to parser!')
self._ns = namespace
def _find(self,tofind):
try:
return self._root.find(namespaces.get(self._ns) + tofind)
except:
return None
def _findall(self,tofind):
try:
return self._root.findall(namespaces.get(self._ns) + tofind)
except:
return None
class SitesResponse(XMLParser):
"""
Parses the response from a 'GetSites' request
Parameters
===========
    :xml - A file-like object that holds the xml response from the request.
    Return
    =======
    An object constructed from a dictionary parse of the response. The object
    supports item access by index or site code and can iterate over the sites returned.
"""
def __init__(self,xml,version='wml1.1'):
super(SitesResponse,self).__init__(xml,version)
self.parse_sites_response()
def __iter__(self):
for s in self.sites:
yield s
def __getitem__(self,key):
if isinstance(key,int) and key < len(self.sites):
return self.sites[key]
if isinstance(key,str):
site = [site for site in self.sites for code in site.site_info.site_codes if code == key]
if len(site) > 0:
return site[0]
raise KeyError('Unknown key ' + str(key))
def parse_sites_response(self,xml=None):
"""
"""
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
self.query_info = QueryInfo(self._find('queryInfo'), self._ns)
self.sites = [Site(site, self._ns) for site in self._findall('site')]
# except:
# raise ValueError('Cannot parse sitesResponse element correctly')
"""Accesability properties/methods"""
@property
def site_codes(self):
return [site.site_info.site_codes for site in self.sites]
@property
def site_names(self):
return [site.site_info.site_name for site in self.sites]
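# A minimal sketch of reading a GetSites response with the class above; the
# file name is hypothetical and the file must contain WaterML 1.1 XML.
def _example_sites_response(path='sites_response.xml'):
    with open(path) as f:
        response = SitesResponse(f, version='wml1.1')
    for site in response:
        print(site.name, site.codes, site.variable_codes)
    return response.site_codes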
class QueryInfo(XMLParser):
"""
"""
def __init__(self,xml_root,version='wml1.1'):
super(QueryInfo, self).__init__(xml_root,version)
self.parse_query_info()
def parse_query_info(self, xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
# create queryinfo object from dict
xml_dict = _xml_to_dict(self._root)
self.creation_time = parser.parse(xml_dict.get('creation_time')) if xml_dict.get('creation_time') is not None else None
self.notes = [testXMLValue(note) for note in self._findall('note')]
self.criteria = Criteria(self._find('criteria'), self._ns)
# except:
# raise ValueError('Unable to parse queryInfo element correctly')
class Criteria(XMLParser):
"""
"""
def __init__(self,xml_root,version='wml1.1'):
super(Criteria, self).__init__(xml_root,version)
self.parse_criteria()
def parse_criteria(self, xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
xml_dict = _xml_to_dict(self._root,depth=4)
self.method_called = self._root.attrib.get('MethodCalled')
self.location_param = xml_dict.get('location_param')
self.variable_param = xml_dict.get('variable_param')
try:
self.begin_date_time = parser.parse(xml_dict['begin_date_time'])
except:
self.begin_date_time = None
try:
self.end_date_time = parser.parse(xml_dict['end_date_time'])
except:
self.end_date_time = None
self.parameters = [(param.attrib.get('name'),param.attrib.get('value')) for param in self._findall('parameter')]
# except:
# raise ValueError('Unable to parse xml for criteria element')
class Site(XMLParser):
def __init__(self, xml, version='wml1.1'):
super(Site,self).__init__(xml,version)
self.parse_site()
def __iter__(self):
for c in self.series_catalogs:
yield c
def __getitem__(self,key):
if isinstance(key,int) and key < len(self.series_catalogs):
return self.series_catalogs[key]
if isinstance(key,str):
var = [series.variable for catalog in self.series_catalogs for series in catalog if series.code == key]
if len(var) > 0:
return var[0]
raise KeyError('Unknown key ' + str(key))
"""Accessor propeties/methods"""
@property
def name(self):
return self.site_info.site_name
@property
def codes(self):
return self.site_info.site_codes
@property
def variable_names(self):
return list(set([series.variable.variable_name for catalog in self.series_catalogs for series in catalog]))
@property
def variable_codes(self):
return list(set([series.variable.variable_code for catalog in self.series_catalogs for series in catalog]))
@property
def geo_coords(self):
return self.site_info.location.geo_coords
@property
def latitudes(self):
return [g[1] for g in self.site_info.location.geo_coords]
@property
def longitudes(self):
return [g[0] for g in self.site_info.location.geo_coords]
def parse_site(self,xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
self.site_info = SiteInfo(self._find('siteInfo'), self._ns)
self.series_catalogs = [SeriesCatalog(elm, self._ns) for elm in self._findall('seriesCatalog')]
# self.extension = Extension(self._find('extension'), self._ns)
# except:
# raise ValueError('Unable to parse site element correctly')
class SiteInfo(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(SiteInfo,self).__init__(xml,version)
self.parse_siteinfo()
def parse_siteinfo(self,xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
xml_dict = _xml_to_dict(self._root)
self.site_name = xml_dict.get('site_name')
self.site_codes = [testXMLValue(code) for code in self._findall('siteCode')]
self.elevation = xml_dict.get('elevation_m')
self.vertical_datum = xml_dict.get('vertical_datum')
self.site_types = [testXMLValue(typ) for typ in self._findall('siteType')]
self.site_properties = dict([(prop.attrib.get('name'),testXMLValue(prop)) for prop in self._findall('siteProperty')])
self.altname = xml_dict.get('altname')
self.notes = [testXMLValue(note) for note in self._findall('note')]
# sub-objects
tzi = self._find('timeZoneInfo')
if tzi is not None:
self.time_zone_info = TimeZoneInfo(tzi, self._ns)
self.location = Location(self._find('geoLocation'), self._ns)
# except:
# raise ValueError('Unable to parse siteInfo element')
class Location(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Location,self).__init__(xml,version)
self.parse_location()
def parse_location(self,xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
xml_dict = _xml_to_dict(self._root)
geogs = self._findall('geogLocation')
self.geo_coords = list()
self.srs = list()
for g in geogs:
self.geo_coords.append((testXMLValue(g.find(ns(self._ns) + 'longitude')),testXMLValue(g.find(ns(self._ns) + 'latitude'))))
self.srs.append(g.attrib.get('srs'))
locsite = self._findall('localSiteXY')
self.local_sites = list()
self.notes = list()
self.projections = list()
for ls in locsite:
z = testXMLValue(ls.find(ns(self._ns) + 'Z'))
if z is not None:
self.local_sites.append((testXMLValue(ls.find(ns(self._ns) + 'X')),testXMLValue(ls.find(ns(self._ns) + 'Y')),z))
else:
self.local_sites.append((testXMLValue(ls.find(ns(self._ns) + 'X')),testXMLValue(ls.find(ns(self._ns) + 'Y')),'0'))
self.notes.append([testXMLValue(note) for note in ls.findall(ns(self._ns) + 'note')])
self.projections.append(ls.attrib.get('projectionInformation'))
# except:
# raise ValueError('Unable to parse geoLocation element')
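# Editor's note (descriptive comment, not in the original source): ``Location.geo_coords``
# stores each geogLocation as a (longitude, latitude) tuple of strings, which is why the
# ``Site.latitudes`` property reads index 1 and ``Site.longitudes`` reads index 0 of each tuple.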
class TimeZoneInfo(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(TimeZoneInfo,self).__init__(xml,version)
self.parse_timezoneinfo()
def parse_timezoneinfo(self,xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
xml_dict = _xml_to_dict(self._root)
default = self._find('defaultTimeZone')
if default is not None:
self.zone_offset = default.attrib.get('zoneOffset')
self.zone_abbreviation = default.attrib.get('zoneAbbreviation')
daylight = self._find('daylightSavingsTimeZone')
if daylight is not None:
self.daylight_zone_offset = daylight.attrib.get('zoneOffset')
self.daylight_zone_abbreviation = daylight.attrib.get('zoneAbbreviation')
# except:
        # raise ValueError('Unable to properly parse the timeZoneInfo element')
class SeriesCatalog(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(SeriesCatalog,self).__init__(xml,version)
self.parse_seriescatalog()
def __iter__(self):
for s in self.series:
yield s
def __getitem__(self,key):
if isinstance(key,int) and key < len(self.series):
return self.series[key]
if isinstance(key,str):
srs = [series for series in self.series if series.code == key]
if len(srs) > 0:
return srs[0]
raise KeyError('Unknown key ' + str(key))
def parse_seriescatalog(self,xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
self.series = [Series(elm,self._ns) for elm in self._findall('series')]
# except:
# raise ValueError('Unable to properly parse the seriesCatalog element')
class Series(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Series,self).__init__(xml,version)
self.parse_series()
"""Accessor proeprties/methods"""
@property
def name(self):
return self.variable.variable_name
@property
def code(self):
return self.variable.variable_code
def parse_series(self,xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
xml_dict = _xml_to_dict(self._root,depth=3)
self.value_count = xml_dict.get('value_count')
self.value_type = xml_dict.get('value_type')
self.general_category = xml_dict.get('general_category')
self.sample_medium = xml_dict.get('sample_medium')
self.data_type = xml_dict.get('data_type')
# date-time
self.begin_date_time = parser.parse(xml_dict.get('begin_date_time'))
self.begin_date_time_utc = parser.parse(xml_dict.get('begin_date_time_utc')) if xml_dict.get('begin_date_time_utc') is not None else None
self.end_date_time = parser.parse(xml_dict.get('end_date_time'))
self.end_date_time_utc = parser.parse(xml_dict.get('end_date_time_utc')) if xml_dict.get('end_date_time_utc') is not None else None
# method info
self.method_description = xml_dict.get('method_description')
self.method_code = xml_dict.get('method_code')
self.method_link = xml_dict.get('method_link')
method = self._find('method')
if method is not None:
self.method_id = method.attrib.get('methodID')
else:
self.method_id = None
# source info
self.organization = xml_dict.get('organization')
self.source_description = xml_dict.get('source_description')
self.citation = xml_dict.get('citation')
source = self._find('source')
if source is not None:
self.source_id = source.attrib.get('sourceID')
else:
self.source_id = None
# quality control info
self.quality_control_level_code = xml_dict.get('quality_control_level_code')
self.definition = xml_dict.get('definition')
qa = self._find('qualityControlLevel')
if qa is not None:
self.quality_control_level_id = qa.attrib.get('qualityControlLevelID')
else:
self.quality_control_level_id = None
# properties
self.properties = dict([(prop.attrib.get('name'),testXMLValue(prop)) for prop in self._findall('seriesProperty')])
# sub-objects
self.variable = Variable(self._find('variable'),self._ns)
# except:
# raise ValueError('Unable to correctly parse Series element')
class Variable(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Variable,self).__init__(xml,version)
self.parse_variable()
def parse_variable(self,xml=None):
if xml is not None:
try:
self._root = etree.parse(xml)
except:
self._root = xml
# try:
xml_dict = _xml_to_dict(self._root)
self.value_type = xml_dict.get('value_type')
self.data_type = xml_dict.get('data_type')
self.general_category = xml_dict.get('general_category')
self.sample_medium = xml_dict.get('sample_medium')
self.no_data_value = xml_dict.get('no_data_value')
self.variable_name = xml_dict.get('variable_name')
self.variable_code = xml_dict.get('variable_code')
self.variable_description = xml_dict.get('variable_description')
self.speciation = xml_dict.get('speciation')
# notes and properties
notes = [(note.attrib.get('title'),testXMLValue(note)) for note in self._findall('note')]
none_notes = [note[1] for note in notes if note[0] is None]
self.notes = dict([note for note in notes if note[0] is not None])
if len(none_notes) > 0:
self.notes['none'] = none_notes
self.properties = dict([(prop.attrib.get('name'),testXMLValue(prop)) for prop in self._findall('variableProperty')])
# related
related = self._find('related')
if related is not None:
self.parent_codes = [dict([('network',code.attrib.get('network')),('vocabulary',code.attrib.get('vocabulary')),('default',code.attrib.get('default'))])
for code in related.findall(ns(self._ns) + 'parentCode')]
            self.related_codes = [dict([('network',code.attrib.get('network')),('vocabulary',code.attrib.get('vocabulary')),('default',code.attrib.get('default'))])
                                  for code in related.findall(ns(self._ns) + 'relatedCode')]
else:
self.parent_codes = None
self.related_codes = None
# sub-objects
if self._ns == 'wml1.0':
unit = self._find('units')
self.unit = Unit1_0(unit, self._ns) if unit is not None else None
timesupport = self._find('timeSupport')
self.time_support = TimeScale(timesupport, self._ns) if timesupport is not None else None
else:
unit = self._find('unit')
self.unit = Unit(unit, self._ns) if unit is not None else None
timescale = self._find('timeScale')
self.time_scale = TimeScale(timescale, self._ns) if timescale is not None else None
categories = self._find('categories')
if categories is not None:
self.categories = [Category(cat,self._ns) for cat in categories.findall(ns(self._ns) + 'category')]
else:
self.categories = None
# except:
# raise ValueError('Unable to correctly parse variable element')
class TimeScale(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(TimeScale,self).__init__(xml,version)
self.parse_timescale()
def parse_timescale(self):
try:
xml_dict = _xml_to_dict(self._root)
self.time_spacing = xml_dict.get('time_spacing')
self.time_support = xml_dict.get('time_support')
self.time_interval = xml_dict.get('time_interval')
unit = self._find('unit')
self.unit = Unit(unit, self._ns) if unit is not None else None
except:
raise
class Unit(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Unit,self).__init__(xml,version)
self.parse_unit()
def parse_unit(self):
try:
xml_dict = _xml_to_dict(self._root)
self.name = xml_dict.get('unit_name')
self.unit_type = xml_dict.get('unit_type')
self.description = xml_dict.get('unit_description')
self.abbreviation = xml_dict.get('unit_abbreviation')
self.code = xml_dict.get('unit_code')
self.id = self._root.attrib.get('UnitID')
except:
raise
class Unit1_0(XMLParser):
def __init__(self,xml,version='wml1.0'):
super(Unit1_0,self).__init__(xml,version)
self.parse_unit()
def parse_unit(self):
try:
self.name = testXMLValue(self._root)
self.code = self._root.attrib.get('unitsCode')
self.abbreviation = self._root.attrib.get('unitsAbbreviation')
self.type = self._root.attrib.get('unitsType')
self.id = self._root.attrib.get('unitID')
except:
raise
class Category(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Category,self).__init__(xml,version)
self.parse_category()
def parse_category(self):
try:
xml_dict = _xml_to_dict(self._root)
self.data_value = xml_dict.get('data_value')
self.description = xml_dict.get('description')
self.id = self._root.attrib.get('categoryID')
except:
raise
class TimeSeriesResponse(XMLParser):
"""
Parses the response from a 'GetValues' request
Parameters
===========
:xmlio - A file-like object that holds the xml response from the request.
Return
=======
An object constructed from a dictionary parse of the response. The object has get access and can
also iterate over each timeSeries element returned.
"""
def __init__(self,xml,version='wml1.1'):
super(TimeSeriesResponse,self).__init__(xml,version)
self.parse_timeseriesresponse()
"""Accessor properties/methods"""
@property
def series_names(self):
return [series.name for series in self.time_series]
@property
def variable_names(self):
return list(set([series.variable.variable_name for series in self.time_series]))
@property
def variable_codes(self):
return list(set([s.variable.variable_code for s in self.time_series]))
def get_series_by_variable(self,var_name=None,var_code=None):
if var_code is not None:
return [s for s in self.time_series if s.variable.variable_code == var_code]
elif var_name is not None:
return [series for series in self.time_series if series.variable.variable_name == var_name]
return None
def parse_timeseriesresponse(self):
try:
qi = self._find('queryInfo')
self.query_info = QueryInfo(qi,self._ns)
self.time_series = [TimeSeries(series,self._ns) for series in self._findall('timeSeries')]
except:
raise
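# Illustrative usage sketch (editor's addition, not part of the original module). Assuming
# ``resp_xml`` is a parsed timeSeriesResponse element (or a file-like object holding one):
#
#     tsr = TimeSeriesResponse(resp_xml, 'wml1.1')
#     tsr.series_names                                      # names of every returned timeSeries
#     flow = tsr.get_series_by_variable(var_code='00060')   # '00060' is a made-up code
#     for series in tsr.time_series:
#         for block in series.values:
#             pairs = block.get_date_values()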
class TimeSeries(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(TimeSeries,self).__init__(xml,version)
self.parse_timeseries()
def parse_timeseries(self):
try:
self.variable = Variable(self._find('variable'), self._ns)
self.values = [Values(val,self._ns) for val in self._findall('values')]
self.source_info = SiteInfo(self._find('sourceInfo'), self._ns)
self.name = self._root.attrib.get('name')
except:
raise
class Values(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Values,self).__init__(xml,version)
self.parse_values()
def __iter__(self):
for v in self.values:
yield v
"""Accessor properties/methods"""
def get_date_values(self,method_id=None,source_id=None,sample_id=None,quality_level=None,utc=False):
varl = [v for v in self.values]
if method_id is not None:
varl = [v for v in varl if v.method_id == method_id]
if source_id is not None:
varl = [v for v in varl if v.source_id == source_id]
if sample_id is not None:
varl = [v for v in varl if v.sample_id == sample_id]
if quality_level is not None:
varl = [v for v in varl if v.quality_control_level == quality_level]
if not utc:
return [(v.date_time,v.value) for v in varl]
else:
return [(v.date_time_utc,v.value) for v in varl]
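    # Illustrative usage (editor's addition): given a parsed ``Values`` block ``vals``, the
    # call below returns (datetime, value) pairs restricted to one methodID ('2' is a
    # made-up identifier); ``utc=True`` switches to the dateTimeUTC stamps instead.
    #
    #     pairs = vals.get_date_values(method_id='2', utc=True)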
def parse_values(self):
xml_dict = _xml_to_dict(self._root)
# method info
self.methods = [Method(method,self._ns) for method in self._findall('method')]
# source info
self.sources = [Source(source,self._ns) for source in self._findall('source')]
# quality control info
        self.quality_control_levels = [QualityControlLevel(qal, self._ns) for qal in self._findall('qualityControlLevel')]
# offset info
self.offsets = [Offset(os,self._ns) for os in self._findall('offset')]
# sample info
self.samples = [Sample(sample,self._ns) for sample in self._findall('sample')]
# censor codes
self.censor_codes = [CensorCode(code, self._ns) for code in self._findall('censorCode')]
# unit
if self._ns == 'wml1.0':
self.unit_abbreviation = self._root.attrib.get('unitsAbbreviation')
self.unit_code = self._root.attrib.get('unitsCode')
self.count = self._root.attrib.get('count')
else:
unit = self._find('unit')
self.unit = Unit(unit, self._ns) if unit is not None else None
# values
self.values = [Value(val, self._ns) for val in self._findall('value')]
class Value(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Value,self).__init__(xml,version)
self.parse_value()
def parse_value(self):
try:
self.value = testXMLValue(self._root)
d = self._root.attrib
self.qualifiers = d.get('qualifiers')
self.censor_code = d.get('censorCode')
self.date_time = parser.parse(d.get('dateTime')) if d.get('dateTime') is not None else None
self.time_offset = d.get('timeOffset')
self.date_time_utc = parser.parse(d.get('dateTimeUTC')) if d.get('dateTimeUTC') is not None else None
self.method_id = d.get('methodID')
self.source_id = d.get('sourceID')
self.accuracy_std_dev = d.get('accuracyStdDev')
self.sample_id = d.get('sampleID')
self.method_code = d.get('methodCode')
self.source_code = d.get('sourceCode')
self.lab_sample_code = d.get('lab_sample_code')
self.offset_value = d.get('offsetValue')
self.offset_type_id = d.get('offsetTypeID')
self.offset_type_code = d.get('offsetTypeCode')
self.coded_vocabulary = d.get('codedVocabulary')
self.coded_vocabulary_term = d.get('codedVocabularyTerm')
self.quality_control_level = d.get('qualityControlLevel')
self.metadata_time = d.get('metadataTime')
self.oid = d.get('oid')
except:
raise
class Sample(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Sample,self).__init__(xml,version)
self.parse_sample()
def parse_sample(self):
try:
xml_dict = _xml_to_dict(self._root)
self.code = xml_dict.get('lab_sample_code')
self.type = xml_dict.get('sample_type')
lm = self._find('labMethod')
self.method = LabMethod(lm, self._ns) if lm is not None else None
except:
raise
class LabMethod(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(LabMethod,self).__init__(xml,version)
self.parse_labmethod()
def parse_labmethod(self):
try:
xml_dict = _xml_to_dict(self._root)
self.code = xml_dict.get('lab_code')
self.name = xml_dict.get('lab_name')
self.organization = xml_dict.get('lab_organization')
self.method_name = xml_dict.get('lab_method_name')
self.method_description = xml_dict.get('lab_method_description')
self.method_link = xml_dict.get('lab_method_link')
# sub-objects
source = self._find('labSourceDetails')
self.source_details = Source(source,self._ns) if source is not None else None
except:
raise
class Source(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Source,self).__init__(xml,version)
self.parse_source()
def __str__(self):
return str(self.__dict__)
def get_contact(self,name):
        matches = [ci for ci in self.contact_info if ci.name == name]
        if matches:
            return matches[0]
        return None
def parse_source(self):
try:
xml_dict = _xml_to_dict(self._root)
self.code = xml_dict.get('source_code')
self.organization = xml_dict.get('organization')
self.description = xml_dict.get('source_description')
self.links = [testXMLValue(link) for link in self._findall('sourceLink')]
self.citation = xml_dict.get('citation')
# metadata
self.topic_category = xml_dict.get('topic_category')
self.title = xml_dict.get('title')
self.abstract = xml_dict.get('abstract')
self.profile_version = xml_dict.get('profile_version')
self.metadata_link = xml_dict.get('metadata_link')
# contact info
self.contact_info = [ContactInformation(ci,self._ns) for ci in self._findall('contactInformation')]
except:
raise
class ContactInformation(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(ContactInformation,self).__init__(xml,version)
self.parse_contactinformation()
def parse_contactinformation(self):
try:
xml_dict = _xml_to_dict(self._root)
self.name = xml_dict.get('contact_name')
self.type = xml_dict.get('type_of_contact')
self.email = [testXMLValue(email) for email in self._findall('email')]
self.phone = [testXMLValue(phone) for phone in self._findall('phone')]
self.address = [testXMLValue(address) for address in self._findall('address')]
except:
raise
class Offset(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Offset,self).__init__(xml,version)
self.parse_offset()
def parse_offset(self):
try:
xml_dict = _xml_to_dict(self._root)
self.type_code = xml_dict.get('offset_type_code')
self.value = xml_dict.get('offset_value')
self.description = xml_dict.get('offset_description')
self.is_vertical = xml_dict.get('offset_is_vertical')
self.azimuth_degrees = xml_dict.get('offset_azimuth_degrees')
            unit = self._find('unit')
if self._ns == 'wml1.0':
self.unit = Unit1_0(unit, self._ns) if unit is not None else None
else:
self.unit = Unit(unit,self._ns) if unit is not None else None
except:
raise
class Method(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(Method,self).__init__(xml,version)
self.parse_method()
def parse_method(self):
try:
xml_dict = _xml_to_dict(self._root)
self.code = xml_dict.get('method_code')
self.description = xml_dict.get('method_description')
self.link = xml_dict.get('method_link')
self.id = self._root.attrib.get('methodID')
except:
raise
class QualityControlLevel(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(QualityControlLevel,self).__init__(xml,version)
self.parse_qcl()
def parse_qcl(self):
try:
xml_dict = _xml_to_dict(self._root)
self.code = xml_dict.get('quality_control_level_code')
self.definition = xml_dict.get('definition')
self.explanation = xml_dict.get('explanation')
self.id = self._root.attrib.get('qualityControlLevelID')
except:
raise
class CensorCode(XMLParser):
def __init__(self,xml,version='wml1.1'):
super(CensorCode,self).__init__(xml,version)
self.parse_censorcode()
def parse_censorcode(self):
try:
xml_dict = _xml_to_dict(self._root)
self.code = xml_dict.get('censor_code')
self.description = xml_dict.get('censor_code_description')
self.id = self._root.attrib.get('censorCodeID')
except:
raise
class VariablesResponse(XMLParser):
"""
Parses the response from a 'GetVariableInfo' request
Parameters
===========
:xmlio - A file-like object that holds the xml response from the request.
Return
=======
An object constructed from a dictionary parse of the response. The object has get access to its variables and
can also be used as an iterator.
"""
def __init__(self,xml,version='wml1.1'):
super(VariablesResponse,self).__init__(xml,version)
self.parse_variablesresponse()
def __iter__(self):
for v in self.variables:
yield v
def __getitem__(self,key):
if isinstance(key,int) and key < len(self.variables):
return self.variables[key]
if isinstance(key,str):
v = [var for var in self.variables if var.variable_code == key]
if len(v) > 0:
return v[0]
v = [var for var in self.variables if var.variable_name == key]
if len(v) > 0:
return v[0]
raise KeyError('Unknown key ' + str(key))
"""Accessor properties/methods"""
@property
def variable_names(self):
return list(set([var.variable_name for var in self.variables]))
@property
def variable_codes(self):
return [var.variable_code for var in self.variables]
def parse_variablesresponse(self):
try:
qi = self._find('queryInfo')
self.query_info = QueryInfo(qi, self._ns) if qi is not None else None
varis = self._find('variables')
self.variables = [Variable(var,self._ns) for var in varis.findall(ns(self._ns) + 'variable')]
except:
raise
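# Illustrative usage sketch (editor's addition, not part of the original module). Assuming
# ``resp_xml`` holds a variablesResponse document:
#
#     vr = VariablesResponse(resp_xml, 'wml1.1')
#     vr.variable_codes              # every variableCode returned
#     vr['00060']                    # lookup by variable code or name ('00060' is made up)
#     for var in vr:
#         print(var.variable_code, var.variable_name)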
| {
"repo_name": "b-cube/pipeline-demo",
"path": "demo/bcube_owslib/waterml/wml.py",
"copies": "1",
"size": "32996",
"license": "mit",
"hash": -2767485401248502000,
"line_mean": 34.7874186551,
"line_max": 163,
"alpha_frac": 0.5823433143,
"autogenerated": false,
"ratio": 3.7610851476119915,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48434284619119916,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from bcube_owslib.waterml.wml import SitesResponse, TimeSeriesResponse, VariablesResponse, namespaces
from bcube_owslib.etree import etree
def ns(namespace):
return namespaces.get(namespace)
class WaterML_1_0(object):
def __init__(self, element):
if isinstance(element, str) or isinstance(element, unicode):
self._root = etree.fromstring(str(element))
else:
self._root = element
if hasattr(self._root, 'getroot'):
self._root = self._root.getroot()
self._ns = 'wml1.0'
@property
def response(self):
try:
if self._root.tag == str(ns(self._ns) + 'variablesResponse'):
return VariablesResponse(self._root, self._ns)
elif self._root.tag == str(ns(self._ns) + 'timeSeriesResponse'):
return TimeSeriesResponse(self._root, self._ns)
elif self._root.tag == str(ns(self._ns) + 'sitesResponse'):
return SitesResponse(self._root, self._ns)
except:
raise
raise ValueError('Unable to determine response type from xml')
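# Illustrative usage sketch (editor's addition, not part of the original module). The
# ``response`` property dispatches on the document's root tag, so callers need not know in
# advance which of the three response types they received:
#
#     parsed = WaterML_1_0(raw_xml_string).response
#     # ``parsed`` is a SitesResponse, TimeSeriesResponse or VariablesResponse instance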
| {
"repo_name": "b-cube/pipeline-demo",
"path": "demo/bcube_owslib/waterml/wml10.py",
"copies": "1",
"size": "1184",
"license": "mit",
"hash": 5052458777739053000,
"line_mean": 33.8235294118,
"line_max": 101,
"alpha_frac": 0.6157094595,
"autogenerated": false,
"ratio": 3.9865319865319866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5102241446031986,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from bisect import bisect
from collections import Iterable, Mapping
from collections import Iterator
from functools import partial, wraps
import inspect
from itertools import product
from numbers import Integral, Number
import operator
from operator import add, getitem, mul
import os
import sys
import traceback
import pickle
from threading import Lock
import uuid
import warnings
try:
from cytoolz.curried import (partition, concat, pluck, join, first,
groupby, valmap, accumulate, interleave,
sliding_window, assoc)
except ImportError:
from toolz.curried import (partition, concat, pluck, join, first,
groupby, valmap, accumulate,
interleave, sliding_window, assoc)
from toolz import pipe, map, reduce
import numpy as np
from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..base import Base, tokenize, normalize_token
from ..context import _globals
from ..utils import (homogeneous_deepmap, ndeepmap, ignoring, concrete,
is_integer, IndexCallable, funcname, derived_from,
SerializableLock, ensure_dict, package_of)
from ..compatibility import unicode, long, getargspec, zip_longest, apply
from ..delayed import to_task_dask
from .. import threaded, core
from .. import sharedict
from ..sharedict import ShareDict
def getarray(a, b, lock=None):
""" Mimics getitem but includes call to np.asarray
>>> getarray([1, 2, 3, 4, 5], slice(1, 4))
array([2, 3, 4])
"""
if isinstance(b, tuple) and any(x is None for x in b):
b2 = tuple(x for x in b if x is not None)
b3 = tuple(None if x is None else slice(None, None)
for x in b if not isinstance(x, (int, long)))
return getarray(a, b2, lock)[b3]
if lock:
lock.acquire()
try:
c = a[b]
if type(c) != np.ndarray:
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
def getarray_nofancy(a, b, lock=None):
""" A simple wrapper around ``getarray``.
Used to indicate to the optimization passes that the backend doesn't
support fancy indexing.
"""
return getarray(a, b, lock=lock)
def getarray_inline(a, b, lock=None):
""" A getarray function that optimizations feel comfortable inlining
Slicing operations with this function may be inlined into a graph, such as
in the following rewrite
**Before**
>>> a = x[:10] # doctest: +SKIP
>>> b = a + 1 # doctest: +SKIP
>>> c = a * 2 # doctest: +SKIP
**After**
>>> b = x[:10] + 1 # doctest: +SKIP
>>> c = x[:10] * 2 # doctest: +SKIP
This inlining can be relevant to operations when running off of disk.
"""
return getarray(a, b, lock=lock)
from .optimization import optimize, fuse_slice
def slices_from_chunks(chunks):
""" Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in chunks]
shapes = product(*chunks)
starts = product(*cumdims)
return [tuple(slice(s, s + dim) for s, dim in zip(start, shape))
for start, shape in zip(starts, shapes)]
def getem(arr, chunks, getitem=getarray, shape=None, out_name=None, lock=False):
""" Dask getting various chunks from an array-like
>>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
"""
out_name = out_name or arr
chunks = normalize_chunks(chunks, shape)
keys = list(product([out_name], *[range(len(bds)) for bds in chunks]))
slices = slices_from_chunks(chunks)
if lock:
values = [(getitem, arr, x, lock) for x in slices]
else:
values = [(getitem, arr, x) for x in slices]
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return homogeneous_deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,)),
consolidate=None):
""" Find block dimensions from arguments
Parameters
----------
argpairs: iterable
name, ijk index pairs
numblocks: dict
maps {name: number of blocks}
sentinels: iterable (optional)
values for singleton dimensions
consolidate: func (optional)
use this to reduce each set of common blocks into a smaller set
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
L = concat([zip(inds, dims) for (x, inds), (x, dims)
in join(first, argpairs, first, numblocks.items())])
g = groupby(0, L)
g = dict((k, set([d for i, d in v])) for k, v in g.items())
g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())
if consolidate:
return valmap(consolidate, g2)
if g2 and not set(map(len, g2.values())) == set([1]):
raise ValueError("Shapes do not align %s" % g)
return valmap(first, g2)
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.::
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
yield an embarrassingly parallel communication pattern and is read as
$$ z_i = func(x_i, y_i) $$
More complex patterns may emerge, including multiple indices::
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
Indices missing in the output but present in the inputs results in many
inputs being sent to one function (see examples).
Examples
--------
Simple embarrassing map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Pass ``concatenate=True`` to concatenate arrays ahead of time
>>> top(f, 'z', 'i', 'x', 'ij', 'y', 'ij', concatenate=True,
... numblocks={'x': (2, 2), 'y': (2, 2,)}) # doctest: +SKIP
{('z', 0): (f, (concatenate_axes, [('x', 0, 0), ('x', 0, 1)], (1,)),
(concatenate_axes, [('y', 0, 0), ('y', 0, 1)], (1,)))
('z', 1): (f, (concatenate_axes, [('x', 1, 0), ('x', 1, 1)], (1,)),
(concatenate_axes, [('y', 1, 0), ('y', 1, 1)], (1,)))}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
Support keyword arguments with apply
>>> def f(a, b=0): return a + b
>>> top(f, 'z', 'i', 'x', 'i', numblocks={'x': (2,)}, b=10) # doctest: +SKIP
{('z', 0): (apply, f, [('x', 0)], {'b': 10}),
('z', 1): (apply, f, [('x', 1)], {'b': 10})}
See Also
--------
atop
"""
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
argpairs = list(partition(2, arrind_pairs))
assert set(numblocks) == set(pluck(0, argpairs))
all_indices = pipe(argpairs, pluck(1), concat, set)
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
for k in new_axes:
dims[k] = 1
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
tups = lol_tuples((arg,), ind, kd, dummies)
if any(nb == 1 for nb in numblocks[arg]):
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
else:
tups2 = tups
if concatenate and isinstance(tups2, list):
axes = [n for n, i in enumerate(ind) if i in dummies]
tups2 = (concatenate_axes, tups2, axes)
args.append(tups2)
valtups.append(args)
if not kwargs: # will not be used in an apply, should be a tuple
valtups = [tuple(vt) for vt in valtups]
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
dsk = {}
# Unpack delayed objects in kwargs
if kwargs:
task, dsk2 = to_task_dask(kwargs)
if dsk2:
dsk.update(ensure_dict(dsk2))
kwargs2 = task
else:
kwargs2 = kwargs
vals = [(apply, func, vt, kwargs2) for vt in valtups]
else:
vals = [(func,) + vt for vt in valtups]
dsk.update(dict(zip(keys, vals)))
return dsk
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if not isinstance(arrays, (list, tuple)):
return arrays
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
module = package_of(type(max(arrays, key=lambda x: x.__array_priority__))) or np
return module.concatenate(arrays, axis=axes[0])
def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=True):
args = [np.ones((1,) * x.ndim, dtype=x.dtype)
if isinstance(x, Array) else x for x in args]
try:
o = func(*args, **kwargs)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
suggest = ("Please specify the dtype explicitly using the "
"`dtype` kwarg.\n\n") if suggest_dtype else ""
msg = ("`dtype` inference failed in `{0}`.\n\n"
"{1}"
"Original error is below:\n"
"------------------------\n"
"{2}\n\n"
"Traceback:\n"
"---------\n"
"{3}").format(funcname, suggest, repr(e), tb)
else:
msg = None
if msg is not None:
raise ValueError(msg)
return o.dtype
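# Illustrative sketch (editor's addition): ``apply_infer_dtype`` swaps every dask Array
# argument for a one-element ``np.ones`` of matching ndim/dtype and calls ``func`` on the
# result, so for two one-dimensional float32 arrays ``x`` and ``y`` the call below in effect
# evaluates ``np.add(np.ones((1,), 'f4'), np.ones((1,), 'f4'))`` and returns ``dtype('float32')``:
#
#     dt = apply_infer_dtype(np.add, [x, y], {}, 'add')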
def map_blocks(func, *args, **kwargs):
""" Map a function across all blocks of a dask array.
Parameters
----------
func : callable
Function to apply to every block in the array.
args : dask arrays or constants
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,)>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
Your block function can learn where in the array it is if it supports a
``block_id`` keyword argument. This will receive entries like (2, 0, 1),
the position of the block in the dask array.
>>> def func(block, block_id=None):
... pass
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, token='increment') # doctest: +SKIP
dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,)>
"""
if not callable(func):
msg = ("First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)")
raise TypeError(msg % type(func).__name__)
name = kwargs.pop('name', None)
token = kwargs.pop('token', None)
if not name:
name = '%s-%s' % (token or funcname(func),
tokenize(token or func, args, **kwargs))
dtype = kwargs.pop('dtype', None)
chunks = kwargs.pop('chunks', None)
drop_axis = kwargs.pop('drop_axis', [])
new_axis = kwargs.pop('new_axis', [])
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis]
arrs = [a for a in args if isinstance(a, Array)]
other = [(i, a) for i, a in enumerate(args) if not isinstance(a, Array)]
argpairs = [(a.name, tuple(range(a.ndim))[::-1]) for a in arrs]
numblocks = {a.name: a.numblocks for a in arrs}
arginds = list(concat(argpairs))
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
try:
spec = getargspec(func)
block_id = ('block_id' in spec.args or
'block_id' in getattr(spec, 'kwonly_args', ()))
except:
block_id = False
if block_id:
kwargs['block_id'] = '__dummy__'
if other:
dsk = top(partial_by_order, name, out_ind, *arginds,
numblocks=numblocks, function=func, other=other,
**kwargs)
else:
dsk = top(func, name, out_ind, *arginds, numblocks=numblocks,
**kwargs)
# If func has block_id as an argument, add it to the kwargs for each call
if block_id:
for k in dsk.keys():
dsk[k] = dsk[k][:-1] + (assoc(dsk[k][-1], 'block_id', k[1:]),)
if dtype is None:
if block_id:
kwargs2 = assoc(kwargs, 'block_id', first(dsk.keys())[1:])
else:
kwargs2 = kwargs
dtype = apply_infer_dtype(func, args, kwargs2, 'map_blocks')
if len(arrs) == 1:
numblocks = list(arrs[0].numblocks)
else:
dims = broadcast_dimensions(argpairs, numblocks)
numblocks = [b for (_, b) in sorted(dims.items(), reverse=True)]
if drop_axis:
if any(numblocks[i] > 1 for i in drop_axis):
raise ValueError("Can't drop an axis with more than 1 block. "
"Please use `atop` instead.")
dsk = dict((tuple(k for i, k in enumerate(k)
if i - 1 not in drop_axis), v)
for k, v in dsk.items())
numblocks = [n for i, n in enumerate(numblocks) if i not in drop_axis]
if new_axis:
new_axis = sorted(new_axis)
for i in new_axis:
if not 0 <= i <= len(numblocks):
ndim = len(numblocks)
raise ValueError("Can't add axis %d when current "
"axis are %r. Missing axis: "
"%r" % (i, list(range(ndim)),
list(range(ndim, i))))
numblocks.insert(i, 1)
dsk, old_dsk = dict(), dsk
for key in old_dsk:
new_key = list(key)
for i in new_axis:
new_key.insert(i + 1, 0)
dsk[tuple(new_key)] = old_dsk[key]
if chunks:
if len(chunks) != len(numblocks):
raise ValueError("Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(numblocks)))
chunks2 = []
for i, (c, nb) in enumerate(zip(chunks, numblocks)):
if isinstance(c, tuple):
if not len(c) == nb:
raise ValueError("Dimension {0} has {1} blocks, "
"chunks specified with "
"{2} blocks".format(i, nb, len(c)))
chunks2.append(c)
else:
chunks2.append(nb * (c,))
else:
if len(arrs) == 1:
chunks2 = list(arrs[0].chunks)
else:
try:
chunks2 = list(broadcast_chunks(*[a.chunks for a in arrs]))
except:
raise ValueError("Arrays in `map_blocks` don't align, can't "
"infer output chunks. Please provide "
"`chunks` kwarg.")
if drop_axis:
chunks2 = [c for (i, c) in enumerate(chunks2) if i not in drop_axis]
if new_axis:
for i in sorted(new_axis):
chunks2.insert(i, (1,))
chunks = tuple(chunks2)
return Array(sharedict.merge((name, dsk), *[a.dask for a in arrs]),
name, chunks, dtype)
def broadcast_chunks(*chunkss):
""" Construct a chunks tuple that broadcasts many chunks tuples
>>> a = ((5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((5, 5),)
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((1,), (5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((3, 3,), (5, 5),)
>>> broadcast_chunks(a, b)
Traceback (most recent call last):
...
ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
"""
if len(chunkss) == 1:
return chunkss[0]
n = max(map(len, chunkss))
chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]
result = []
for i in range(n):
step1 = [c[i] for c in chunkss2]
if all(c == (1,) for c in step1):
step2 = step1
else:
step2 = [c for c in step1 if c != (1,)]
if len(set(step2)) != 1:
raise ValueError("Chunks do not align: %s" % str(step2))
result.append(step2[0])
return tuple(result)
@wraps(np.squeeze)
def squeeze(a, axis=None):
if 1 not in a.shape:
return a
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
b = a.map_blocks(partial(np.squeeze, axis=axis), dtype=a.dtype)
chunks = tuple(bd for bd in b.chunks if bd != (1,))
name = 'squeeze-' + tokenize(a, axis)
old_keys = list(product([b.name], *[range(len(bd)) for bd in b.chunks]))
new_keys = list(product([name], *[range(len(bd)) for bd in chunks]))
dsk = {n: b.dask[o] for o, n in zip(old_keys, new_keys)}
return Array(sharedict.merge(b.dask, (name, dsk)), name, chunks, dtype=a.dtype)
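# Illustrative usage (editor's addition, assuming ``da`` refers to this ``dask.array``
# namespace, where ``ones`` is defined elsewhere in the package):
#
#     x = da.ones((1, 10, 1), chunks=(1, 5, 1))
#     da.squeeze(x).shape        # -> (10,)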
def topk(k, x):
""" The top k elements of an array
Returns the k greatest elements of the array in sorted order. Only works
on arrays of a single dimension.
This assumes that ``k`` is small. All results will be returned in a single
chunk.
Examples
--------
>>> x = np.array([5, 1, 3, 6])
>>> d = from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
"""
if x.ndim != 1:
raise ValueError("Topk only works on arrays of one dimension")
token = tokenize(k, x)
name = 'chunk.topk-' + token
dsk = dict(((name, i), (chunk.topk, k, key))
for i, key in enumerate(x._keys()))
name2 = 'topk-' + token
dsk[(name2, 0)] = (getitem, (np.sort, (np.concatenate, list(dsk))),
slice(-1, -k - 1, -1))
chunks = ((k,),)
return Array(sharedict.merge((name2, dsk), x.dask), name2, chunks, dtype=x.dtype)
def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
This stores dask arrays into object that supports numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or iterable of Arrays
targets: array-like or iterable of array-likes
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular ``threading.Lock`` object to be shared among all writes.
regions: tuple of slices or iterable of tuple of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets, respectively.
compute: boolean, optional
If true compute immediately, return ``dask.delayed.Delayed`` otherwise
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError("Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets)))
if isinstance(regions, tuple) or regions is None:
regions = [regions]
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources)
if len(sources) != len(regions):
raise ValueError("Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions)))
updates = {}
keys = []
for tgt, src, reg in zip(targets, sources, regions):
# if out is a delayed object update dictionary accordingly
try:
dsk = {}
dsk.update(tgt.dask)
tgt = tgt.key
except AttributeError:
dsk = {}
update = insert_to_ooc(tgt, src, lock=lock, region=reg)
keys.extend(update)
update.update(dsk)
updates.update(update)
name = 'store-' + tokenize(*keys)
dsk = sharedict.merge((name, updates), *[src.dask for src in sources])
if compute:
Array._get(dsk, keys, **kwargs)
else:
from ..delayed import Delayed
dsk.update({name: keys})
return Delayed(name, dsk)
def blockdims_from_blockshape(shape, chunks):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
>>> blockdims_from_blockshape((10, 0), (4, 0))
((4, 4, 2), (0,))
"""
if chunks is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
if np.isnan(sum(shape)) or np.isnan(sum(chunks)):
raise ValueError("Array chunk sizes are unknown. shape: %s, chunks: %s"
% (shape, chunks))
if not all(map(is_integer, chunks)):
raise ValueError("chunks can only contain integers.")
if not all(map(is_integer, shape)):
raise ValueError("shape can only contain integers.")
shape = tuple(map(int, shape))
chunks = tuple(map(int, chunks))
return tuple(((bd,) * (d // bd) + ((d % bd,) if d % bd else ())
if d else (0,))
for d, bd in zip(shape, chunks))
def finalize(results):
if not results:
return concatenate3(results)
results2 = results
while isinstance(results2, (tuple, list)):
if len(results2) > 1:
return concatenate3(results)
else:
results2 = results2[0]
return unpack_singleton(results)
class Array(Base):
""" Parallel Dask Array
A parallel nd-array comprised of many numpy arrays arranged in a grid.
This constructor is for advanced uses only. For normal use see the
``da.from_array`` function.
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
See Also
--------
dask.array.from_array
"""
__slots__ = 'dask', 'name', '_chunks', 'dtype'
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __new__(cls, dask, name, chunks, dtype, shape=None):
self = super(Array, cls).__new__(cls)
assert isinstance(dask, Mapping)
if not isinstance(dask, ShareDict):
s = ShareDict()
s.update_with_key(dask, key=name)
dask = s
self.dask = dask
self.name = name
self._chunks = normalize_chunks(chunks, shape)
if self._chunks is None:
raise ValueError(chunks_none_error_message)
if dtype is None:
raise ValueError("You must specify the dtype of the array")
self.dtype = np.dtype(dtype)
for plugin in _globals.get('array_plugins', ()):
result = plugin(self)
if result is not None:
self = result
return self
def __reduce__(self):
return (Array, (self.dask, self.name, self.chunks, self.dtype))
@property
def numblocks(self):
return tuple(map(len, self.chunks))
@property
def npartitions(self):
return reduce(mul, self.numblocks, 1)
@property
def shape(self):
return tuple(map(sum, self.chunks))
def _get_chunks(self):
return self._chunks
def _set_chunks(self, chunks):
raise TypeError("Can not set chunks directly\n\n"
"Please use the rechunk method instead:\n"
" x.rechunk(%s)" % str(chunks))
chunks = property(_get_chunks, _set_chunks, "chunks property")
def __len__(self):
return sum(self.chunks[0])
def __repr__(self):
"""
>>> import dask.array as da
>>> da.ones((10, 10), chunks=(5, 5), dtype='i4')
dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5)>
"""
chunksize = str(tuple(c[0] for c in self.chunks))
name = self.name.rsplit('-', 1)[0]
return ("dask.array<%s, shape=%s, dtype=%s, chunksize=%s>" %
(name, self.shape, self.dtype, chunksize))
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
""" Number of elements in array """
return reduce(mul, self.shape, 1)
@property
def nbytes(self):
""" Number of bytes in array """
return self.size * self.dtype.itemsize
@property
def itemsize(self):
""" Length of one array element in bytes """
return self.dtype.itemsize
def _keys(self, *args):
if not args:
try:
return self._cached_keys
except AttributeError:
pass
if not self.chunks:
return [(self.name,)]
ind = len(args)
if ind + 1 == self.ndim:
result = [(self.name,) + args + (i,)
for i in range(self.numblocks[ind])]
else:
result = [self._keys(*(args + (i,)))
for i in range(self.numblocks[ind])]
if not args:
self._cached_keys = result
return result
__array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
@property
def _elemwise(self):
return elemwise
@wraps(store)
def store(self, target, **kwargs):
return store([self], [target], **kwargs)
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
return to_hdf5(filename, datapath, self, **kwargs)
def to_dask_dataframe(self, columns=None):
""" Convert dask Array to dask Dataframe
Parameters
----------
columns: list or string
list of column names if DataFrame, single string if Series
See Also
--------
dask.dataframe.from_dask_array
"""
from ..dataframe import from_dask_array
return from_dask_array(self, columns=columns)
def __int__(self):
return int(self.compute())
def __bool__(self):
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def __float__(self):
return float(self.compute())
def __complex__(self):
return complex(self.compute())
def __setitem__(self, key, value):
if isinstance(key, Array):
if isinstance(value, Array) and value.ndim > 1:
raise ValueError('boolean index array should have 1 dimension')
y = where(key, value, self)
self.dtype = y.dtype
self.dask = y.dask
self.name = y.name
return self
else:
raise NotImplementedError("Item assignment with %s not supported"
% type(key))
def __getitem__(self, index):
out = 'getitem-' + tokenize(self, index)
# Field access, e.g. x['a'] or x[['a', 'b']]
if (isinstance(index, (str, unicode)) or
(isinstance(index, list) and index and
all(isinstance(i, (str, unicode)) for i in index))):
if isinstance(index, (str, unicode)):
dt = self.dtype[index]
else:
dt = np.dtype([(name, self.dtype[name]) for name in index])
if dt.shape:
new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))
chunks = self.chunks + tuple((i,) for i in dt.shape)
return self.map_blocks(getitem, index, dtype=dt.base, name=out,
chunks=chunks, new_axis=new_axis)
else:
return self.map_blocks(getitem, index, dtype=dt, name=out)
# Slicing
if isinstance(index, Array):
return slice_with_dask_array(self, index)
if not isinstance(index, tuple):
index = (index,)
if any(isinstance(i, Array) for i in index):
raise NotImplementedError("Indexing with a dask Array")
if all(isinstance(i, slice) and i == slice(None) for i in index):
return self
dsk, chunks = slice_array(out, self.name, self.chunks, index)
dsk2 = sharedict.merge(self.dask, (out, dsk))
return Array(dsk2, out, chunks, dtype=self.dtype)
def _vindex(self, key):
if (not isinstance(key, tuple) or
not len([k for k in key if isinstance(k, (np.ndarray, list))]) >= 2 or
not all(isinstance(k, (np.ndarray, list)) or k == slice(None, None)
for k in key)):
msg = ("vindex expects only lists and full slices\n"
"At least two entries must be a list\n"
"For other combinations try doing normal slicing first, followed\n"
"by vindex slicing. Got: \n\t%s")
raise IndexError(msg % str(key))
if any((isinstance(k, np.ndarray) and k.ndim != 1) or
(isinstance(k, list) and k and isinstance(k[0], list))
for k in key):
raise IndexError("vindex does not support multi-dimensional keys\n"
"Got: %s" % str(key))
if len(set(len(k) for k in key if isinstance(k, (list, np.ndarray)))) != 1:
raise IndexError("All indexers must have the same length, got\n"
"\t%s" % str(key))
key = key + (slice(None, None),) * (self.ndim - len(key))
key = [i if isinstance(i, list) else
i.tolist() if isinstance(i, np.ndarray) else
None for i in key]
return _vindex(self, *key)
@property
def vindex(self):
return IndexCallable(self._vindex)
@wraps(np.dot)
def dot(self, other):
return tensordot(self, other,
axes=((self.ndim - 1,), (other.ndim - 2,)))
@property
def A(self):
return self
@property
def T(self):
return transpose(self)
@derived_from(np.ndarray)
def transpose(self, *axes):
if not axes:
axes = None
elif len(axes) == 1 and isinstance(axes[0], Iterable):
axes = axes[0]
return transpose(self, axes=axes)
@wraps(np.ravel)
def ravel(self):
return ravel(self)
flatten = ravel
@wraps(np.reshape)
def reshape(self, *shape):
from .reshape import reshape
if len(shape) == 1 and not isinstance(shape[0], Number):
shape = shape[0]
return reshape(self, shape)
@wraps(topk)
def topk(self, k):
return topk(k, self)
def astype(self, dtype, **kwargs):
"""Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
"""
# Scalars don't take `casting` or `copy` kwargs - as such we only pass
# them to `map_blocks` if specified by user (different than defaults).
extra = set(kwargs) - {'casting', 'copy'}
if extra:
raise TypeError("astype does not take the following keyword "
"arguments: {0!s}".format(list(extra)))
casting = kwargs.get('casting', 'unsafe')
copy = kwargs.get('copy', True)
dtype = np.dtype(dtype)
if self.dtype == dtype:
return self
elif not np.can_cast(self.dtype, dtype, casting=casting):
raise TypeError("Cannot cast array from {0!r} to {1!r}"
" according to the rule "
"{2!r}".format(self.dtype, dtype, casting))
name = 'astype-' + tokenize(self, dtype, casting, copy)
return self.map_blocks(_astype, dtype=dtype, name=name,
astype_dtype=dtype, **kwargs)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.invert, self)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __pos__(self):
return self
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
@wraps(np.any)
def any(self, axis=None, keepdims=False, split_every=None):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.all)
def all(self, axis=None, keepdims=False, split_every=None):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.min)
def min(self, axis=None, keepdims=False, split_every=None):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.max)
def max(self, axis=None, keepdims=False, split_every=None):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.argmin)
def argmin(self, axis=None, split_every=None):
from .reductions import argmin
return argmin(self, axis=axis, split_every=split_every)
@wraps(np.argmax)
def argmax(self, axis=None, split_every=None):
from .reductions import argmax
return argmax(self, axis=axis, split_every=split_every)
@wraps(np.sum)
def sum(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import sum
return sum(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.prod)
def prod(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import prod
return prod(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.mean)
def mean(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import mean
return mean(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.std)
def std(self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
from .reductions import std
return std(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every)
@wraps(np.var)
def var(self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
from .reductions import var
return var(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every)
def moment(self, order, axis=None, dtype=None, keepdims=False, ddof=0,
split_every=None):
"""Calculate the nth centralized moment.
Parameters
----------
order : int
Order of the moment that is returned, must be >= 2.
axis : int, optional
Axis along which the central moment is computed. The default is to
compute the moment of the flattened array.
dtype : data-type, optional
Type to use in computing the moment. For arrays of integer type the
default is float64; for arrays of float types it is the same as the
array type.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
N - ddof, where N represents the number of elements. By default
ddof is zero.
Returns
-------
moment : ndarray
References
----------
.. [1] Pebay, Philippe (2008), "Formulas for Robust, One-Pass Parallel
Computation of Covariances and Arbitrary-Order Statistical Moments"
(PDF), Technical Report SAND2008-6212, Sandia National Laboratories
"""
from .reductions import moment
return moment(self, order, axis=axis, dtype=dtype, keepdims=keepdims,
ddof=ddof, split_every=split_every)
def vnorm(self, ord=None, axis=None, keepdims=False, split_every=None):
""" Vector norm """
from .reductions import vnorm
return vnorm(self, ord=ord, axis=axis, keepdims=keepdims,
split_every=split_every)
@wraps(map_blocks)
def map_blocks(self, func, *args, **kwargs):
return map_blocks(func, self, *args, **kwargs)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
The number of cells that each block should share with its neighbors
If a tuple or dict this can be different per axis
boundary: str, tuple, dict
how to handle the boundaries. Values include 'reflect',
'periodic', 'nearest', 'none', or any constant value like 0 or
np.nan
trim: bool
Whether or not to trim the excess after the map function. Set this
to false if your mapping function does this for you.
**kwargs:
Other keyword arguments valid in ``map_blocks``
Examples
--------
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
>>> import dask.array as da
>>> x = np.arange(16).reshape((4, 4))
>>> d = da.from_array(x, chunks=(2, 2))
>>> d.map_overlap(lambda x: x + x.size, depth=1).compute()
array([[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]])
>>> func = lambda x: x + x.size
>>> depth = {0: 1, 1: 1}
>>> boundary = {0: 'reflect', 1: 'none'}
>>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE
array([[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27]])
"""
from .ghost import map_overlap
return map_overlap(self, func, depth, boundary, trim, **kwargs)
def cumsum(self, axis, dtype=None):
""" See da.cumsum for docstring """
from .reductions import cumsum
return cumsum(self, axis, dtype)
def cumprod(self, axis, dtype=None):
""" See da.cumprod for docstring """
from .reductions import cumprod
return cumprod(self, axis, dtype)
@wraps(squeeze)
def squeeze(self):
return squeeze(self)
def rechunk(self, chunks, threshold=None, block_size_limit=None):
""" See da.rechunk for docstring """
from . import rechunk # avoid circular import
return rechunk(self, chunks, threshold, block_size_limit)
@property
def real(self):
from .ufunc import real
return real(self)
@property
def imag(self):
from .ufunc import imag
return imag(self)
def conj(self):
from .ufunc import conj
return conj(self)
@wraps(np.clip)
def clip(self, min=None, max=None):
from .ufunc import clip
return clip(self, min, max)
def view(self, dtype, order='C'):
""" Get a view of the array as a new data type
Parameters
----------
dtype:
The dtype by which to view the array
order: string
'C' or 'F' (Fortran) ordering
This reinterprets the bytes of the array under a new dtype. If that
dtype does not have the same size as the original array then the shape
will change.
Beware that both numpy and dask.array can behave oddly when taking
shape-changing views of arrays under Fortran ordering. Under some
versions of NumPy this function will fail when taking shape-changing
views of Fortran ordered arrays if the first dimension has chunks of
size one.
"""
dtype = np.dtype(dtype)
mult = self.dtype.itemsize / dtype.itemsize
if order == 'C':
ascontiguousarray = np.ascontiguousarray
chunks = self.chunks[:-1] + (tuple(ensure_int(c * mult)
for c in self.chunks[-1]),)
elif order == 'F':
ascontiguousarray = np.asfortranarray
chunks = ((tuple(ensure_int(c * mult) for c in self.chunks[0]), ) +
self.chunks[1:])
else:
raise ValueError("Order must be one of 'C' or 'F'")
out = elemwise(ascontiguousarray, self, dtype=self.dtype)
out = elemwise(np.ndarray.view, out, dtype, dtype=dtype)
out._chunks = chunks
return out
@wraps(np.swapaxes)
def swapaxes(self, axis1, axis2):
return swapaxes(self, axis1, axis2)
@wraps(np.round)
def round(self, decimals=0):
return round(self, decimals=decimals)
def copy(self):
"""
Copy array. This is a no-op for dask.arrays, which are immutable
"""
return Array(self.dask, self.name, self.chunks, self.dtype)
def __deepcopy__(self, memo):
c = self.copy()
memo[id(self)] = c
return c
def to_delayed(self):
""" Convert Array into dask Delayed objects
Returns an array of values, one value per chunk.
See Also
--------
dask.array.from_delayed
"""
from ..delayed import Delayed
return np.array(ndeepmap(self.ndim, lambda k: Delayed(k, self.dask), self._keys()),
dtype=object)
@wraps(np.repeat)
def repeat(self, repeats, axis=None):
return repeat(self, repeats, axis=axis)
def ensure_int(f):
i = int(f)
if i != f:
raise ValueError("Could not coerce %f to integer" % f)
return i
normalize_token.register(Array, lambda a: a.name)
def normalize_chunks(chunks, shape=None):
""" Normalize chunks to tuple of tuples
>>> normalize_chunks((2, 2), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
    >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))  # Idempotent
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks([[2, 2], [3, 3]]) # Cleans up lists to tuples
((2, 2), (3, 3))
>>> normalize_chunks(10, shape=(30, 5)) # Supports integer inputs
((10, 10, 10), (5,))
>>> normalize_chunks((), shape=(0, 0)) # respects null dimensions
((0,), (0,))
"""
if chunks is None:
raise ValueError(chunks_none_error_message)
if type(chunks) is not tuple:
if type(chunks) is list:
chunks = tuple(chunks)
if isinstance(chunks, Number):
chunks = (chunks,) * len(shape)
if not chunks and shape and all(s == 0 for s in shape):
chunks = ((0,),) * len(shape)
if shape and len(chunks) != len(shape):
if not (len(shape) == 1 and sum(chunks) == shape[0]):
raise ValueError(
"Chunks and shape must be of the same length/dimension. "
"Got chunks=%s, shape=%s" % (chunks, shape))
if shape is not None:
chunks = tuple(c if c is not None else s for c, s in zip(chunks, shape))
if chunks and shape is not None:
chunks = sum((blockdims_from_blockshape((s,), (c,))
if not isinstance(c, (tuple, list)) else (c,)
for s, c in zip(shape, chunks)), ())
for c in chunks:
if not c:
raise ValueError("Empty tuples are not allowed in chunks. Express "
"zero length dimensions with 0(s) in chunks")
return tuple(map(tuple, chunks))
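# Illustrative sketch (added for exposition; ``_example_normalize_chunks`` is a
# hypothetical helper, not part of the original module, and is never called by
# library code). It shows how a blockshape is expanded into explicit per-axis
# block sizes that sum to the shape.
def _example_normalize_chunks():
    # A blockshape of (2, 3) against shape (5, 9) splits each axis into blocks
    # of that size, with a smaller trailing block where the shape is not an
    # exact multiple.
    assert normalize_chunks((2, 3), shape=(5, 9)) == ((2, 2, 1), (3, 3, 3))
    # Nested lists are cleaned up into tuples of tuples.
    assert normalize_chunks([[4, 4], [5, 5]]) == ((4, 4), (5, 5))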
def from_array(x, chunks, name=None, lock=False, fancy=True, getitem=None):
""" Create dask array from something that looks like an array
Input must have a ``.shape`` and support numpy-style slicing.
Parameters
----------
x : array_like
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions
like ((1000, 1000, 500), (400, 400)).
name : str, optional
The key name to use for the array. Defaults to a hash of ``x``.
Use ``name=False`` to generate a random name instead of hashing (fast)
lock : bool or Lock, optional
If ``x`` doesn't support concurrent reads then provide a lock here, or
pass in True to have dask.array create one for you.
fancy : bool, optional
If ``x`` doesn't support fancy indexing (e.g. indexing with lists or
arrays) then set to False. Default is True.
Examples
--------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP
If your underlying datastore does not support concurrent reads then include
the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
arrays to coordinate around the same lock.
>>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP
"""
chunks = normalize_chunks(chunks, x.shape)
if len(chunks) != len(x.shape):
raise ValueError("Input array has %d dimensions but the supplied "
"chunks has only %d dimensions" %
(len(x.shape), len(chunks)))
if tuple(map(sum, chunks)) != x.shape:
raise ValueError("Chunks do not add up to shape. "
"Got chunks=%s, shape=%s" % (chunks, x.shape))
if name in (None, True):
token = tokenize(x, chunks)
original_name = 'array-original-' + token
name = name or 'array-' + token
elif name is False:
original_name = name = 'array-' + str(uuid.uuid1())
else:
original_name = name
if lock is True:
lock = SerializableLock()
if getitem is None:
if fancy:
getitem = getarray
else:
getitem = getarray_nofancy
dsk = getem(original_name, chunks, getitem, out_name=name, lock=lock)
dsk[original_name] = x
return Array(dsk, name, chunks, dtype=x.dtype)
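# Illustrative sketch (added for exposition; the helper below is hypothetical
# and never called). It wraps an in-memory NumPy array and checks the block
# structure implied by the requested blockshape.
def _example_from_array():
    x = np.arange(12).reshape(3, 4)
    d = from_array(x, chunks=(2, 2))
    # A 3 x 4 array cut into 2 x 2 blocks leaves a short block along axis 0.
    assert d.chunks == ((2, 1), (2, 2))
    assert d.dtype == x.dtype
    return d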
def from_delayed(value, shape, dtype, name=None):
""" Create a dask array from a dask delayed value
This routine is useful for constructing dask arrays in an ad-hoc fashion
using dask delayed, particularly when combined with stack and concatenate.
The dask array will consist of a single chunk.
Examples
--------
>>> from dask import delayed
>>> value = delayed(np.ones)(5)
>>> array = from_delayed(value, (5,), float)
>>> array
dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,)>
>>> array.compute()
array([ 1., 1., 1., 1., 1.])
"""
from dask.delayed import delayed, Delayed
if not isinstance(value, Delayed) and hasattr(value, 'key'):
value = delayed(value)
name = name or 'from-value-' + tokenize(value, shape, dtype)
dsk = {(name,) + (0,) * len(shape): value.key}
chunks = tuple((d,) for d in shape)
return Array(sharedict.merge(value.dask, (name, dsk)), name, chunks, dtype)
def from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):
""" Create dask array in a single block by calling a function
Calling the provided function with func(*args, **kwargs) should return a
NumPy array of the indicated shape and dtype.
Examples
--------
>>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))
>>> a.compute()
array([0, 1, 2])
This works particularly well when coupled with dask.array functions like
concatenate and stack:
>>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]
>>> stack(arrays).compute()
array([0, 1, 2, 3, 4])
"""
name = name or 'from_func-' + tokenize(func, shape, dtype, args, kwargs)
if args or kwargs:
func = partial(func, *args, **kwargs)
dsk = {(name,) + (0,) * len(shape): (func,)}
chunks = tuple((i,) for i in shape)
return Array(dsk, name, chunks, dtype)
def common_blockdim(blockdims):
""" Find the common block dimensions from the list of block dimensions
Currently only implements the simplest possible heuristic: the common
    block-dimension is the only one that does not fully span a dimension.
This is a conservative choice that allows us to avoid potentially very
expensive rechunking.
Assumes that each element of the input block dimensions has all the same
sum (i.e., that they correspond to dimensions of the same size).
Examples
--------
>>> common_blockdim([(3,), (2, 1)])
(2, 1)
>>> common_blockdim([(1, 2), (2, 1)])
(1, 1, 1)
>>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP
Traceback (most recent call last):
...
ValueError: Chunks do not align
"""
if not any(blockdims):
return ()
non_trivial_dims = set([d for d in blockdims if len(d) > 1])
if len(non_trivial_dims) == 1:
return first(non_trivial_dims)
if len(non_trivial_dims) == 0:
return max(blockdims, key=first)
if np.isnan(sum(map(sum, blockdims))):
        raise ValueError("Array chunk sizes are unknown: %s"
                         % str(blockdims))
if len(set(map(sum, non_trivial_dims))) > 1:
        raise ValueError("Chunks do not add up to the same value: %s"
                         % str(blockdims))
# We have multiple non-trivial chunks on this axis
# e.g. (5, 2) and (4, 3)
# We create a single chunk tuple with the same total length
# that evenly divides both, e.g. (4, 1, 2)
    # To accomplish this we walk down all chunk tuples together, taking the
    # smallest current element, adding it to the output, subtracting it from
    # every tuple's current element, and dropping any element that reaches
    # zero. We stop once we have burned through all of the chunk tuples.
# For efficiency's sake we reverse the lists so that we can pop off the end
rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]
total = sum(first(non_trivial_dims))
i = 0
out = []
while i < total:
m = min(c[-1] for c in rchunks)
out.append(m)
for c in rchunks:
c[-1] -= m
if c[-1] == 0:
c.pop()
i += m
return tuple(out)
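# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It demonstrates the refinement performed when two inputs carry different
# non-trivial chunkings along the same axis: the result evenly divides both.
def _example_common_blockdim():
    # (5, 2, 3) and (2, 3, 5) both sum to 10; the common chunking walks both
    # tuples and emits the finer set of block boundaries shared by the two.
    assert common_blockdim([(5, 2, 3), (2, 3, 5)]) == (2, 3, 2, 3)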
def unify_chunks(*args, **kwargs):
"""
Unify chunks across a sequence of arrays
Parameters
----------
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
Examples
--------
>>> import dask.array as da
>>> x = da.ones(10, chunks=((5, 2, 3),))
>>> y = da.ones(10, chunks=((2, 3, 5),))
>>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')
>>> chunkss
{'i': (2, 3, 2, 3)}
>>> x = da.ones((100, 10), chunks=(20, 5))
>>> y = da.ones((10, 100), chunks=(4, 50))
>>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk')
>>> chunkss # doctest: +SKIP
{'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}
Returns
-------
chunkss : dict
Map like {index: chunks}.
arrays : list
List of rechunked arrays.
See Also
--------
common_blockdim
"""
args = [asarray(a) if i % 2 == 0 else a for i, a in enumerate(args)]
warn = kwargs.get('warn', True)
arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
arrays, inds = zip(*arginds)
if all(ind == inds[0] for ind in inds) and all(a.chunks == arrays[0].chunks for a in arrays):
return dict(zip(inds[0], arrays[0].chunks)), arrays
nameinds = [(a.name, i) for a, i in arginds]
blockdim_dict = dict((a.name, a.chunks) for a, _ in arginds)
chunkss = broadcast_dimensions(nameinds, blockdim_dict,
consolidate=common_blockdim)
max_parts = max(arg.npartitions for arg in args[::2])
nparts = np.prod(list(map(len, chunkss.values())))
if warn and nparts and nparts >= max_parts * 10:
warnings.warn("Increasing number of chunks by factor of %d" %
(nparts / max_parts))
arrays = []
for a, i in arginds:
chunks = tuple(chunkss[j] if a.shape[n] > 1 else a.shape[n]
if not np.isnan(sum(chunkss[j])) else None
for n, j in enumerate(i))
if chunks != a.chunks and all(a.chunks):
arrays.append(a.rechunk(chunks))
else:
arrays.append(a)
return chunkss, arrays
def atop(func, out_ind, *args, **kwargs):
""" Tensor operation: Generalized inner and outer products
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``atop`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Many dask.array operations are special cases of atop including elementwise,
broadcasting, reductions, tensordot, and transpose.
Parameters
----------
func : callable
Function to apply to individual tuples of blocks
out_ind : iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args : sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
**kwargs : dict
Extra keyword arguments to pass to function
dtype : np.dtype
Datatype of resulting array.
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
Dictionary mapping index to function to be applied to chunk sizes
new_axes : dict, keyword only
New indexes and their dimension lengths
Examples
--------
2D embarrassingly parallel operation from two arrays, x, and y.
>>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij', dtype='f8') # z = x + y # doctest: +SKIP
Outer product multiplying x by y, two 1-d vectors
>>> z = atop(operator.mul, 'ij', x, 'i', y, 'j', dtype='f8') # doctest: +SKIP
z = x.T
>>> z = atop(np.transpose, 'ji', x, 'ij', dtype=x.dtype) # doctest: +SKIP
    The transpose case above is illustrative because it does the same
    transposition both on each in-memory block by calling ``np.transpose``
    and on the order of the blocks themselves, by switching the order of the
    index ``ij -> ji``.
We can compose these same patterns with more variables and more complex
in-memory functions
z = X + Y.T
>>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji', dtype='f8') # doctest: +SKIP
Any index, like ``i`` missing from the output index is interpreted as a
contraction (note that this differs from Einstein convention; repeated
indices do not imply contraction.) In the case of a contraction the passed
function should expect an iterable of blocks on any array that holds that
index. To receive arrays concatenated along contracted dimensions instead
pass ``concatenate=True``.
Inner product multiplying x by y, two 1-d vectors
>>> def sequence_dot(x_blocks, y_blocks):
... result = 0
... for x, y in zip(x_blocks, y_blocks):
... result += x.dot(y)
... return result
>>> z = atop(sequence_dot, '', x, 'i', y, 'i', dtype='f8') # doctest: +SKIP
Add new single-chunk dimensions with the ``new_axes=`` keyword, including
the length of the new dimension. New dimensions will always be in a single
chunk.
>>> def f(x):
... return x[:, None] * np.ones((1, 5))
>>> z = atop(f, 'az', x, 'a', new_axes={'z': 5}, dtype=x.dtype) # doctest: +SKIP
If the applied function changes the size of each chunk you can specify this
with a ``adjust_chunks={...}`` dictionary holding a function for each index
that modifies the dimension size in that index.
>>> def double(x):
... return np.concatenate([x, x])
>>> y = atop(double, 'ij', x, 'ij',
... adjust_chunks={'i': lambda n: 2 * n}, dtype=x.dtype) # doctest: +SKIP
See Also
--------
top - dict formulation of this function, contains most logic
"""
out = kwargs.pop('name', None) # May be None at this point
token = kwargs.pop('token', None)
dtype = kwargs.pop('dtype', None)
adjust_chunks = kwargs.pop('adjust_chunks', None)
new_axes = kwargs.get('new_axes', {})
if dtype is None:
raise ValueError("Must specify dtype of output array")
chunkss, arrays = unify_chunks(*args)
for k, v in new_axes.items():
chunkss[k] = (v,)
arginds = list(zip(arrays, args[1::2]))
numblocks = dict([(a.name, a.numblocks) for a, _ in arginds])
argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
# Finish up the name
if not out:
out = '%s-%s' % (token or funcname(func).strip('_'),
tokenize(func, out_ind, argindsstr, dtype, **kwargs))
dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks, **kwargs)
dsks = [a.dask for a, _ in arginds]
chunks = [chunkss[i] for i in out_ind]
if adjust_chunks:
for i, ind in enumerate(out_ind):
if ind in adjust_chunks:
if callable(adjust_chunks[ind]):
chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))
elif isinstance(adjust_chunks[ind], int):
chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])
elif isinstance(adjust_chunks[ind], (tuple, list)):
chunks[i] = tuple(adjust_chunks[ind])
else:
raise NotImplementedError(
"adjust_chunks values must be callable, int, or tuple")
chunks = tuple(chunks)
return Array(sharedict.merge((out, dsk), *dsks), out, chunks, dtype=dtype)
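# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It builds a lazy blocked transpose with ``atop`` by reordering the index
# pattern ``ij -> ji`` while applying ``np.transpose`` within each block.
def _example_atop_transpose():
    x = from_array(np.arange(6).reshape(2, 3), chunks=(1, 3))
    z = atop(np.transpose, 'ji', x, 'ij', dtype=x.dtype)
    # The output metadata reflects the swapped axes without any computation.
    assert z.shape == (3, 2)
    assert z.chunks == ((3,), (1, 1))
    return z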
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
def stack(seq, axis=0):
"""
Stack arrays along a new axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along a new dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis + 1
if axis > ndim:
        raise ValueError("Axis must not be greater than the number of dimensions"
"\nData has %d dimensions, but got axis=%d" %
(ndim, axis))
if not all(x.shape == seq[0].shape for x in seq):
        raise ValueError("Stacked arrays must have the same shape. Got %s"
                         % str([x.shape for x in seq]))
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq))
_, seq = unify_chunks(*uc_args)
dt = reduce(np.promote_types, [a.dtype for a in seq])
seq = [x.astype(dt) for x in seq]
assert len(set(a.chunks for a in seq)) == 1 # same chunks
chunks = (seq[0].chunks[:axis] + ((1,) * n,) + seq[0].chunks[axis:])
names = [a.name for a in seq]
name = 'stack-' + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [(names[key[axis + 1]], ) + key[1:axis + 1] + key[axis + 2:]
for key in keys]
values = [(getitem, inp, (slice(None, None, None),) * axis +
(None, ) + (slice(None, None, None), ) * (ndim - axis))
for inp in inputs]
dsk = dict(zip(keys, values))
dsk2 = sharedict.merge((name, dsk), *[a.dask for a in seq])
return Array(dsk2, name, chunks, dtype=dt)
def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
"""
Concatenate arrays along an existing axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Parameters
----------
seq: list of dask.arrays
axis: int
Dimension along which to align all of the arrays
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also
--------
stack
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis
if axis >= ndim:
        msg = ("Axis must be less than the number of dimensions"
"\nData has %d dimensions, but got axis=%d")
raise ValueError(msg % (ndim, axis))
if (not allow_unknown_chunksizes and
not all(i == axis or all(x.shape[i] == seq[0].shape[i] for x in seq)
for i in range(ndim))):
if any(map(np.isnan, seq[0].shape)):
raise ValueError("Tried to concatenate arrays with unknown"
" shape %s. To force concatenation pass"
" allow_unknown_chunksizes=True."
% str(seq[0].shape))
        raise ValueError("Shapes do not align: %s"
                         % str([x.shape for x in seq]))
inds = [list(range(ndim)) for i in range(n)]
for i, ind in enumerate(inds):
ind[axis] = -(i + 1)
uc_args = list(concat(zip(seq, inds)))
_, seq = unify_chunks(*uc_args, warn=False)
bds = [a.chunks for a in seq]
chunks = (seq[0].chunks[:axis] + (sum([bd[axis] for bd in bds], ()), ) +
seq[0].chunks[axis + 1:])
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
dt = reduce(np.promote_types, [a.dtype for a in seq])
seq = [x.astype(dt) for x in seq]
names = [a.name for a in seq]
name = 'concatenate-' + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
values = [(names[bisect(cum_dims, key[axis + 1]) - 1],) + key[1:axis + 1] +
(key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1], ) +
key[axis + 2:] for key in keys]
dsk = dict(zip(keys, values))
dsk2 = sharedict.merge((name, dsk), * [a.dask for a in seq])
return Array(dsk2, name, chunks, dtype=dt)
def atleast_3d(x):
if x.ndim == 1:
return x[None, :, None]
elif x.ndim == 2:
return x[:, :, None]
elif x.ndim > 2:
return x
else:
raise NotImplementedError()
def atleast_2d(x):
if x.ndim == 1:
return x[None, :]
elif x.ndim > 1:
return x
else:
raise NotImplementedError()
@wraps(np.vstack)
def vstack(tup):
tup = tuple(atleast_2d(x) for x in tup)
return concatenate(tup, axis=0)
@wraps(np.hstack)
def hstack(tup):
if all(x.ndim == 1 for x in tup):
return concatenate(tup, axis=0)
else:
return concatenate(tup, axis=1)
@wraps(np.dstack)
def dstack(tup):
tup = tuple(atleast_3d(x) for x in tup)
return concatenate(tup, axis=2)
@wraps(np.take)
def take(a, indices, axis=0):
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
if isinstance(a, np.ndarray) and isinstance(indices, Array):
return _take_dask_array_from_numpy(a, indices, axis)
else:
return a[(slice(None),) * axis + (indices,)]
@wraps(np.compress)
def compress(condition, a, axis=None):
if axis is None:
raise NotImplementedError("Must select axis for compression")
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
condition = np.array(condition, dtype=bool)
if condition.ndim != 1:
raise ValueError("Condition must be one dimensional")
if len(condition) < a.shape[axis]:
condition = condition.copy()
condition.resize(a.shape[axis])
slc = ((slice(None),) * axis + (condition, ) +
(slice(None),) * (a.ndim - axis - 1))
return a[slc]
def _take_dask_array_from_numpy(a, indices, axis):
assert isinstance(a, np.ndarray)
assert isinstance(indices, Array)
return indices.map_blocks(lambda block: np.take(a, block, axis),
chunks=indices.chunks,
dtype=a.dtype)
@wraps(np.transpose)
def transpose(a, axes=None):
if axes:
if len(axes) != a.ndim:
raise ValueError("axes don't match array")
else:
axes = tuple(range(a.ndim))[::-1]
axes = tuple(d + a.ndim if d < 0 else d for d in axes)
return atop(np.transpose, axes, a, tuple(range(a.ndim)),
dtype=a.dtype, axes=axes)
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
def _tensordot(a, b, axes):
x = max([a, b], key=lambda x: x.__array_priority__)
module = package_of(type(x)) or np
x = module.tensordot(a, b, axes=axes)
ind = [slice(None, None)] * x.ndim
for a in sorted(axes[0]):
ind.insert(a, None)
x = x[tuple(ind)]
return x
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, int):
left_axes = (left_axes,)
if isinstance(right_axes, int):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
dt = np.promote_types(lhs.dtype, rhs.dtype)
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
right_index[r] = left_index[l]
intermediate = atop(_tensordot, out_index,
lhs, left_index,
rhs, right_index, dtype=dt,
axes=(left_axes, right_axes))
result = intermediate.sum(axis=left_axes)
return result
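# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It expresses a blocked matrix multiply as a tensor contraction over the inner
# dimension, mirroring how ``dot`` is defined just below.
def _example_tensordot_matmul():
    a = from_array(np.ones((4, 3)), chunks=(2, 3))
    b = from_array(np.ones((3, 5)), chunks=(3, 5))
    c = tensordot(a, b, axes=((1,), (0,)))
    # Contracting axis 1 of ``a`` with axis 0 of ``b`` leaves a 4 x 5 result.
    assert c.shape == (4, 5)
    return c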
@wraps(np.dot)
def dot(a, b):
return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))
def insert_to_ooc(out, arr, lock=True, region=None):
if lock is True:
lock = Lock()
def store(out, x, index, lock, region):
if lock:
lock.acquire()
try:
if region is None:
out[index] = np.asanyarray(x)
else:
out[fuse_slice(region, index)] = np.asanyarray(x)
finally:
if lock:
lock.release()
return None
slices = slices_from_chunks(arr.chunks)
name = 'store-%s' % arr.name
dsk = dict(((name,) + t[1:], (store, out, t, slc, lock, region))
for t, slc in zip(core.flatten(arr._keys()), slices))
return dsk
def asarray(array):
"""Coerce argument into a dask array
Examples
--------
>>> x = np.arange(3)
>>> asarray(x)
dask.array<asarray, shape=(3,), dtype=int64, chunksize=(3,)>
"""
if isinstance(array, Array):
return array
name = 'asarray-' + tokenize(array)
if not isinstance(getattr(array, 'shape', None), Iterable):
array = np.asarray(array)
dsk = {(name,) + (0,) * len(array.shape):
(getarray_inline, name) + ((slice(None, None),) * len(array.shape),),
name: array}
chunks = tuple((d,) for d in array.shape)
return Array(dsk, name, chunks, dtype=array.dtype)
def partial_by_order(*args, **kwargs):
"""
>>> partial_by_order(5, function=add, other=[(1, 10)])
15
"""
function = kwargs.pop('function')
other = kwargs.pop('other')
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return function(*args2, **kwargs)
def is_scalar_for_elemwise(arg):
"""
>>> is_scalar_for_elemwise(42)
True
>>> is_scalar_for_elemwise('foo')
True
>>> is_scalar_for_elemwise(True)
True
>>> is_scalar_for_elemwise(np.array(42))
True
>>> is_scalar_for_elemwise([1, 2, 3])
True
>>> is_scalar_for_elemwise(np.array([1, 2, 3]))
False
>>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))
False
>>> is_scalar_for_elemwise(np.dtype('i4'))
True
"""
return (np.isscalar(arg) or
not isinstance(getattr(arg, 'shape', None), Iterable) or
isinstance(arg, np.dtype) or
(isinstance(arg, np.ndarray) and arg.ndim == 0))
def broadcast_shapes(*shapes):
"""
Determines output shape from broadcasting arrays.
Parameters
----------
shapes : tuples
The shapes of the arguments.
Returns
-------
output_shape : tuple
Raises
------
ValueError
If the input shapes cannot be successfully broadcast together.
"""
if len(shapes) == 1:
return shapes[0]
out = []
for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):
dim = max(sizes)
if any(i != -1 and i != 1 and i != dim and not np.isnan(i) for i in sizes):
raise ValueError("operands could not be broadcast together with "
"shapes {0}".format(' '.join(map(str, shapes))))
out.append(dim)
return tuple(reversed(out))
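# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It checks NumPy-style broadcasting of shapes, including the size-1 expansion
# rule and the error raised on incompatible dimensions.
def _example_broadcast_shapes():
    assert broadcast_shapes((3, 1), (1, 4)) == (3, 4)
    assert broadcast_shapes((5,), (2, 1), (2, 5)) == (2, 5)
    try:
        broadcast_shapes((3,), (4,))
    except ValueError:
        pass  # incompatible dimensions raise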
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
Examples
--------
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See Also
--------
atop
"""
if not set(['name', 'dtype']).issuperset(kwargs):
msg = "%s does not take the following keyword arguments %s"
raise TypeError(msg % (op.__name__, str(sorted(set(kwargs) - set(['name', 'dtype'])))))
args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]
shapes = [getattr(arg, 'shape', ()) for arg in args]
shapes = [s if isinstance(s, Iterable) else () for s in shapes]
out_ndim = len(broadcast_shapes(*shapes)) # Raises ValueError if dimensions mismatch
expr_inds = tuple(range(out_ndim))[::-1]
arrays = [a for a in args if not is_scalar_for_elemwise(a)]
other = [(i, a) for i, a in enumerate(args) if is_scalar_for_elemwise(a)]
if 'dtype' in kwargs:
dt = kwargs['dtype']
else:
# We follow NumPy's rules for dtype promotion, which special cases
# scalars and 0d ndarrays (which it considers equivalent) by using
# their values to compute the result dtype:
# https://github.com/numpy/numpy/issues/6240
# We don't inspect the values of 0d dask arrays, because these could
# hold potentially very expensive calculations.
vals = [np.empty((1,) * a.ndim, dtype=a.dtype)
if not is_scalar_for_elemwise(a) else a
for a in args]
dt = apply_infer_dtype(op, vals, {}, 'elemwise', suggest_dtype=False)
name = kwargs.get('name', None) or '%s-%s' % (funcname(op),
tokenize(op, dt, *args))
if other:
return atop(partial_by_order, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name, function=op, other=other,
token=funcname(op))
else:
return atop(op, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name)
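# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It applies a binary operator blockwise against a scalar, which is exactly how
# the arithmetic dunder methods on ``Array`` are implemented above.
def _example_elemwise_scalar():
    x = from_array(np.arange(4), chunks=2)
    y = elemwise(operator.add, x, 10)
    # Scalars broadcast against every block; the chunk structure is unchanged.
    assert y.chunks == x.chunks
    assert y.shape == (4,)
    return y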
@wraps(np.around)
def around(x, decimals=0):
return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)
def isnull(values):
""" pandas.isnull for dask arrays """
import pandas as pd
return elemwise(pd.isnull, values, dtype='bool')
def notnull(values):
""" pandas.notnull for dask arrays """
return ~isnull(values)
@wraps(numpy_compat.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(numpy_compat.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype='bool')
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
where_error_message = """
The dask.array version of where only handles the three argument case.
da.where(x > 0, x, 0)
and not the single argument case
da.where(x > 0)
This is because dask.array operations must be able to infer the shape of their
outputs prior to execution. The number of positive elements of x requires
execution. See the ``np.where`` docstring for examples and the following link
for a more thorough explanation:
http://dask.pydata.org/en/latest/array-overview.html#construct
""".strip()
chunks_none_error_message = """
You must specify a chunks= keyword argument.
This specifies the chunksize of your array blocks.
See the following documentation page for details:
http://dask.pydata.org/en/latest/array-creation.html#chunks
""".strip()
@wraps(np.where)
def where(condition, x=None, y=None):
if x is None or y is None:
raise TypeError(where_error_message)
return choose(condition, [y, x])
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes, trim_excess=False):
if (not trim_excess and
not all(bd % div == 0 for i, div in axes.items()
for bd in x.chunks[i])):
msg = "Coarsening factor does not align with block dimensions"
raise ValueError(msg)
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = 'coarsen-' + tokenize(reduction, x, axes, trim_excess)
dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes,
trim_excess))
for key in core.flatten(x._keys()))
chunks = tuple(tuple(int(bd // axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.chunks))
dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
return Array(sharedict.merge(x.dask, (name, dsk)), name, chunks, dtype=dt)
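# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It downsamples an array by applying a reduction over fixed-size windows along
# axis 0, assuming the coarsening factor divides every block evenly.
def _example_coarsen_sum():
    x = from_array(np.arange(8), chunks=4)
    y = coarsen(np.sum, x, {0: 2})
    # Each block of 4 elements collapses to 2 window sums.
    assert y.chunks == ((2, 2),)
    return y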
def split_at_breaks(array, breaks, axis=0):
""" Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@wraps(np.insert)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
if not -arr.ndim <= axis < arr.ndim:
raise IndexError('axis %r is out of bounds for an array of dimension '
'%s' % (axis, arr.ndim))
if axis < 0:
axis += arr.ndim
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
'da.insert only implemented for monotonic ``obj`` argument')
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, 'ndim', 0) == 0:
# we need to turn values into a dask array
name = 'values-' + tokenize(values)
dtype = getattr(values, 'dtype', type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(len(obj) if axis == n else s
for n, s in enumerate(arr.shape))
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd)
in enumerate(zip(arr.chunks,
values.chunks)))
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
@wraps(chunk.broadcast_to)
def broadcast_to(x, shape):
shape = tuple(shape)
ndim_new = len(shape) - x.ndim
if ndim_new < 0 or any(new != old
for new, old in zip(shape[ndim_new:], x.shape)
if old != 1):
raise ValueError('cannot broadcast shape %s to shape %s'
% (x.shape, shape))
name = 'broadcast_to-' + tokenize(x, shape)
chunks = (tuple((s,) for s in shape[:ndim_new]) +
tuple(bd if old > 1 else (new,)
for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])))
dsk = dict(((name,) + (0,) * ndim_new + key[1:],
(chunk.broadcast_to, key, shape[:ndim_new] +
tuple(bd[i] for i, bd in zip(key[1:], chunks[ndim_new:]))))
for key in core.flatten(x._keys()))
return Array(sharedict.merge((name, dsk), x.dask), name, chunks, dtype=x.dtype)
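# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It broadcasts a one-dimensional array against a larger shape; the new leading
# dimension becomes a single chunk while existing chunks are preserved.
def _example_broadcast_to():
    x = from_array(np.arange(3), chunks=3)
    y = broadcast_to(x, (4, 3))
    assert y.chunks == ((4,), (3,))
    assert y.shape == (4, 3)
    return y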
@wraps(np.ravel)
def ravel(array):
return array.reshape((-1,))
@wraps(np.roll)
def roll(array, shift, axis=None):
result = array
if axis is None:
result = ravel(result)
if not isinstance(shift, Integral):
raise TypeError(
"Expect `shift` to be an instance of Integral"
" when `axis` is None."
)
shift = (shift,)
axis = (0,)
else:
try:
len(shift)
except TypeError:
shift = (shift,)
try:
len(axis)
except TypeError:
axis = (axis,)
if len(shift) != len(axis):
raise ValueError("Must have the same number of shifts as axes.")
for i, s in zip(axis, shift):
s = -s
s %= result.shape[i]
sl1 = result.ndim * [slice(None)]
sl2 = result.ndim * [slice(None)]
sl1[i] = slice(s, None)
sl2[i] = slice(None, s)
sl1 = tuple(sl1)
sl2 = tuple(sl2)
result = concatenate([result[sl1], result[sl2]], axis=i)
result = result.reshape(array.shape)
return result
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = 'offset_' + func.__name__
return _offset
@wraps(np.fromfunction)
def fromfunction(func, chunks=None, shape=None, dtype=None):
if chunks:
chunks = normalize_chunks(chunks, shape)
name = 'fromfunction-' + tokenize(func, chunks, shape, dtype)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
offsets = list(product(*aggdims))
shapes = list(product(*chunks))
values = [(np.fromfunction, offset_func(func, offset), shp)
for offset, shp in zip(offsets, shapes)]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype=dtype)
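# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It builds an array lazily from an index function, with each block evaluated
# through an offset wrapper so the function sees global coordinates.
def _example_fromfunction():
    a = fromfunction(lambda i, j: i + j, chunks=(2, 2), shape=(4, 4), dtype='f8')
    assert a.shape == (4, 4)
    assert a.chunks == ((2, 2), (2, 2))
    return a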
@wraps(np.unique)
def unique(x):
name = 'unique-' + x.name
dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
parts = Array._get(sharedict.merge((name, dsk), x.dask), list(dsk.keys()))
return np.unique(np.concatenate(parts))
@wraps(np.bincount)
def bincount(x, weights=None, minlength=None):
if minlength is None:
raise TypeError("Must specify minlength argument in da.bincount")
assert x.ndim == 1
if weights is not None:
assert weights.chunks == x.chunks
# Call np.bincount on each block, possibly with weights
token = tokenize(x, weights, minlength)
name = 'bincount-' + token
if weights is not None:
dsk = dict(((name, i),
(np.bincount, (x.name, i), (weights.name, i), minlength))
for i, _ in enumerate(x._keys()))
dtype = np.bincount([1], weights=[1]).dtype
else:
dsk = dict(((name, i), (np.bincount, (x.name, i), None, minlength))
for i, _ in enumerate(x._keys()))
dtype = np.bincount([]).dtype
# Sum up all of the intermediate bincounts per block
name = 'bincount-sum-' + token
dsk[(name, 0)] = (np.sum, list(dsk), 0)
chunks = ((minlength,),)
dsk = sharedict.merge((name, dsk), x.dask)
if weights is not None:
dsk.update(weights.dask)
return Array(dsk, name, chunks, dtype)
@wraps(np.digitize)
def digitize(a, bins, right=False):
bins = np.asarray(bins)
dtype = np.digitize([0], bins, right=False).dtype
return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
"""
Blocked variant of numpy.histogram.
Follows the signature of numpy.histogram exactly with the following
exceptions:
- Either an iterable specifying the ``bins`` or the number of ``bins``
and a ``range`` argument is required as computing ``min`` and ``max``
over blocked arrays is an expensive operation that must be performed
explicitly.
- ``weights`` must be a dask.array.Array with the same block structure
as ``a``.
Examples
--------
Using number of bins and range:
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array(np.arange(10000), chunks=10)
>>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
>>> bins
array([ 0., 1000., 2000., 3000., 4000., 5000., 6000.,
7000., 8000., 9000., 10000.])
>>> h.compute()
array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])
Explicitly specifying the bins:
>>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
>>> bins
array([ 0, 5000, 10000])
>>> h.compute()
array([5000, 5000])
"""
    if bins is None or (range is None and not np.iterable(bins)):
raise ValueError('dask.array.histogram requires either bins '
'or bins and range to be defined.')
if weights is not None and weights.chunks != a.chunks:
raise ValueError('Input array and weights must have the same '
'chunked structure')
if not np.iterable(bins):
bin_token = bins
mn, mx = range
if mn == mx:
mn -= 0.5
mx += 0.5
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else:
bin_token = bins
token = tokenize(a, bin_token, range, normed, weights, density)
nchunks = len(list(core.flatten(a._keys())))
chunks = ((1,) * nchunks, (len(bins) - 1,))
name = 'histogram-sum-' + token
# Map the histogram to all bins
def block_hist(x, weights=None):
return np.histogram(x, bins, weights=weights)[0][np.newaxis]
if weights is None:
dsk = dict(((name, i, 0), (block_hist, k))
for i, k in enumerate(core.flatten(a._keys())))
dtype = np.histogram([])[0].dtype
else:
a_keys = core.flatten(a._keys())
w_keys = core.flatten(weights._keys())
dsk = dict(((name, i, 0), (block_hist, k, w))
for i, (k, w) in enumerate(zip(a_keys, w_keys)))
dtype = weights.dtype
all_dsk = sharedict.merge(a.dask, (name, dsk))
if weights is not None:
all_dsk.update(weights.dask)
mapped = Array(all_dsk, name, chunks, dtype=dtype)
n = mapped.sum(axis=0)
# We need to replicate normed and density options from numpy
if density is not None:
if density:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / db / n.sum(), bins
else:
return n, bins
else:
# deprecated, will be removed from Numpy 2.0
if normed:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / (n * db).sum(), bins
else:
return n, bins
def eye(N, chunks, M=None, k=0, dtype=float):
"""
Return a 2-D Array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
chunks: int
chunk size of resulting blocks
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : Array of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
"""
if not isinstance(chunks, int):
raise ValueError('chunks must be an int')
    token = tokenize(N, chunks, M, k, dtype)
name_eye = 'eye-' + token
eye = {}
if M is None:
M = N
vchunks = [chunks] * (N // chunks)
if N % chunks != 0:
vchunks.append(N % chunks)
hchunks = [chunks] * (M // chunks)
if M % chunks != 0:
hchunks.append(M % chunks)
for i, vchunk in enumerate(vchunks):
for j, hchunk in enumerate(hchunks):
if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks:
eye[name_eye, i, j] = (np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype)
else:
eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype)
return Array(eye, name_eye, shape=(N, M),
chunks=(chunks, chunks), dtype=dtype)
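# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It builds a chunked identity matrix; only blocks that intersect the requested
# diagonal carry an ``np.eye`` task, the rest are plain zero blocks.
def _example_eye():
    ident = eye(5, chunks=2)
    assert ident.shape == (5, 5)
    # A 5 x 5 array in blocks of 2 leaves a trailing block of size 1.
    assert ident.chunks == ((2, 2, 1), (2, 2, 1))
    return ident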
@wraps(np.diag)
def diag(v):
name = 'diag-' + tokenize(v)
if isinstance(v, np.ndarray):
if v.ndim == 1:
chunks = ((v.shape[0],), (v.shape[0],))
dsk = {(name, 0, 0): (np.diag, v)}
elif v.ndim == 2:
chunks = ((min(v.shape),),)
dsk = {(name, 0): (np.diag, v)}
else:
raise ValueError("Array must be 1d or 2d only")
return Array(dsk, name, chunks, dtype=v.dtype)
if not isinstance(v, Array):
raise TypeError("v must be a dask array or numpy array, "
"got {0}".format(type(v)))
if v.ndim != 1:
if v.chunks[0] == v.chunks[1]:
dsk = dict(((name, i), (np.diag, row[i])) for (i, row)
in enumerate(v._keys()))
return Array(sharedict.merge(v.dask, (name, dsk)), name, (v.chunks[0],), dtype=v.dtype)
else:
raise NotImplementedError("Extracting diagonals from non-square "
"chunked arrays")
chunks_1d = v.chunks[0]
blocks = v._keys()
dsk = {}
for i, m in enumerate(chunks_1d):
for j, n in enumerate(chunks_1d):
key = (name, i, j)
if i == j:
dsk[key] = (np.diag, blocks[i])
else:
dsk[key] = (np.zeros, (m, n))
return Array(sharedict.merge(v.dask, (name, dsk)), name, (chunks_1d, chunks_1d),
dtype=v.dtype)
def triu(m, k=0):
"""
    Upper triangle of an array with elements below the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
        Diagonal below which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
triu : ndarray, shape (M, N)
Upper triangle of `m`, of same shape and data-type as `m`.
See Also
--------
tril : lower triangle of an array
"""
if m.ndim != 2:
raise ValueError('input must be 2 dimensional')
if m.shape[0] != m.shape[1]:
raise NotImplementedError('input must be a square matrix')
if m.chunks[0][0] != m.chunks[1][0]:
        msg = ('Chunks must be square. '
               'Use the .rechunk method to change the size of the chunks.')
raise NotImplementedError(msg)
rdim = len(m.chunks[0])
hdim = len(m.chunks[1])
chunk = m.chunks[0][0]
token = tokenize(m, k)
name = 'triu-' + token
dsk = {}
for i in range(rdim):
for j in range(hdim):
if chunk * (j - i + 1) < k:
dsk[(name, i, j)] = (np.zeros, (m.chunks[0][i], m.chunks[1][j]))
elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):
dsk[(name, i, j)] = (np.triu, (m.name, i, j), k - (chunk * (j - i)))
else:
dsk[(name, i, j)] = (m.name, i, j)
return Array(sharedict.merge((name, dsk), m.dask), name,
shape=m.shape, chunks=m.chunks, dtype=m.dtype)
def tril(m, k=0):
"""
Lower triangle of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, M)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, M)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : upper triangle of an array
"""
if m.ndim != 2:
raise ValueError('input must be 2 dimensional')
if m.shape[0] != m.shape[1]:
raise NotImplementedError('input must be a square matrix')
if not len(set(m.chunks[0] + m.chunks[1])) == 1:
        msg = ('All chunks must be square and of equal size. '
               'Use the .rechunk method to change the size of the chunks.')
raise ValueError(msg)
rdim = len(m.chunks[0])
hdim = len(m.chunks[1])
chunk = m.chunks[0][0]
token = tokenize(m, k)
name = 'tril-' + token
dsk = {}
for i in range(rdim):
for j in range(hdim):
if chunk * (j - i + 1) < k:
dsk[(name, i, j)] = (m.name, i, j)
elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):
dsk[(name, i, j)] = (np.tril, (m.name, i, j), k - (chunk * (j - i)))
else:
dsk[(name, i, j)] = (np.zeros, (m.chunks[0][i], m.chunks[1][j]))
dsk = sharedict.merge(m.dask, (name, dsk))
return Array(dsk, name, shape=m.shape, chunks=m.chunks, dtype=m.dtype)
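# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It extracts the upper and lower triangles of a square array whose chunks are
# themselves square, which is the layout both functions above require.
def _example_triangles():
    x = from_array(np.ones((4, 4)), chunks=(2, 2))
    upper = triu(x)
    lower = tril(x)
    assert upper.shape == lower.shape == (4, 4)
    assert upper.chunks == x.chunks
    return upper, lower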
def chunks_from_arrays(arrays):
""" Chunks tuple from nested list of arrays
>>> x = np.array([1, 2])
>>> chunks_from_arrays([x, x])
((2, 2),)
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x], [x]])
((1, 1), (2,))
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x, x]])
((1,), (2, 2))
>>> chunks_from_arrays([1, 1])
((1, 1),)
"""
if not arrays:
return ()
result = []
dim = 0
def shape(x):
try:
return x.shape
except AttributeError:
return (1,)
while isinstance(arrays, (list, tuple)):
result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))
arrays = arrays[0]
dim += 1
return tuple(result)
def deepfirst(seq):
""" First element in a nested list
>>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
1
"""
if not isinstance(seq, (list, tuple)):
return seq
else:
return deepfirst(seq[0])
def ndimlist(seq):
if not isinstance(seq, (list, tuple)):
return 0
elif not seq:
return 1
else:
return 1 + ndimlist(seq[0])
def shapelist(a):
""" Get the shape of nested list """
if type(a) is list:
return tuple([len(a)] + list(shapelist(a[0])))
else:
return ()
def reshapelist(shape, seq):
""" Reshape iterator to nested shape
>>> reshapelist((2, 3), range(6))
[[0, 1, 2], [3, 4, 5]]
"""
if len(shape) == 1:
return list(seq)
else:
n = int(len(seq) / shape[0])
return [reshapelist(shape[1:], part) for part in partition(n, seq)]
def transposelist(arrays, axes, extradims=0):
""" Permute axes of nested list
>>> transposelist([[1,1,1],[1,1,1]], [2,1])
[[[1, 1], [1, 1], [1, 1]]]
>>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)
[[[[1], [1]], [[1], [1]], [[1], [1]]]]
"""
if len(axes) != ndimlist(arrays):
raise ValueError("Length of axes should equal depth of nested arrays")
if extradims < 0:
        raise ValueError("`extradims` should be non-negative")
if len(axes) > len(set(axes)):
raise ValueError("`axes` should be unique")
ndim = max(axes) + 1
shape = shapelist(arrays)
newshape = [shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)]
result = list(core.flatten(arrays))
return reshapelist(newshape, result)
def concatenate3(arrays):
""" Recursive np.concatenate
Input should be a nested list of numpy arrays arranged in the order they
    should appear in the array itself. Each array should have the same number
    of dimensions as the desired output, and the nesting depth of the lists
    should match that number of dimensions.
>>> x = np.array([[1, 2]])
>>> concatenate3([[x, x, x], [x, x, x]])
array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])
>>> concatenate3([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
arrays = concrete(arrays)
if not arrays:
return np.empty(0)
advanced = max(core.flatten(arrays, container=(list, tuple)),
key=lambda x: getattr(x, '__array_priority__', 0))
module = package_of(type(advanced)) or np
if module is not np and hasattr(module, 'concatenate'):
x = unpack_singleton(arrays)
return _concatenate2(arrays, axes=list(range(x.ndim)))
ndim = ndimlist(arrays)
if not ndim:
return arrays
chunks = chunks_from_arrays(arrays)
shape = tuple(map(sum, chunks))
def dtype(x):
try:
return x.dtype
except AttributeError:
return type(x)
result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))
for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):
if hasattr(arr, 'ndim'):
while arr.ndim < ndim:
arr = arr[None, ...]
result[idx] = arr
return result
def concatenate_axes(arrays, axes):
""" Recursively call np.concatenate along axes """
if len(axes) != ndimlist(arrays):
raise ValueError("Length of axes should equal depth of nested arrays")
extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))
return concatenate3(transposelist(arrays, axes, extradims=extradims))
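# Illustrative sketch (added for exposition; hypothetical helper, never called).
# It concatenates a doubly nested list of in-memory blocks along both axes,
# which is how block layouts are reassembled into a single ndarray.
def _example_concatenate_axes():
    block = np.ones((2, 3))
    out = concatenate_axes([[block, block]], axes=[0, 1])
    # One row of blocks, two columns of blocks: 2 rows and 6 columns overall.
    assert out.shape == (2, 6)
    return out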
def to_hdf5(filename, *args, **kwargs):
""" Store arrays in HDF5 file
This saves several dask arrays into several datapaths in an HDF5 file.
It creates the necessary datasets and handles clean file opening/closing.
>>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP
or
>>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP
    Optionally provide arguments as you would to ``h5py.File.create_dataset``
>>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP
This can also be used as a method on a single Array
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
if len(args) == 1 and isinstance(args[0], dict):
data = args[0]
elif (len(args) == 2 and
isinstance(args[0], str) and
isinstance(args[1], Array)):
data = {args[0]: args[1]}
else:
raise ValueError("Please provide {'/data/path': array} dictionary")
chunks = kwargs.pop('chunks', True)
import h5py
    with h5py.File(filename, mode='a') as f:  # explicit mode; older h5py defaulted to 'a'
dsets = [f.require_dataset(dp, shape=x.shape, dtype=x.dtype,
chunks=tuple([c[0] for c in x.chunks])
if chunks is True else chunks, **kwargs)
for dp, x in data.items()]
store(list(data.values()), dsets)
def interleave_none(a, b):
"""
>>> interleave_none([0, None, 2, None], [1, 3])
(0, 1, 2, 3)
"""
result = []
i = j = 0
n = len(a) + len(b)
while i + j < n:
if a[i] is not None:
result.append(a[i])
i += 1
else:
result.append(b[j])
i += 1
j += 1
return tuple(result)
def keyname(name, i, okey):
"""
>>> keyname('x', 3, [None, None, 0, 2])
('x', 3, 0, 2)
"""
return (name, i) + tuple(k for k in okey if k is not None)
def _vindex(x, *indexes):
""" Point wise slicing
This is equivalent to numpy slicing with multiple input lists
>>> x = np.arange(56).reshape((7, 8))
>>> x
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55]])
>>> d = from_array(x, chunks=(3, 4))
>>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])
>>> result.compute()
array([ 0, 9, 48, 7])
"""
indexes = [list(index) if index is not None else index for index in indexes]
bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]
bounds2 = [b for i, b in zip(indexes, bounds) if i is not None]
axis = _get_axis(indexes)
token = tokenize(x, indexes)
out_name = 'vindex-merge-' + token
points = list()
for i, idx in enumerate(zip(*[i for i in indexes if i is not None])):
block_idx = [np.searchsorted(b, ind, 'right') - 1
for b, ind in zip(bounds2, idx)]
inblock_idx = [ind - bounds2[k][j]
for k, (ind, j) in enumerate(zip(idx, block_idx))]
points.append((i, tuple(block_idx), tuple(inblock_idx)))
chunks = [c for i, c in zip(indexes, x.chunks) if i is None]
chunks.insert(0, (len(points),) if points else (0,))
chunks = tuple(chunks)
if points:
per_block = groupby(1, points)
per_block = dict((k, v) for k, v in per_block.items() if v)
other_blocks = list(product(*[list(range(len(c))) if i is None else [None]
for i, c in zip(indexes, x.chunks)]))
full_slices = [slice(None, None) if i is None else None for i in indexes]
name = 'vindex-slice-' + token
dsk = dict((keyname(name, i, okey),
(_vindex_transpose,
(_vindex_slice, (x.name,) + interleave_none(okey, key),
interleave_none(full_slices, list(zip(*pluck(2, per_block[key]))))),
axis))
for i, key in enumerate(per_block)
for okey in other_blocks)
dsk.update((keyname('vindex-merge-' + token, 0, okey),
(_vindex_merge,
[list(pluck(0, per_block[key])) for key in per_block],
[keyname(name, i, okey) for i in range(len(per_block))]))
for okey in other_blocks)
return Array(sharedict.merge(x.dask, (out_name, dsk)), out_name, chunks,
x.dtype)
# output has a zero dimension, just create a new zero-shape array with the
# same dtype
from .wrap import empty
return empty(tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype,
name=out_name)
def _get_axis(indexes):
""" Get axis along which point-wise slicing results lie
    Rather than re-implementing NumPy's placement rule for the broadcast
    dimension of advanced-indexing results, this determines it empirically by
    performing the equivalent indexing operation on a small throwaway array.
>>> _get_axis([[1, 2], None, [1, 2], None])
0
>>> _get_axis([None, [1, 2], [1, 2], None])
1
>>> _get_axis([None, None, [1, 2], [1, 2]])
2
"""
ndim = len(indexes)
indexes = [slice(None, None) if i is None else [0] for i in indexes]
x = np.empty((2,) * ndim)
x2 = x[tuple(indexes)]
return x2.shape.index(1)
def _vindex_slice(block, points):
""" Pull out point-wise slices from block """
points = [p if isinstance(p, slice) else list(p) for p in points]
return block[tuple(points)]
def _vindex_transpose(block, axis):
""" Rotate block so that points are on the first dimension """
axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))
return block.transpose(axes)
def _vindex_merge(locations, values):
"""
>>> locations = [0], [2, 1]
>>> values = [np.array([[1, 2, 3]]),
... np.array([[10, 20, 30], [40, 50, 60]])]
>>> _vindex_merge(locations, values)
array([[ 1, 2, 3],
[40, 50, 60],
[10, 20, 30]])
"""
locations = list(map(list, locations))
values = list(values)
n = sum(map(len, locations))
shape = list(values[0].shape)
shape[0] = n
shape = tuple(shape)
dtype = values[0].dtype
x = np.empty(shape, dtype=dtype)
ind = [slice(None, None) for i in range(x.ndim)]
for loc, val in zip(locations, values):
ind[0] = loc
x[tuple(ind)] = val
return x
@wraps(np.array)
def array(x, dtype=None, ndmin=None):
while x.ndim < ndmin:
x = x[None, :]
if dtype is not None and x.dtype != dtype:
x = x.astype(dtype)
return x
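# Note: despite ``@wraps(np.array)``, the helper above only promotes an
# existing dask Array (prepending dimensions for ``ndmin`` and casting to
# ``dtype``); it does not construct arrays from raw Python data.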
@wraps(np.cov)
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
# This was copied almost verbatim from np.cov
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X = X - X.mean(axis=1 - axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
@wraps(np.corrcoef)
def corrcoef(x, y=None, rowvar=1):
from .ufunc import sqrt
c = cov(x, y, rowvar)
if c.shape == ():
return c / c
d = diag(c)
d = d.reshape((d.shape[0], 1))
sqr_d = sqrt(d)
return (c / sqr_d) / sqr_d.T
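# Example (illustrative): both functions above return lazy dask arrays.  For
# ``d = from_array(np.ones((4, 3)), chunks=2)``, ``cov(d)`` and ``corrcoef(d)``
# each have shape (4, 4), matching NumPy's ``rowvar=1`` behaviour.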
def to_npy_stack(dirname, x, axis=0):
""" Write dask array to a stack of .npy files
This partitions the dask.array along one axis and stores each block along
that axis as a single .npy file in the specified directory
Examples
--------
>>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP
>>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP
$ tree data/
data/
|-- 0.npy
|-- 1.npy
|-- 2.npy
|-- info
The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``
respectively, as is specified by the chunk size along the zeroth axis. The
info file stores the dtype, chunks, and axis information of the array.
You can load these stacks with the ``da.from_npy_stack`` function.
>>> y = da.from_npy_stack('data/') # doctest: +SKIP
See Also
--------
from_npy_stack
"""
chunks = tuple((c if i == axis else (sum(c),))
for i, c in enumerate(x.chunks))
xx = x.rechunk(chunks)
if not os.path.exists(dirname):
        os.mkdir(dirname)
meta = {'chunks': chunks, 'dtype': x.dtype, 'axis': axis}
with open(os.path.join(dirname, 'info'), 'wb') as f:
pickle.dump(meta, f)
name = 'to-npy-stack-' + str(uuid.uuid1())
dsk = dict(((name, i), (np.save, os.path.join(dirname, '%d.npy' % i), key))
for i, key in enumerate(core.flatten(xx._keys())))
Array._get(sharedict.merge(dsk, xx.dask), list(dsk))
def from_npy_stack(dirname, mmap_mode='r'):
""" Load dask array from stack of npy files
See ``da.to_npy_stack`` for docstring
Parameters
----------
dirname: string
Directory of .npy files
mmap_mode: (None or 'r')
Read data in memory map mode
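    Examples
    --------
    >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4))  # doctest: +SKIP
    >>> da.to_npy_stack('data/', x, axis=0)  # doctest: +SKIP
    >>> y = da.from_npy_stack('data/')  # doctest: +SKIP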
"""
with open(os.path.join(dirname, 'info'), 'rb') as f:
info = pickle.load(f)
dtype = info['dtype']
chunks = info['chunks']
axis = info['axis']
name = 'from-npy-stack-%s' % dirname
keys = list(product([name], *[range(len(c)) for c in chunks]))
values = [(np.load, os.path.join(dirname, '%d.npy' % i), mmap_mode)
for i in range(len(chunks[axis]))]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype)
def _astype(x, astype_dtype=None, **kwargs):
return x.astype(astype_dtype, **kwargs)
@wraps(np.round)
def round(a, decimals=0):
return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)
@wraps(np.swapaxes)
def swapaxes(a, axis1, axis2):
if axis1 == axis2:
return a
if axis1 < 0:
axis1 = axis1 + a.ndim
if axis2 < 0:
axis2 = axis2 + a.ndim
ind = list(range(a.ndim))
out = list(ind)
out[axis1], out[axis2] = axis2, axis1
return atop(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2,
dtype=a.dtype)
@wraps(np.repeat)
def repeat(a, repeats, axis=None):
if axis is None:
if a.ndim == 1:
axis = 0
else:
raise NotImplementedError("Must supply an integer axis value")
if not isinstance(repeats, Integral):
raise NotImplementedError("Only integer valued repeats supported")
if repeats == 1:
return a
cchunks = np.cumsum((0,) + a.chunks[axis])
slices = []
for c_start, c_stop in sliding_window(2, cchunks):
ls = np.linspace(c_start, c_stop, repeats).round(0)
for ls_start, ls_stop in sliding_window(2, ls):
if ls_start != ls_stop:
slices.append(slice(ls_start, ls_stop))
all_slice = slice(None, None, None)
slices = [(all_slice,) * axis + (s,) + (all_slice,) * (a.ndim - axis - 1)
for s in slices]
slabs = [a[slc] for slc in slices]
out = []
for slab in slabs:
chunks = list(slab.chunks)
assert len(chunks[axis]) == 1
chunks[axis] = (chunks[axis][0] * repeats,)
chunks = tuple(chunks)
result = slab.map_blocks(np.repeat, repeats, axis=axis, chunks=chunks,
dtype=slab.dtype)
out.append(result)
return concatenate(out, axis=axis)
@wraps(np.tile)
def tile(A, reps):
if not isinstance(reps, Integral):
raise NotImplementedError("Only integer valued `reps` supported.")
if reps < 0:
raise ValueError("Negative `reps` are not allowed.")
elif reps == 0:
return A[..., :0]
elif reps == 1:
return A
return concatenate(reps * [A], axis=-1)
def slice_with_dask_array(x, index):
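    """ Slice an array with a dask array index (e.g. a boolean mask)
    The result is one-dimensional with unknown chunk sizes (``np.nan``),
    because the number of selected elements in each block is only known at
    compute time.
    """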
y = elemwise(getitem, x, index, dtype=x.dtype)
name = 'getitem-' + tokenize(x, index)
dsk = {(name, i): k
for i, k in enumerate(core.flatten(y._keys()))}
chunks = ((np.nan,) * y.npartitions,)
return Array(sharedict.merge(y.dask, (name, dsk)), name, chunks, x.dtype)
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/core.py",
"copies": "1",
"size": "125859",
"license": "bsd-3-clause",
"hash": 2918324616147735600,
"line_mean": 31.4629868455,
"line_max": 107,
"alpha_frac": 0.5569407035,
"autogenerated": false,
"ratio": 3.5181696203947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45751103238947,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from bisect import bisect
from collections import Iterable, MutableMapping, Mapping
from collections import Iterator
from functools import partial, wraps
import inspect
from itertools import product
from numbers import Integral, Number
import operator
from operator import add, getitem, mul
import os
import sys
import traceback
import pickle
from threading import Lock
import uuid
import warnings
try:
from cytoolz.curried import (partition, concat, pluck, join, first,
groupby, valmap, accumulate, interleave,
sliding_window, assoc)
except ImportError:
from toolz.curried import (partition, concat, pluck, join, first,
groupby, valmap, accumulate,
interleave, sliding_window, assoc)
from toolz import pipe, map, reduce
import numpy as np
from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..base import Base, tokenize, normalize_token
from ..context import _globals
from ..utils import (homogeneous_deepmap, ndeepmap, ignoring, concrete,
is_integer, IndexCallable, funcname, derived_from,
SerializableLock, ensure_dict)
from ..compatibility import unicode, long, getargspec, zip_longest, apply
from ..delayed import to_task_dask
from .. import threaded, core
from .. import sharedict
from ..sharedict import ShareDict
def getarray(a, b, lock=None):
""" Mimics getitem but includes call to np.asarray
>>> getarray([1, 2, 3, 4, 5], slice(1, 4))
array([2, 3, 4])
"""
if isinstance(b, tuple) and any(x is None for x in b):
b2 = tuple(x for x in b if x is not None)
b3 = tuple(None if x is None else slice(None, None)
for x in b if not isinstance(x, (int, long)))
return getarray(a, b2, lock)[b3]
if lock:
lock.acquire()
try:
c = a[b]
if type(c) != np.ndarray:
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
def getarray_nofancy(a, b, lock=None):
""" A simple wrapper around ``getarray``.
Used to indicate to the optimization passes that the backend doesn't
support "fancy indexing"
"""
return getarray(a, b, lock=lock)
def getarray_inline(a, b, lock=None):
return getarray(a, b, lock=lock)
from .optimization import optimize, fuse_slice
def slices_from_chunks(chunks):
""" Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in chunks]
shapes = product(*chunks)
starts = product(*cumdims)
return [tuple(slice(s, s + dim) for s, dim in zip(start, shape))
for start, shape in zip(starts, shapes)]
def getem(arr, chunks, shape=None, out_name=None, fancy=True, lock=False):
""" Dask getting various chunks from an array-like
>>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
"""
out_name = out_name or arr
chunks = normalize_chunks(chunks, shape)
keys = list(product([out_name], *[range(len(bds)) for bds in chunks]))
slices = slices_from_chunks(chunks)
getter = getarray if fancy else getarray_nofancy
if lock:
values = [(getter, arr, x, lock) for x in slices]
else:
values = [(getter, arr, x) for x in slices]
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return homogeneous_deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,)),
consolidate=None):
""" Find block dimensions from arguments
Parameters
----------
argpairs: iterable
name, ijk index pairs
numblocks: dict
maps {name: number of blocks}
sentinels: iterable (optional)
values for singleton dimensions
consolidate: func (optional)
use this to reduce each set of common blocks into a smaller set
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
L = concat([zip(inds, dims) for (x, inds), (x, dims)
in join(first, argpairs, first, numblocks.items())])
g = groupby(0, L)
g = dict((k, set([d for i, d in v])) for k, v in g.items())
g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())
if consolidate:
return valmap(consolidate, g2)
if g2 and not set(map(len, g2.values())) == set([1]):
raise ValueError("Shapes do not align %s" % g)
return valmap(first, g2)
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.::
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
yield an embarrassingly parallel communication pattern and is read as
$$ z_i = func(x_i, y_i) $$
More complex patterns may emerge, including multiple indices::
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
Indices missing in the output but present in the inputs results in many
inputs being sent to one function (see examples).
Examples
--------
Simple embarrassing map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Pass ``concatenate=True`` to concatenate arrays ahead of time
>>> top(f, 'z', 'i', 'x', 'ij', 'y', 'ij', concatenate=True,
... numblocks={'x': (2, 2), 'y': (2, 2,)}) # doctest: +SKIP
{('z', 0): (f, (concatenate_axes, [('x', 0, 0), ('x', 0, 1)], (1,)),
(concatenate_axes, [('y', 0, 0), ('y', 0, 1)], (1,)))
('z', 1): (f, (concatenate_axes, [('x', 1, 0), ('x', 1, 1)], (1,)),
(concatenate_axes, [('y', 1, 0), ('y', 1, 1)], (1,)))}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
Support keyword arguments with apply
>>> def f(a, b=0): return a + b
>>> top(f, 'z', 'i', 'x', 'i', numblocks={'x': (2,)}, b=10) # doctest: +SKIP
{('z', 0): (apply, f, [('x', 0)], {'b': 10}),
('z', 1): (apply, f, [('x', 1)], {'b': 10})}
See Also
--------
atop
"""
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
argpairs = list(partition(2, arrind_pairs))
assert set(numblocks) == set(pluck(0, argpairs))
all_indices = pipe(argpairs, pluck(1), concat, set)
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
for k in new_axes:
dims[k] = 1
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
tups = lol_tuples((arg,), ind, kd, dummies)
if any(nb == 1 for nb in numblocks[arg]):
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
else:
tups2 = tups
if concatenate and isinstance(tups2, list):
axes = [n for n, i in enumerate(ind) if i in dummies]
tups2 = (concatenate_axes, tups2, axes)
args.append(tups2)
valtups.append(args)
if not kwargs: # will not be used in an apply, should be a tuple
valtups = [tuple(vt) for vt in valtups]
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
dsk = {}
# Unpack delayed objects in kwargs
if kwargs:
task, dsk2 = to_task_dask(kwargs)
if dsk2:
dsk.update(ensure_dict(dsk2))
kwargs2 = task
else:
kwargs2 = kwargs
vals = [(apply, func, vt, kwargs2) for vt in valtups]
else:
vals = [(func,) + vt for vt in valtups]
dsk.update(dict(zip(keys, vals)))
return dsk
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if not isinstance(arrays, (list, tuple)):
return arrays
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=True):
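    """ Infer output dtype by calling ``func`` on small one-element arrays
    Dask Array arguments are replaced with ``np.ones`` of matching ndim and
    dtype; if the call raises, a ``ValueError`` asking for an explicit
    ``dtype`` is raised instead.
    """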
args = [np.ones((1,) * x.ndim, dtype=x.dtype)
if isinstance(x, Array) else x for x in args]
try:
o = func(*args, **kwargs)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
suggest = ("Please specify the dtype explicitly using the "
"`dtype` kwarg.\n\n") if suggest_dtype else ""
msg = ("`dtype` inference failed in `{0}`.\n\n"
"{1}"
"Original error is below:\n"
"------------------------\n"
"{2}\n\n"
"Traceback:\n"
"---------\n"
"{3}").format(funcname, suggest, repr(e), tb)
else:
msg = None
if msg is not None:
raise ValueError(msg)
return o.dtype
def map_blocks(func, *args, **kwargs):
""" Map a function across all blocks of a dask array.
Parameters
----------
func : callable
Function to apply to every block in the array.
args : dask arrays or constants
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function.
name : string, optional
The key name to use for the array. If not provided, will be determined
by a hash of the arguments.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,)>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
Your block function can learn where in the array it is if it supports a
``block_id`` keyword argument. This will receive entries like (2, 0, 1),
the position of the block in the dask array.
>>> def func(block, block_id=None):
... pass
You may specify the name of the resulting task in the graph with the
optional ``name`` keyword argument.
>>> y = x.map_blocks(lambda x: x + 1, name='increment')
"""
if not callable(func):
msg = ("First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)")
raise TypeError(msg % type(func).__name__)
name = kwargs.pop('name', None)
name = name or '%s-%s' % (funcname(func), tokenize(func, args, **kwargs))
dtype = kwargs.pop('dtype', None)
chunks = kwargs.pop('chunks', None)
drop_axis = kwargs.pop('drop_axis', [])
new_axis = kwargs.pop('new_axis', [])
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis]
if drop_axis and new_axis:
raise ValueError("Can't specify drop_axis and new_axis together")
arrs = [a for a in args if isinstance(a, Array)]
other = [(i, a) for i, a in enumerate(args) if not isinstance(a, Array)]
argpairs = [(a.name, tuple(range(a.ndim))[::-1]) for a in arrs]
numblocks = {a.name: a.numblocks for a in arrs}
arginds = list(concat(argpairs))
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
try:
spec = getargspec(func)
block_id = ('block_id' in spec.args or
'block_id' in getattr(spec, 'kwonly_args', ()))
except:
block_id = False
if block_id:
kwargs['block_id'] = '__dummy__'
if other:
dsk = top(partial_by_order, name, out_ind, *arginds,
numblocks=numblocks, function=func, other=other,
**kwargs)
else:
dsk = top(func, name, out_ind, *arginds, numblocks=numblocks,
**kwargs)
# If func has block_id as an argument, add it to the kwargs for each call
if block_id:
for k in dsk.keys():
dsk[k] = dsk[k][:-1] + (assoc(dsk[k][-1], 'block_id', k[1:]),)
if dtype is None:
if block_id:
kwargs2 = assoc(kwargs, 'block_id', first(dsk.keys())[1:])
else:
kwargs2 = kwargs
dtype = apply_infer_dtype(func, args, kwargs2, 'map_blocks')
if len(arrs) == 1:
numblocks = list(arrs[0].numblocks)
else:
dims = broadcast_dimensions(argpairs, numblocks)
numblocks = [b for (_, b) in sorted(dims.items(), reverse=True)]
if drop_axis:
if any(numblocks[i] > 1 for i in drop_axis):
raise ValueError("Can't drop an axis with more than 1 block. "
"Please use `atop` instead.")
dsk = dict((tuple(k for i, k in enumerate(k)
if i - 1 not in drop_axis), v)
for k, v in dsk.items())
numblocks = [n for i, n in enumerate(numblocks) if i not in drop_axis]
elif new_axis:
dsk, old_dsk = dict(), dsk
for key in old_dsk:
new_key = list(key)
for i in new_axis:
new_key.insert(i + 1, 0)
dsk[tuple(new_key)] = old_dsk[key]
for i in sorted(new_axis):
numblocks.insert(i, 1)
if chunks:
if len(chunks) != len(numblocks):
raise ValueError("Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(numblocks)))
chunks2 = []
for i, (c, nb) in enumerate(zip(chunks, numblocks)):
if isinstance(c, tuple):
if not len(c) == nb:
raise ValueError("Dimension {0} has {1} blocks, "
"chunks specified with "
"{2} blocks".format(i, nb, len(c)))
chunks2.append(c)
else:
chunks2.append(nb * (c,))
else:
if len(arrs) == 1:
chunks2 = list(arrs[0].chunks)
else:
try:
chunks2 = list(broadcast_chunks(*[a.chunks for a in arrs]))
except:
raise ValueError("Arrays in `map_blocks` don't align, can't "
"infer output chunks. Please provide "
"`chunks` kwarg.")
if drop_axis:
chunks2 = [c for (i, c) in enumerate(chunks2) if i not in drop_axis]
elif new_axis:
for i in sorted(new_axis):
chunks2.insert(i, (1,))
chunks = tuple(chunks2)
return Array(sharedict.merge((name, dsk), *[a.dask for a in arrs]),
name, chunks, dtype)
def broadcast_chunks(*chunkss):
""" Construct a chunks tuple that broadcasts many chunks tuples
>>> a = ((5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((5, 5),)
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((1,), (5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((3, 3,), (5, 5),)
>>> broadcast_chunks(a, b)
Traceback (most recent call last):
...
ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
"""
if len(chunkss) == 1:
return chunkss[0]
n = max(map(len, chunkss))
chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]
result = []
for i in range(n):
step1 = [c[i] for c in chunkss2]
if all(c == (1,) for c in step1):
step2 = step1
else:
step2 = [c for c in step1 if c != (1,)]
if len(set(step2)) != 1:
raise ValueError("Chunks do not align: %s" % str(step2))
result.append(step2[0])
return tuple(result)
@wraps(np.squeeze)
def squeeze(a, axis=None):
if 1 not in a.shape:
return a
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
b = a.map_blocks(partial(np.squeeze, axis=axis), dtype=a.dtype)
chunks = tuple(bd for bd in b.chunks if bd != (1,))
name = 'squeeze-' + tokenize(a, axis)
old_keys = list(product([b.name], *[range(len(bd)) for bd in b.chunks]))
new_keys = list(product([name], *[range(len(bd)) for bd in chunks]))
dsk = {n: b.dask[o] for o, n in zip(old_keys, new_keys)}
return Array(sharedict.merge(b.dask, (name, dsk)), name, chunks, dtype=a.dtype)
def topk(k, x):
""" The top k elements of an array
Returns the k greatest elements of the array in sorted order. Only works
on arrays of a single dimension.
This assumes that ``k`` is small. All results will be returned in a single
chunk.
Examples
--------
>>> x = np.array([5, 1, 3, 6])
>>> d = from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
"""
if x.ndim != 1:
raise ValueError("Topk only works on arrays of one dimension")
token = tokenize(k, x)
name = 'chunk.topk-' + token
dsk = dict(((name, i), (chunk.topk, k, key))
for i, key in enumerate(x._keys()))
name2 = 'topk-' + token
dsk[(name2, 0)] = (getitem, (np.sort, (np.concatenate, list(dsk))),
slice(-1, -k - 1, -1))
chunks = ((k,),)
return Array(sharedict.merge((name2, dsk), x.dask), name2, chunks, dtype=x.dtype)
def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or iterable of Arrays
targets: array-like or iterable of array-likes
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular ``threading.Lock`` object to be shared among all writes.
regions: tuple of slices or iterable of tuple of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets, respectively.
compute: boolean, optional
        If true, compute immediately; otherwise return a ``dask.delayed.Delayed``
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError("Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets)))
if isinstance(regions, tuple) or regions is None:
regions = [regions]
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources)
if len(sources) != len(regions):
        raise ValueError("Number of sources [%d] and targets [%d] does not "
                         "match number of regions [%d]"
                         % (len(sources), len(targets), len(regions)))
updates = {}
keys = []
for tgt, src, reg in zip(targets, sources, regions):
# if out is a delayed object update dictionary accordingly
try:
dsk = {}
dsk.update(tgt.dask)
tgt = tgt.key
except AttributeError:
dsk = {}
update = insert_to_ooc(tgt, src, lock=lock, region=reg)
keys.extend(update)
update.update(dsk)
updates.update(update)
name = 'store-' + tokenize(*keys)
dsk = sharedict.merge((name, updates), *[src.dask for src in sources])
if compute:
Array._get(dsk, keys, **kwargs)
else:
from ..delayed import Delayed
dsk.update({name: keys})
return Delayed(name, dsk)
def blockdims_from_blockshape(shape, chunks):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
>>> blockdims_from_blockshape((10, 0), (4, 0))
((4, 4, 2), (0,))
"""
if chunks is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
if np.isnan(sum(shape)) or np.isnan(sum(chunks)):
raise ValueError("Array chunk sizes are unknown. shape: %s, chunks: %s"
% (shape, chunks))
if not all(map(is_integer, chunks)):
raise ValueError("chunks can only contain integers.")
if not all(map(is_integer, shape)):
raise ValueError("shape can only contain integers.")
shape = tuple(map(int, shape))
chunks = tuple(map(int, chunks))
return tuple(((bd,) * (d // bd) + ((d % bd,) if d % bd else ())
if d else (0,))
for d, bd in zip(shape, chunks))
def finalize(results):
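    """ Assemble computed blocks into a single concrete result """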
if not results:
return concatenate3(results)
results2 = results
while isinstance(results2, (tuple, list)):
if len(results2) > 1:
return concatenate3(results)
else:
results2 = results2[0]
return unpack_singleton(results)
class Array(Base):
""" Parallel Dask Array
A parallel nd-array comprised of many numpy arrays arranged in a grid.
This constructor is for advanced uses only. For normal use see the
``da.from_array`` function.
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
See Also
--------
dask.array.from_array
"""
__slots__ = 'dask', 'name', '_chunks', 'dtype'
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __new__(cls, dask, name, chunks, dtype, shape=None):
self = super(Array, cls).__new__(cls)
assert isinstance(dask, Mapping)
if not isinstance(dask, ShareDict):
s = ShareDict()
s.update_with_key(dask, key=name)
dask = s
self.dask = dask
self.name = name
self._chunks = normalize_chunks(chunks, shape)
if self._chunks is None:
raise ValueError(chunks_none_error_message)
if dtype is None:
raise ValueError("You must specify the dtype of the array")
self.dtype = np.dtype(dtype)
for plugin in _globals.get('array_plugins', ()):
result = plugin(self)
if result is not None:
self = result
return self
def __reduce__(self):
return (Array, (self.dask, self.name, self.chunks, self.dtype))
@property
def numblocks(self):
return tuple(map(len, self.chunks))
@property
def npartitions(self):
return reduce(mul, self.numblocks, 1)
@property
def shape(self):
return tuple(map(sum, self.chunks))
def _get_chunks(self):
return self._chunks
def _set_chunks(self, chunks):
raise TypeError("Can not set chunks directly\n\n"
"Please use the rechunk method instead:\n"
" x.rechunk(%s)" % str(chunks))
chunks = property(_get_chunks, _set_chunks, "chunks property")
def __len__(self):
return sum(self.chunks[0])
def __repr__(self):
"""
>>> import dask.array as da
>>> da.ones((10, 10), chunks=(5, 5), dtype='i4')
dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5)>
"""
chunksize = str(tuple(c[0] if c else 0 for c in self.chunks))
name = self.name.rsplit('-', 1)[0]
return ("dask.array<%s, shape=%s, dtype=%s, chunksize=%s>" %
(name, self.shape, self.dtype, chunksize))
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
""" Number of elements in array """
return reduce(mul, self.shape, 1)
@property
def nbytes(self):
""" Number of bytes in array """
return self.size * self.dtype.itemsize
@property
def itemsize(self):
""" Length of one array element in bytes """
return self.dtype.itemsize
def _keys(self, *args):
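        """ Nested list of the dask graph keys for this array's blocks """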
if not args:
try:
return self._cached_keys
except AttributeError:
pass
if not self.chunks:
return [(self.name,)]
ind = len(args)
if ind + 1 == self.ndim:
result = [(self.name,) + args + (i,)
for i in range(self.numblocks[ind])]
else:
result = [self._keys(*(args + (i,)))
for i in range(self.numblocks[ind])]
if not args:
self._cached_keys = result
return result
__array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
@property
def _elemwise(self):
return elemwise
@wraps(store)
def store(self, target, **kwargs):
return store([self], [target], **kwargs)
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
        Optionally provide arguments as you would to ``h5py.File.create_dataset``
>>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
return to_hdf5(filename, datapath, self, **kwargs)
def to_dask_dataframe(self, columns=None):
""" Convert dask Array to dask Dataframe
Parameters
----------
columns: list or string
list of column names if DataFrame, single string if Series
See Also
--------
dask.dataframe.from_dask_array
"""
from ..dataframe import from_dask_array
return from_dask_array(self, columns=columns)
def cache(self, store=None, **kwargs):
""" Evaluate and cache array
Parameters
----------
store: MutableMapping or ndarray-like
Place to put computed and cached chunks
kwargs:
Keyword arguments to pass on to ``get`` function for scheduling
Examples
--------
        This triggers evaluation and stores the result in either
1. An ndarray object supporting setitem (see da.store)
2. A MutableMapping like a dict or chest
It then returns a new dask array that points to this store.
This returns a semantically equivalent dask array.
>>> import dask.array as da
>>> x = da.arange(5, chunks=2)
>>> y = 2*x + 1
>>> z = y.cache() # triggers computation
>>> y.compute() # Does entire computation
array([1, 3, 5, 7, 9])
>>> z.compute() # Just pulls from store
array([1, 3, 5, 7, 9])
You might base a cache off of an array like a numpy array or
h5py.Dataset.
>>> cache = np.empty(5, dtype=x.dtype)
>>> z = y.cache(store=cache)
>>> cache
array([1, 3, 5, 7, 9])
Or one might use a MutableMapping like a dict or chest
>>> cache = dict()
>>> z = y.cache(store=cache)
>>> cache # doctest: +SKIP
{('x', 0): array([1, 3]),
('x', 1): array([5, 7]),
('x', 2): array([9])}
"""
warnings.warn("Deprecation Warning: The `cache` method is deprecated, "
"and will be removed in the next release. To achieve "
"the same behavior, either write to disk or use "
"`Client.persist`, from `dask.distributed`.")
if store is not None and hasattr(store, 'shape'):
self.store(store)
return from_array(store, chunks=self.chunks)
if store is None:
try:
from chest import Chest
store = Chest()
except ImportError:
if self.nbytes <= 1e9:
store = dict()
else:
msg = ("No out-of-core storage found."
"Either:\n"
"1. Install ``chest``, an out-of-core dictionary\n"
"2. Provide an on-disk array like an h5py.Dataset")
raise ValueError(msg) # pragma: no cover
if isinstance(store, MutableMapping):
name = 'cache-' + tokenize(self)
dsk = dict(((name, k[1:]), (operator.setitem, store, (tuple, list(k)), k))
for k in core.flatten(self._keys()))
Array._get(sharedict.merge(dsk, self.dask), list(dsk.keys()), **kwargs)
dsk2 = dict((k, (operator.getitem, store, (tuple, list(k))))
for k in store)
return Array(dsk2, self.name, chunks=self.chunks, dtype=self.dtype)
def __int__(self):
return int(self.compute())
def __bool__(self):
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def __float__(self):
return float(self.compute())
def __complex__(self):
return complex(self.compute())
def __setitem__(self, key, value):
if isinstance(key, Array):
if isinstance(value, Array) and value.ndim > 1:
raise ValueError('boolean index array should have 1 dimension')
y = where(key, value, self)
self.dtype = y.dtype
self.dask = y.dask
self.name = y.name
return self
else:
raise NotImplementedError("Item assignment with %s not supported"
% type(key))
def __getitem__(self, index):
out = 'getitem-' + tokenize(self, index)
# Field access, e.g. x['a'] or x[['a', 'b']]
if (isinstance(index, (str, unicode)) or
(isinstance(index, list) and index and
all(isinstance(i, (str, unicode)) for i in index))):
if isinstance(index, (str, unicode)):
dt = self.dtype[index]
else:
dt = np.dtype([(name, self.dtype[name]) for name in index])
if dt.shape:
new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))
chunks = self.chunks + tuple((i,) for i in dt.shape)
return self.map_blocks(getitem, index, dtype=dt.base, name=out,
chunks=chunks, new_axis=new_axis)
else:
return self.map_blocks(getitem, index, dtype=dt, name=out)
# Slicing
if isinstance(index, Array):
return slice_with_dask_array(self, index)
if not isinstance(index, tuple):
index = (index,)
if any(isinstance(i, Array) for i in index):
raise NotImplementedError("Indexing with a dask Array")
if all(isinstance(i, slice) and i == slice(None) for i in index):
return self
dsk, chunks = slice_array(out, self.name, self.chunks, index)
dsk2 = sharedict.merge(self.dask, (out, dsk))
return Array(dsk2, out, chunks, dtype=self.dtype)
def _vindex(self, key):
if (not isinstance(key, tuple) or
not len([k for k in key if isinstance(k, (np.ndarray, list))]) >= 2 or
not all(isinstance(k, (np.ndarray, list)) or k == slice(None, None)
for k in key)):
msg = ("vindex expects only lists and full slices\n"
"At least two entries must be a list\n"
"For other combinations try doing normal slicing first, followed\n"
"by vindex slicing. Got: \n\t%s")
raise IndexError(msg % str(key))
if any((isinstance(k, np.ndarray) and k.ndim != 1) or
(isinstance(k, list) and k and isinstance(k[0], list))
for k in key):
raise IndexError("vindex does not support multi-dimensional keys\n"
"Got: %s" % str(key))
if len(set(len(k) for k in key if isinstance(k, (list, np.ndarray)))) != 1:
raise IndexError("All indexers must have the same length, got\n"
"\t%s" % str(key))
key = key + (slice(None, None),) * (self.ndim - len(key))
key = [i if isinstance(i, list) else
i.tolist() if isinstance(i, np.ndarray) else
None for i in key]
return _vindex(self, *key)
@property
def vindex(self):
return IndexCallable(self._vindex)
@wraps(np.dot)
def dot(self, other):
return tensordot(self, other,
axes=((self.ndim - 1,), (other.ndim - 2,)))
@property
def A(self):
return self
@property
def T(self):
return transpose(self)
@derived_from(np.ndarray)
def transpose(self, *axes):
if not axes:
axes = None
elif len(axes) == 1 and isinstance(axes[0], Iterable):
axes = axes[0]
return transpose(self, axes=axes)
@wraps(np.ravel)
def ravel(self):
return ravel(self)
flatten = ravel
@wraps(np.reshape)
def reshape(self, *shape):
from .reshape import reshape
if len(shape) == 1 and not isinstance(shape[0], Number):
shape = shape[0]
return reshape(self, shape)
@wraps(topk)
def topk(self, k):
return topk(k, self)
def astype(self, dtype, **kwargs):
"""Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
"""
# Scalars don't take `casting` or `copy` kwargs - as such we only pass
# them to `map_blocks` if specified by user (different than defaults).
extra = set(kwargs) - {'casting', 'copy'}
if extra:
raise TypeError("astype does not take the following keyword "
"arguments: {0!s}".format(list(extra)))
casting = kwargs.get('casting', 'unsafe')
copy = kwargs.get('copy', True)
dtype = np.dtype(dtype)
if self.dtype == dtype:
return self
elif not np.can_cast(self.dtype, dtype, casting=casting):
raise TypeError("Cannot cast array from {0!r} to {1!r}"
" according to the rule "
"{2!r}".format(self.dtype, dtype, casting))
name = 'astype-' + tokenize(self, dtype, casting, copy)
return self.map_blocks(_astype, dtype=dtype, name=name,
astype_dtype=dtype, **kwargs)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.invert, self)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __pos__(self):
return self
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
@wraps(np.any)
def any(self, axis=None, keepdims=False, split_every=None):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.all)
def all(self, axis=None, keepdims=False, split_every=None):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.min)
def min(self, axis=None, keepdims=False, split_every=None):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.max)
def max(self, axis=None, keepdims=False, split_every=None):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.argmin)
def argmin(self, axis=None, split_every=None):
from .reductions import argmin
return argmin(self, axis=axis, split_every=split_every)
@wraps(np.argmax)
def argmax(self, axis=None, split_every=None):
from .reductions import argmax
return argmax(self, axis=axis, split_every=split_every)
@wraps(np.sum)
def sum(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import sum
return sum(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.prod)
def prod(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import prod
return prod(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.mean)
def mean(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import mean
return mean(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.std)
def std(self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
from .reductions import std
return std(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every)
@wraps(np.var)
def var(self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
from .reductions import var
return var(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every)
def moment(self, order, axis=None, dtype=None, keepdims=False, ddof=0,
split_every=None):
"""Calculate the nth centralized moment.
Parameters
----------
order : int
Order of the moment that is returned, must be >= 2.
axis : int, optional
Axis along which the central moment is computed. The default is to
compute the moment of the flattened array.
dtype : data-type, optional
Type to use in computing the moment. For arrays of integer type the
default is float64; for arrays of float types it is the same as the
array type.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
N - ddof, where N represents the number of elements. By default
ddof is zero.
Returns
-------
moment : ndarray
References
----------
.. [1] Pebay, Philippe (2008), "Formulas for Robust, One-Pass Parallel
Computation of Covariances and Arbitrary-Order Statistical Moments"
(PDF), Technical Report SAND2008-6212, Sandia National Laboratories
"""
from .reductions import moment
return moment(self, order, axis=axis, dtype=dtype, keepdims=keepdims,
ddof=ddof, split_every=split_every)
def vnorm(self, ord=None, axis=None, keepdims=False, split_every=None):
""" Vector norm """
from .reductions import vnorm
return vnorm(self, ord=ord, axis=axis, keepdims=keepdims,
split_every=split_every)
@wraps(map_blocks)
def map_blocks(self, func, *args, **kwargs):
return map_blocks(func, self, *args, **kwargs)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
The number of cells that each block should share with its neighbors
If a tuple or dict this can be different per axis
boundary: str, tuple, dict
how to handle the boundaries. Values include 'reflect',
'periodic', 'nearest', 'none', or any constant value like 0 or
np.nan
trim: bool
Whether or not to trim the excess after the map function. Set this
to false if your mapping function does this for you.
**kwargs:
Other keyword arguments valid in ``map_blocks``
Examples
--------
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
>>> import dask.array as da
>>> x = np.arange(16).reshape((4, 4))
>>> d = da.from_array(x, chunks=(2, 2))
>>> d.map_overlap(lambda x: x + x.size, depth=1).compute()
array([[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]])
>>> func = lambda x: x + x.size
>>> depth = {0: 1, 1: 1}
>>> boundary = {0: 'reflect', 1: 'none'}
>>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE
array([[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27]])
"""
from .ghost import map_overlap
return map_overlap(self, func, depth, boundary, trim, **kwargs)
def cumsum(self, axis, dtype=None):
""" See da.cumsum for docstring """
from .reductions import cumsum
return cumsum(self, axis, dtype)
def cumprod(self, axis, dtype=None):
""" See da.cumprod for docstring """
from .reductions import cumprod
return cumprod(self, axis, dtype)
@wraps(squeeze)
def squeeze(self):
return squeeze(self)
def rechunk(self, chunks, threshold=None, block_size_limit=None):
""" See da.rechunk for docstring """
from . import rechunk # avoid circular import
return rechunk(self, chunks, threshold, block_size_limit)
@property
def real(self):
from .ufunc import real
return real(self)
@property
def imag(self):
from .ufunc import imag
return imag(self)
def conj(self):
from .ufunc import conj
return conj(self)
@wraps(np.clip)
def clip(self, min=None, max=None):
from .ufunc import clip
return clip(self, min, max)
def view(self, dtype, order='C'):
""" Get a view of the array as a new data type
Parameters
----------
dtype:
The dtype by which to view the array
order: string
'C' or 'F' (Fortran) ordering
This reinterprets the bytes of the array under a new dtype. If that
dtype does not have the same size as the original array then the shape
will change.
Beware that both numpy and dask.array can behave oddly when taking
shape-changing views of arrays under Fortran ordering. Under some
versions of NumPy this function will fail when taking shape-changing
views of Fortran ordered arrays if the first dimension has chunks of
size one.
"""
dtype = np.dtype(dtype)
mult = self.dtype.itemsize / dtype.itemsize
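        # Changing the itemsize rescales the length of the axis that varies
        # fastest in memory: the last axis for C order, the first for Fortran.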
if order == 'C':
ascontiguousarray = np.ascontiguousarray
chunks = self.chunks[:-1] + (tuple(ensure_int(c * mult)
for c in self.chunks[-1]),)
elif order == 'F':
ascontiguousarray = np.asfortranarray
chunks = ((tuple(ensure_int(c * mult) for c in self.chunks[0]), ) +
self.chunks[1:])
else:
raise ValueError("Order must be one of 'C' or 'F'")
out = elemwise(ascontiguousarray, self, dtype=self.dtype)
out = elemwise(np.ndarray.view, out, dtype, dtype=dtype)
out._chunks = chunks
return out
@wraps(np.swapaxes)
def swapaxes(self, axis1, axis2):
return swapaxes(self, axis1, axis2)
@wraps(np.round)
def round(self, decimals=0):
return round(self, decimals=decimals)
def copy(self):
"""
Copy array. This is a no-op for dask.arrays, which are immutable
"""
return Array(self.dask, self.name, self.chunks, self.dtype)
def __deepcopy__(self, memo):
c = self.copy()
memo[id(self)] = c
return c
def to_delayed(self):
""" Convert Array into dask Delayed objects
Returns an array of values, one value per chunk.
See Also
--------
dask.array.from_delayed
"""
from ..delayed import Delayed
return np.array(ndeepmap(self.ndim, lambda k: Delayed(k, self.dask), self._keys()),
dtype=object)
@wraps(np.repeat)
def repeat(self, repeats, axis=None):
return repeat(self, repeats, axis=axis)
def ensure_int(f):
i = int(f)
if i != f:
raise ValueError("Could not coerce %f to integer" % f)
return i
normalize_token.register(Array, lambda a: a.name)
def normalize_chunks(chunks, shape=None):
""" Normalize chunks to tuple of tuples
>>> normalize_chunks((2, 2), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(4, 6)) # Idempotent
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks([[2, 2], [3, 3]]) # Cleans up lists to tuples
((2, 2), (3, 3))
>>> normalize_chunks(10, shape=(30, 5)) # Supports integer inputs
((10, 10, 10), (5,))
>>> normalize_chunks((), shape=(0, 0)) # respects null dimensions
((), ())
"""
if chunks is None:
raise ValueError(chunks_none_error_message)
if isinstance(chunks, list):
chunks = tuple(chunks)
if isinstance(chunks, Number):
chunks = (chunks,) * len(shape)
if not chunks and shape and all(s == 0 for s in shape):
chunks = ((),) * len(shape)
if shape and len(chunks) != len(shape):
if not (len(shape) == 1 and sum(chunks) == shape[0]):
raise ValueError(
"Chunks and shape must be of the same length/dimension. "
"Got chunks=%s, shape=%s" % (chunks, shape))
if shape is not None:
chunks = tuple(c if c is not None else s for c, s in zip(chunks, shape))
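    # Expand integer blockshapes into explicit per-axis chunk tuples;
    # per-axis tuples that are already explicit are kept as-is.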
if chunks and shape is not None:
chunks = sum((blockdims_from_blockshape((s,), (c,))
if not isinstance(c, (tuple, list)) else (c,)
for s, c in zip(shape, chunks)), ())
return tuple(map(tuple, chunks))
def from_array(x, chunks, name=None, lock=False, fancy=True):
""" Create dask array from something that looks like an array
Input must have a ``.shape`` and support numpy-style slicing.
Parameters
----------
x : array_like
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions
like ((1000, 1000, 500), (400, 400)).
name : str, optional
The key name to use for the array. Defaults to a hash of ``x``.
Use ``name=False`` to generate a random name instead of hashing (fast)
lock : bool or Lock, optional
If ``x`` doesn't support concurrent reads then provide a lock here, or
pass in True to have dask.array create one for you.
fancy : bool, optional
If ``x`` doesn't support fancy indexing (e.g. indexing with lists or
arrays) then set to False. Default is True.
Examples
--------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP
If your underlying datastore does not support concurrent reads then include
the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
arrays to coordinate around the same lock.
>>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP
"""
chunks = normalize_chunks(chunks, x.shape)
if len(chunks) != len(x.shape):
raise ValueError("Input array has %d dimensions but the supplied "
"chunks has only %d dimensions" %
(len(x.shape), len(chunks)))
if tuple(map(sum, chunks)) != x.shape:
raise ValueError("Chunks do not add up to shape. "
"Got chunks=%s, shape=%s" % (chunks, x.shape))
if name in (None, True):
token = tokenize(x, chunks)
original_name = 'array-original-' + token
name = name or 'array-' + token
elif name is False:
original_name = name = 'array-' + str(uuid.uuid1())
else:
original_name = name
if lock is True:
lock = SerializableLock()
dsk = getem(original_name, chunks, out_name=name, fancy=fancy, lock=lock)
dsk[original_name] = x
return Array(dsk, name, chunks, dtype=x.dtype)
def from_delayed(value, shape, dtype, name=None):
""" Create a dask array from a dask delayed value
This routine is useful for constructing dask arrays in an ad-hoc fashion
using dask delayed, particularly when combined with stack and concatenate.
The dask array will consist of a single chunk.
Examples
--------
>>> from dask import delayed
>>> value = delayed(np.ones)(5)
>>> array = from_delayed(value, (5,), float)
>>> array
dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,)>
>>> array.compute()
array([ 1., 1., 1., 1., 1.])
"""
from dask.delayed import delayed, Delayed
if not isinstance(value, Delayed) and hasattr(value, 'key'):
value = delayed(value)
name = name or 'from-value-' + tokenize(value, shape, dtype)
dsk = {(name,) + (0,) * len(shape): value.key}
chunks = tuple((d,) for d in shape)
return Array(sharedict.merge(value.dask, (name, dsk)), name, chunks, dtype)
def from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):
""" Create dask array in a single block by calling a function
Calling the provided function with func(*args, **kwargs) should return a
NumPy array of the indicated shape and dtype.
Examples
--------
>>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))
>>> a.compute()
array([0, 1, 2])
This works particularly well when coupled with dask.array functions like
concatenate and stack:
>>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]
>>> stack(arrays).compute()
array([0, 1, 2, 3, 4])
"""
name = name or 'from_func-' + tokenize(func, shape, dtype, args, kwargs)
if args or kwargs:
func = partial(func, *args, **kwargs)
dsk = {(name,) + (0,) * len(shape): (func,)}
chunks = tuple((i,) for i in shape)
return Array(dsk, name, chunks, dtype)
def common_blockdim(blockdims):
""" Find the common block dimensions from the list of block dimensions
Currently only implements the simplest possible heuristic: the common
    block-dimension is the only one that does not fully span a dimension.
This is a conservative choice that allows us to avoid potentially very
expensive rechunking.
Assumes that each element of the input block dimensions has all the same
sum (i.e., that they correspond to dimensions of the same size).
Examples
--------
>>> common_blockdim([(3,), (2, 1)])
(2, 1)
>>> common_blockdim([(1, 2), (2, 1)])
(1, 1, 1)
>>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP
Traceback (most recent call last):
...
ValueError: Chunks do not align
"""
if not any(blockdims):
return ()
non_trivial_dims = set([d for d in blockdims if len(d) > 1])
if len(non_trivial_dims) == 1:
return first(non_trivial_dims)
if len(non_trivial_dims) == 0:
return max(blockdims, key=first)
if np.isnan(sum(map(sum, blockdims))):
raise ValueError("Arrays chunk sizes are unknown: %s", blockdims)
if len(set(map(sum, non_trivial_dims))) > 1:
raise ValueError("Chunks do not add up to same value", blockdims)
# We have multiple non-trivial chunks on this axis
# e.g. (5, 2) and (4, 3)
# We create a single chunk tuple with the same total length
# that evenly divides both, e.g. (4, 1, 2)
# To accomplish this we walk down all chunk tuples together, finding the
# smallest element, adding it to the output, and subtracting it from all
# other elements and remove the element itself. We stop once we have
# burned through all of the chunk tuples.
# For efficiency's sake we reverse the lists so that we can pop off the end
rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]
total = sum(first(non_trivial_dims))
i = 0
out = []
while i < total:
m = min(c[-1] for c in rchunks)
out.append(m)
for c in rchunks:
c[-1] -= m
if c[-1] == 0:
c.pop()
i += m
return tuple(out)
def unify_chunks(*args, **kwargs):
"""
Unify chunks across a sequence of arrays
Parameters
----------
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
Examples
--------
>>> import dask.array as da
>>> x = da.ones(10, chunks=((5, 2, 3),))
>>> y = da.ones(10, chunks=((2, 3, 5),))
>>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')
>>> chunkss
{'i': (2, 3, 2, 3)}
>>> x = da.ones((100, 10), chunks=(20, 5))
>>> y = da.ones((10, 100), chunks=(4, 50))
>>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk')
>>> chunkss # doctest: +SKIP
{'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}
Returns
-------
chunkss : dict
Map like {index: chunks}.
arrays : list
List of rechunked arrays.
See Also
--------
common_blockdim
"""
args = [asarray(a) if i % 2 == 0 else a for i, a in enumerate(args)]
warn = kwargs.get('warn', True)
arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
arrays, inds = zip(*arginds)
if all(ind == inds[0] for ind in inds) and all(a.chunks == arrays[0].chunks for a in arrays):
return dict(zip(inds[0], arrays[0].chunks)), arrays
nameinds = [(a.name, i) for a, i in arginds]
blockdim_dict = dict((a.name, a.chunks) for a, _ in arginds)
chunkss = broadcast_dimensions(nameinds, blockdim_dict,
consolidate=common_blockdim)
max_parts = max(arg.npartitions for arg in args[::2])
nparts = np.prod(list(map(len, chunkss.values())))
if warn and nparts and nparts >= max_parts * 10:
warnings.warn("Increasing number of chunks by factor of %d" %
(nparts / max_parts))
arrays = []
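    # Rechunk each argument to the common chunking; axes of length 0 or 1
    # keep their own extent so that broadcasting still works.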
for a, i in arginds:
chunks = tuple(chunkss[j] if a.shape[n] > 1 else a.shape[n]
if not np.isnan(sum(chunkss[j])) else None
for n, j in enumerate(i))
if chunks != a.chunks and all(a.chunks):
arrays.append(a.rechunk(chunks))
else:
arrays.append(a)
return chunkss, arrays
def atop(func, out_ind, *args, **kwargs):
""" Tensor operation: Generalized inner and outer products
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``atop`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Many dask.array operations are special cases of atop including elementwise,
broadcasting, reductions, tensordot, and transpose.
Parameters
----------
func : callable
Function to apply to individual tuples of blocks
out_ind : iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args : sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
**kwargs : dict
Extra keyword arguments to pass to function
dtype : np.dtype
Datatype of resulting array.
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
Dictionary mapping index to function to be applied to chunk sizes
new_axes : dict, keyword only
New indexes and their dimension lengths
Examples
--------
2D embarrassingly parallel operation from two arrays, x, and y.
>>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij', dtype='f8') # z = x + y # doctest: +SKIP
Outer product multiplying x by y, two 1-d vectors
>>> z = atop(operator.mul, 'ij', x, 'i', y, 'j', dtype='f8') # doctest: +SKIP
z = x.T
>>> z = atop(np.transpose, 'ji', x, 'ij', dtype=x.dtype) # doctest: +SKIP
    The transpose case above is illustrative because it does the same transposition
both on each in-memory block by calling ``np.transpose`` and on the order
of the blocks themselves, by switching the order of the index ``ij -> ji``.
We can compose these same patterns with more variables and more complex
in-memory functions
z = X + Y.T
>>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji', dtype='f8') # doctest: +SKIP
    Any index, like ``i``, missing from the output index is interpreted as a
contraction (note that this differs from Einstein convention; repeated
indices do not imply contraction.) In the case of a contraction the passed
function should expect an iterable of blocks on any array that holds that
index. To receive arrays concatenated along contracted dimensions instead
pass ``concatenate=True``.
Inner product multiplying x by y, two 1-d vectors
>>> def sequence_dot(x_blocks, y_blocks):
... result = 0
... for x, y in zip(x_blocks, y_blocks):
... result += x.dot(y)
... return result
>>> z = atop(sequence_dot, '', x, 'i', y, 'i', dtype='f8') # doctest: +SKIP
Add new single-chunk dimensions with the ``new_axes=`` keyword, including
the length of the new dimension. New dimensions will always be in a single
chunk.
>>> def f(x):
... return x[:, None] * np.ones((1, 5))
>>> z = atop(f, 'az', x, 'a', new_axes={'z': 5}, dtype=x.dtype) # doctest: +SKIP
If the applied function changes the size of each chunk you can specify this
    with an ``adjust_chunks={...}`` dictionary holding a function for each index
that modifies the dimension size in that index.
>>> def double(x):
... return np.concatenate([x, x])
>>> y = atop(double, 'ij', x, 'ij',
... adjust_chunks={'i': lambda n: 2 * n}, dtype=x.dtype) # doctest: +SKIP
See Also
--------
top - dict formulation of this function, contains most logic
"""
out = kwargs.pop('name', None) # May be None at this point
token = kwargs.pop('token', None)
dtype = kwargs.pop('dtype', None)
adjust_chunks = kwargs.pop('adjust_chunks', None)
new_axes = kwargs.get('new_axes', {})
if dtype is None:
raise ValueError("Must specify dtype of output array")
chunkss, arrays = unify_chunks(*args)
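    # unify_chunks rechunks the arguments so that blocks sharing an index
    # letter line up with one another.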
for k, v in new_axes.items():
chunkss[k] = (v,)
arginds = list(zip(arrays, args[1::2]))
numblocks = dict([(a.name, a.numblocks) for a, _ in arginds])
argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
# Finish up the name
if not out:
out = '%s-%s' % (token or funcname(func).strip('_'),
tokenize(func, out_ind, argindsstr, dtype, **kwargs))
dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks, **kwargs)
dsks = [a.dask for a, _ in arginds]
chunks = [chunkss[i] for i in out_ind]
if adjust_chunks:
for i, ind in enumerate(out_ind):
if ind in adjust_chunks:
if callable(adjust_chunks[ind]):
chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))
elif isinstance(adjust_chunks[ind], int):
chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])
elif isinstance(adjust_chunks[ind], (tuple, list)):
chunks[i] = tuple(adjust_chunks[ind])
else:
raise NotImplementedError(
"adjust_chunks values must be callable, int, or tuple")
chunks = tuple(chunks)
return Array(sharedict.merge((out, dsk), *dsks), out, chunks, dtype=dtype)
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
def stack(seq, axis=0):
"""
Stack arrays along a new axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along a new dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis + 1
if axis > ndim:
raise ValueError("Axis must not be greater than number of dimensions"
"\nData has %d dimensions, but got axis=%d" %
(ndim, axis))
if not all(x.shape == seq[0].shape for x in seq):
raise ValueError("Stacked arrays must have the same shape. Got %s",
[x.shape for x in seq])
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq))
_, seq = unify_chunks(*uc_args)
assert len(set(a.chunks for a in seq)) == 1 # same chunks
chunks = (seq[0].chunks[:axis] + ((1,) * n,) + seq[0].chunks[axis:])
names = [a.name for a in seq]
name = 'stack-' + tokenize(names, axis)
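    # Each output key's block index along the new axis picks which input
    # array to read from; the remaining indices address that array's block,
    # and the ``None`` in the getitem slice below inserts the new axis.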
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [(names[key[axis + 1]], ) + key[1:axis + 1] + key[axis + 2:]
for key in keys]
values = [(getitem, inp, (slice(None, None, None),) * axis +
(None, ) + (slice(None, None, None), ) * (ndim - axis))
for inp in inputs]
dsk = dict(zip(keys, values))
dsk2 = sharedict.merge((name, dsk), *[a.dask for a in seq])
dt = reduce(np.promote_types, [a.dtype for a in seq])
return Array(dsk2, name, chunks, dtype=dt)
def concatenate(seq, axis=0):
"""
Concatenate arrays along an existing axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also
--------
stack
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis
if axis >= ndim:
msg = ("Axis must be less than than number of dimensions"
"\nData has %d dimensions, but got axis=%d")
raise ValueError(msg % (ndim, axis))
if not all(i == axis or all(x.shape[i] == seq[0].shape[i] for x in seq)
for i in range(ndim)):
raise ValueError("Shapes do not align: %s", [x.shape for x in seq])
inds = [list(range(ndim)) for i in range(n)]
for i, ind in enumerate(inds):
ind[axis] = -(i + 1)
uc_args = list(concat(zip(seq, inds)))
_, seq = unify_chunks(*uc_args, warn=False)
bds = [a.chunks for a in seq]
chunks = (seq[0].chunks[:axis] + (sum([bd[axis] for bd in bds], ()), ) +
seq[0].chunks[axis + 1:])
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
dt = reduce(np.promote_types, [a.dtype for a in seq])
seq = [x.astype(dt) for x in seq]
names = [a.name for a in seq]
name = 'concatenate-' + tokenize(names, axis)
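    # ``cum_dims`` holds the running count of blocks along ``axis``; bisecting
    # it with an output block index identifies the source array, and
    # subtracting that array's offset gives its local block index.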
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
values = [(names[bisect(cum_dims, key[axis + 1]) - 1],) + key[1:axis + 1] +
(key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1], ) +
key[axis + 2:] for key in keys]
dsk = dict(zip(keys, values))
dsk2 = sharedict.merge((name, dsk), * [a.dask for a in seq])
return Array(dsk2, name, chunks, dtype=dt)
def atleast_3d(x):
if x.ndim == 1:
return x[None, :, None]
elif x.ndim == 2:
return x[:, :, None]
elif x.ndim > 2:
return x
else:
raise NotImplementedError()
def atleast_2d(x):
if x.ndim == 1:
return x[None, :]
elif x.ndim > 1:
return x
else:
raise NotImplementedError()
@wraps(np.vstack)
def vstack(tup):
tup = tuple(atleast_2d(x) for x in tup)
return concatenate(tup, axis=0)
@wraps(np.hstack)
def hstack(tup):
if all(x.ndim == 1 for x in tup):
return concatenate(tup, axis=0)
else:
return concatenate(tup, axis=1)
@wraps(np.dstack)
def dstack(tup):
tup = tuple(atleast_3d(x) for x in tup)
return concatenate(tup, axis=2)
@wraps(np.take)
def take(a, indices, axis=0):
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
if isinstance(a, np.ndarray) and isinstance(indices, Array):
return _take_dask_array_from_numpy(a, indices, axis)
else:
return a[(slice(None),) * axis + (indices,)]
@wraps(np.compress)
def compress(condition, a, axis=None):
if axis is None:
raise NotImplementedError("Must select axis for compression")
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
condition = np.array(condition, dtype=bool)
if condition.ndim != 1:
raise ValueError("Condition must be one dimensional")
if len(condition) < a.shape[axis]:
condition = condition.copy()
condition.resize(a.shape[axis])
slc = ((slice(None),) * axis + (condition, ) +
(slice(None),) * (a.ndim - axis - 1))
return a[slc]
def _take_dask_array_from_numpy(a, indices, axis):
assert isinstance(a, np.ndarray)
assert isinstance(indices, Array)
return indices.map_blocks(lambda block: np.take(a, block, axis),
chunks=indices.chunks,
dtype=a.dtype)
@wraps(np.transpose)
def transpose(a, axes=None):
if axes:
if len(axes) != a.ndim:
raise ValueError("axes don't match array")
else:
axes = tuple(range(a.ndim))[::-1]
axes = tuple(d + a.ndim if d < 0 else d for d in axes)
return atop(partial(np.transpose, axes=axes),
axes,
a, tuple(range(a.ndim)), dtype=a.dtype)
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
def _tensordot(a, b, axes):
x = np.tensordot(a, b, axes=axes)
ind = [slice(None, None)] * x.ndim
for a in sorted(axes[0]):
ind.insert(a, None)
x = x[tuple(ind)]
return x
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, int):
left_axes = (left_axes,)
if isinstance(right_axes, int):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
dt = np.promote_types(lhs.dtype, rhs.dtype)
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
right_index[r] = left_index[l]
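    # The contracted axes of ``rhs`` now share index letters with ``lhs``, so
    # atop pairs up the corresponding blocks; ``_tensordot`` keeps those axes
    # in its output and the ``sum`` below reduces over them.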
intermediate = atop(_tensordot, out_index,
lhs, left_index,
rhs, right_index, dtype=dt,
axes=(left_axes, right_axes))
result = intermediate.sum(axis=left_axes)
return result
@wraps(np.dot)
def dot(a, b):
return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))
def insert_to_ooc(out, arr, lock=True, region=None):
if lock is True:
lock = Lock()
def store(out, x, index, lock, region):
if lock:
lock.acquire()
try:
if region is None:
out[index] = np.asanyarray(x)
else:
out[fuse_slice(region, index)] = np.asanyarray(x)
finally:
if lock:
lock.release()
return None
slices = slices_from_chunks(arr.chunks)
name = 'store-%s' % arr.name
dsk = dict(((name,) + t[1:], (store, out, t, slc, lock, region))
for t, slc in zip(core.flatten(arr._keys()), slices))
return dsk
def asarray(array):
"""Coerce argument into a dask array
Examples
--------
>>> x = np.arange(3)
>>> asarray(x)
dask.array<asarray, shape=(3,), dtype=int64, chunksize=(3,)>
"""
if isinstance(array, Array):
return array
name = 'asarray-' + tokenize(array)
if not isinstance(getattr(array, 'shape', None), Iterable):
array = np.asarray(array)
dsk = {(name,) + (0,) * len(array.shape):
(getarray_inline, name) + ((slice(None, None),) * len(array.shape),),
name: array}
chunks = tuple((d,) for d in array.shape)
return Array(dsk, name, chunks, dtype=array.dtype)
def partial_by_order(*args, **kwargs):
"""
>>> partial_by_order(5, function=add, other=[(1, 10)])
15
"""
function = kwargs.pop('function')
other = kwargs.pop('other')
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return function(*args2, **kwargs)
def is_scalar_for_elemwise(arg):
"""
>>> is_scalar_for_elemwise(42)
True
>>> is_scalar_for_elemwise('foo')
True
>>> is_scalar_for_elemwise(True)
True
>>> is_scalar_for_elemwise(np.array(42))
True
>>> is_scalar_for_elemwise([1, 2, 3])
True
>>> is_scalar_for_elemwise(np.array([1, 2, 3]))
False
>>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))
False
>>> is_scalar_for_elemwise(np.dtype('i4'))
True
"""
return (np.isscalar(arg) or
not isinstance(getattr(arg, 'shape', None), Iterable) or
isinstance(arg, np.dtype) or
(isinstance(arg, np.ndarray) and arg.ndim == 0))
def broadcast_shapes(*shapes):
"""
Determines output shape from broadcasting arrays.
Parameters
----------
shapes : tuples
The shapes of the arguments.
Returns
-------
output_shape : tuple
Raises
------
ValueError
If the input shapes cannot be successfully broadcast together.
"""
if len(shapes) == 1:
return shapes[0]
out = []
for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):
dim = max(sizes)
if any(i != -1 and i != 1 and i != dim and not np.isnan(i) for i in sizes):
raise ValueError("operands could not be broadcast together with "
"shapes {0}".format(' '.join(map(str, shapes))))
out.append(dim)
return tuple(reversed(out))
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
Examples
--------
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See Also
--------
atop
"""
if not set(['name', 'dtype']).issuperset(kwargs):
msg = "%s does not take the following keyword arguments %s"
raise TypeError(msg % (op.__name__, str(sorted(set(kwargs) - set(['name', 'dtype'])))))
args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]
shapes = [getattr(arg, 'shape', ()) for arg in args]
shapes = [s if isinstance(s, Iterable) else () for s in shapes]
out_ndim = len(broadcast_shapes(*shapes)) # Raises ValueError if dimensions mismatch
expr_inds = tuple(range(out_ndim))[::-1]
arrays = [a for a in args if not is_scalar_for_elemwise(a)]
other = [(i, a) for i, a in enumerate(args) if is_scalar_for_elemwise(a)]
if 'dtype' in kwargs:
dt = kwargs['dtype']
else:
# We follow NumPy's rules for dtype promotion, which special cases
# scalars and 0d ndarrays (which it considers equivalent) by using
# their values to compute the result dtype:
# https://github.com/numpy/numpy/issues/6240
# We don't inspect the values of 0d dask arrays, because these could
# hold potentially very expensive calculations.
vals = [np.empty((1,) * a.ndim, dtype=a.dtype)
if not is_scalar_for_elemwise(a) else a
for a in args]
dt = apply_infer_dtype(op, vals, {}, 'elemwise', suggest_dtype=False)
name = kwargs.get('name', None) or '%s-%s' % (funcname(op),
tokenize(op, dt, *args))
if other:
return atop(partial_by_order, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name, function=op, other=other,
token=funcname(op))
else:
return atop(op, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name)
@wraps(np.around)
def around(x, decimals=0):
return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)
def isnull(values):
""" pandas.isnull for dask arrays """
import pandas as pd
return elemwise(pd.isnull, values, dtype='bool')
def notnull(values):
""" pandas.notnull for dask arrays """
return ~isnull(values)
@wraps(numpy_compat.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(numpy_compat.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype='bool')
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
where_error_message = """
The dask.array version of where only handles the three argument case.
da.where(x > 0, x, 0)
and not the single argument case
da.where(x > 0)
This is because dask.array operations must be able to infer the shape of their
outputs prior to execution. The number of positive elements of x requires
execution. See the ``np.where`` docstring for examples and the following link
for a more thorough explanation:
http://dask.pydata.org/en/latest/array-overview.html#construct
""".strip()
chunks_none_error_message = """
You must specify a chunks= keyword argument.
This specifies the chunksize of your array blocks.
See the following documentation page for details:
http://dask.pydata.org/en/latest/array-creation.html#chunks
""".strip()
@wraps(np.where)
def where(condition, x=None, y=None):
if x is None or y is None:
raise TypeError(where_error_message)
return choose(condition, [y, x])
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes, trim_excess=False):
if (not trim_excess and
not all(bd % div == 0 for i, div in axes.items()
for bd in x.chunks[i])):
msg = "Coarsening factor does not align with block dimensions"
raise ValueError(msg)
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = 'coarsen-' + tokenize(reduction, x, axes, trim_excess)
dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes,
trim_excess))
for key in core.flatten(x._keys()))
chunks = tuple(tuple(int(bd // axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.chunks))
dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
return Array(sharedict.merge(x.dask, (name, dsk)), name, chunks, dtype=dt)
def split_at_breaks(array, breaks, axis=0):
""" Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@wraps(np.insert)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
if not -arr.ndim <= axis < arr.ndim:
raise IndexError('axis %r is out of bounds for an array of dimension '
'%s' % (axis, arr.ndim))
if axis < 0:
axis += arr.ndim
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
'da.insert only implemented for monotonic ``obj`` argument')
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, 'ndim', 0) == 0:
# we need to turn values into a dask array
name = 'values-' + tokenize(values)
dtype = getattr(values, 'dtype', type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(len(obj) if axis == n else s
for n, s in enumerate(arr.shape))
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd)
in enumerate(zip(arr.chunks,
values.chunks)))
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
@wraps(chunk.broadcast_to)
def broadcast_to(x, shape):
shape = tuple(shape)
ndim_new = len(shape) - x.ndim
if ndim_new < 0 or any(new != old
for new, old in zip(shape[ndim_new:], x.shape)
if old != 1):
raise ValueError('cannot broadcast shape %s to shape %s'
% (x.shape, shape))
name = 'broadcast_to-' + tokenize(x, shape)
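    # New leading dimensions become single chunks, existing length-1
    # dimensions grow to one chunk of the requested length, and longer
    # dimensions keep their original chunking.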
chunks = (tuple((s,) for s in shape[:ndim_new]) +
tuple(bd if old > 1 else (new,)
for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])))
dsk = dict(((name,) + (0,) * ndim_new + key[1:],
(chunk.broadcast_to, key, shape[:ndim_new] +
tuple(bd[i] for i, bd in zip(key[1:], chunks[ndim_new:]))))
for key in core.flatten(x._keys()))
return Array(sharedict.merge((name, dsk), x.dask), name, chunks, dtype=x.dtype)
@wraps(np.ravel)
def ravel(array):
return array.reshape((-1,))
@wraps(np.roll)
def roll(array, shift, axis=None):
result = array
if axis is None:
result = ravel(result)
if not isinstance(shift, Integral):
raise TypeError(
"Expect `shift` to be an instance of Integral"
" when `axis` is None."
)
shift = (shift,)
axis = (0,)
else:
try:
len(shift)
except TypeError:
shift = (shift,)
try:
len(axis)
except TypeError:
axis = (axis,)
if len(shift) != len(axis):
raise ValueError("Must have the same number of shifts as axes.")
for i, s in zip(axis, shift):
s = -s
s %= result.shape[i]
sl1 = result.ndim * [slice(None)]
sl2 = result.ndim * [slice(None)]
sl1[i] = slice(s, None)
sl2[i] = slice(None, s)
sl1 = tuple(sl1)
sl2 = tuple(sl2)
result = concatenate([result[sl1], result[sl2]], axis=i)
result = result.reshape(array.shape)
return result
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = 'offset_' + func.__name__
return _offset
@wraps(np.fromfunction)
def fromfunction(func, chunks=None, shape=None, dtype=None):
if chunks:
chunks = normalize_chunks(chunks, shape)
name = 'fromfunction-' + tokenize(func, chunks, shape, dtype)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
offsets = list(product(*aggdims))
shapes = list(product(*chunks))
values = [(np.fromfunction, offset_func(func, offset), shp)
for offset, shp in zip(offsets, shapes)]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype=dtype)
@wraps(np.unique)
def unique(x):
name = 'unique-' + x.name
dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
parts = Array._get(sharedict.merge((name, dsk), x.dask), list(dsk.keys()))
return np.unique(np.concatenate(parts))
@wraps(np.bincount)
def bincount(x, weights=None, minlength=None):
if minlength is None:
raise TypeError("Must specify minlength argument in da.bincount")
assert x.ndim == 1
if weights is not None:
assert weights.chunks == x.chunks
# Call np.bincount on each block, possibly with weights
token = tokenize(x, weights, minlength)
name = 'bincount-' + token
if weights is not None:
dsk = dict(((name, i),
(np.bincount, (x.name, i), (weights.name, i), minlength))
for i, _ in enumerate(x._keys()))
dtype = np.bincount([1], weights=[1]).dtype
else:
dsk = dict(((name, i), (np.bincount, (x.name, i), None, minlength))
for i, _ in enumerate(x._keys()))
dtype = np.bincount([]).dtype
# Sum up all of the intermediate bincounts per block
name = 'bincount-sum-' + token
dsk[(name, 0)] = (np.sum, list(dsk), 0)
chunks = ((minlength,),)
dsk = sharedict.merge((name, dsk), x.dask)
if weights is not None:
dsk.update(weights.dask)
return Array(dsk, name, chunks, dtype)
@wraps(np.digitize)
def digitize(a, bins, right=False):
bins = np.asarray(bins)
dtype = np.digitize([0], bins, right=False).dtype
return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
"""
Blocked variant of numpy.histogram.
Follows the signature of numpy.histogram exactly with the following
exceptions:
- Either an iterable specifying the ``bins`` or the number of ``bins``
and a ``range`` argument is required as computing ``min`` and ``max``
over blocked arrays is an expensive operation that must be performed
explicitly.
- ``weights`` must be a dask.array.Array with the same block structure
as ``a``.
Examples
--------
Using number of bins and range:
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array(np.arange(10000), chunks=10)
>>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
>>> bins
array([ 0., 1000., 2000., 3000., 4000., 5000., 6000.,
7000., 8000., 9000., 10000.])
>>> h.compute()
array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])
Explicitly specifying the bins:
>>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
>>> bins
array([ 0, 5000, 10000])
>>> h.compute()
array([5000, 5000])
"""
    if bins is None or (range is None and not np.iterable(bins)):
raise ValueError('dask.array.histogram requires either bins '
'or bins and range to be defined.')
if weights is not None and weights.chunks != a.chunks:
raise ValueError('Input array and weights must have the same '
'chunked structure')
if not np.iterable(bins):
bin_token = bins
mn, mx = range
if mn == mx:
mn -= 0.5
mx += 0.5
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else:
bin_token = bins
token = tokenize(a, bin_token, range, normed, weights, density)
nchunks = len(list(core.flatten(a._keys())))
chunks = ((1,) * nchunks, (len(bins) - 1,))
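    # One partial histogram per input block: the intermediate array has a row
    # of ``len(bins) - 1`` counts for every block, collapsed by the
    # ``sum(axis=0)`` further down.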
name = 'histogram-sum-' + token
# Map the histogram to all bins
def block_hist(x, weights=None):
return np.histogram(x, bins, weights=weights)[0][np.newaxis]
if weights is None:
dsk = dict(((name, i, 0), (block_hist, k))
for i, k in enumerate(core.flatten(a._keys())))
dtype = np.histogram([])[0].dtype
else:
a_keys = core.flatten(a._keys())
w_keys = core.flatten(weights._keys())
dsk = dict(((name, i, 0), (block_hist, k, w))
for i, (k, w) in enumerate(zip(a_keys, w_keys)))
dtype = weights.dtype
all_dsk = sharedict.merge(a.dask, (name, dsk))
if weights is not None:
all_dsk.update(weights.dask)
mapped = Array(all_dsk, name, chunks, dtype=dtype)
n = mapped.sum(axis=0)
# We need to replicate normed and density options from numpy
if density is not None:
if density:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / db / n.sum(), bins
else:
return n, bins
else:
# deprecated, will be removed from Numpy 2.0
if normed:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / (n * db).sum(), bins
else:
return n, bins
def eye(N, chunks, M=None, k=0, dtype=float):
"""
Return a 2-D Array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
chunks: int
chunk size of resulting blocks
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : Array of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
"""
if not isinstance(chunks, int):
raise ValueError('chunks must be an int')
    token = tokenize(N, chunks, M, k, dtype)
name_eye = 'eye-' + token
eye = {}
if M is None:
M = N
vchunks = [chunks] * (N // chunks)
if N % chunks != 0:
vchunks.append(N % chunks)
hchunks = [chunks] * (M // chunks)
if M % chunks != 0:
hchunks.append(M % chunks)
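    # Only blocks that the requested diagonal can pass through or touch call
    # ``np.eye`` with a locally shifted ``k``; every other block is zeros.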
for i, vchunk in enumerate(vchunks):
for j, hchunk in enumerate(hchunks):
if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks:
eye[name_eye, i, j] = (np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype)
else:
eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype)
return Array(eye, name_eye, shape=(N, M),
chunks=(chunks, chunks), dtype=dtype)
@wraps(np.diag)
def diag(v):
name = 'diag-' + tokenize(v)
if isinstance(v, np.ndarray):
if v.ndim == 1:
chunks = ((v.shape[0],), (v.shape[0],))
dsk = {(name, 0, 0): (np.diag, v)}
elif v.ndim == 2:
chunks = ((min(v.shape),),)
dsk = {(name, 0): (np.diag, v)}
else:
raise ValueError("Array must be 1d or 2d only")
return Array(dsk, name, chunks, dtype=v.dtype)
if not isinstance(v, Array):
raise TypeError("v must be a dask array or numpy array, "
"got {0}".format(type(v)))
if v.ndim != 1:
if v.chunks[0] == v.chunks[1]:
dsk = dict(((name, i), (np.diag, row[i])) for (i, row)
in enumerate(v._keys()))
return Array(sharedict.merge(v.dask, (name, dsk)), name, (v.chunks[0],), dtype=v.dtype)
else:
raise NotImplementedError("Extracting diagonals from non-square "
"chunked arrays")
chunks_1d = v.chunks[0]
blocks = v._keys()
dsk = {}
for i, m in enumerate(chunks_1d):
for j, n in enumerate(chunks_1d):
key = (name, i, j)
if i == j:
dsk[key] = (np.diag, blocks[i])
else:
dsk[key] = (np.zeros, (m, n))
return Array(sharedict.merge(v.dask, (name, dsk)), name, (chunks_1d, chunks_1d),
dtype=v.dtype)
def triu(m, k=0):
"""
    Upper triangle of an array with elements below the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
        Diagonal below which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
triu : ndarray, shape (M, N)
Upper triangle of `m`, of same shape and data-type as `m`.
See Also
--------
tril : lower triangle of an array
"""
if m.ndim != 2:
raise ValueError('input must be 2 dimensional')
if m.shape[0] != m.shape[1]:
raise NotImplementedError('input must be a square matrix')
if m.chunks[0][0] != m.chunks[1][0]:
        msg = ('chunks must be square. '
'Use .rechunk method to change the size of chunks.')
raise NotImplementedError(msg)
rdim = len(m.chunks[0])
hdim = len(m.chunks[1])
chunk = m.chunks[0][0]
token = tokenize(m, k)
name = 'triu-' + token
dsk = {}
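    # Blocks entirely below the k-th diagonal are zeroed, blocks the diagonal
    # crosses call ``np.triu`` with a locally shifted ``k``, and blocks
    # entirely on or above it are passed through unchanged.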
for i in range(rdim):
for j in range(hdim):
if chunk * (j - i + 1) < k:
dsk[(name, i, j)] = (np.zeros, (m.chunks[0][i], m.chunks[1][j]))
elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):
dsk[(name, i, j)] = (np.triu, (m.name, i, j), k - (chunk * (j - i)))
else:
dsk[(name, i, j)] = (m.name, i, j)
return Array(sharedict.merge((name, dsk), m.dask), name,
shape=m.shape, chunks=m.chunks, dtype=m.dtype)
def tril(m, k=0):
"""
Lower triangle of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, M)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, M)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : upper triangle of an array
"""
if m.ndim != 2:
raise ValueError('input must be 2 dimensional')
if m.shape[0] != m.shape[1]:
raise NotImplementedError('input must be a square matrix')
if not len(set(m.chunks[0] + m.chunks[1])) == 1:
        msg = ('All chunks must be square to compute the lower triangle '
               'blockwise. Use the .rechunk method to change the chunk sizes.')
raise ValueError(msg)
rdim = len(m.chunks[0])
hdim = len(m.chunks[1])
chunk = m.chunks[0][0]
token = tokenize(m, k)
name = 'tril-' + token
dsk = {}
for i in range(rdim):
for j in range(hdim):
if chunk * (j - i + 1) < k:
dsk[(name, i, j)] = (m.name, i, j)
elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):
dsk[(name, i, j)] = (np.tril, (m.name, i, j), k - (chunk * (j - i)))
else:
dsk[(name, i, j)] = (np.zeros, (m.chunks[0][i], m.chunks[1][j]))
dsk = sharedict.merge(m.dask, (name, dsk))
return Array(dsk, name, shape=m.shape, chunks=m.chunks, dtype=m.dtype)
def chunks_from_arrays(arrays):
""" Chunks tuple from nested list of arrays
>>> x = np.array([1, 2])
>>> chunks_from_arrays([x, x])
((2, 2),)
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x], [x]])
((1, 1), (2,))
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x, x]])
((1,), (2, 2))
>>> chunks_from_arrays([1, 1])
((1, 1),)
"""
if not arrays:
return ()
result = []
dim = 0
def shape(x):
try:
return x.shape
except AttributeError:
return (1,)
while isinstance(arrays, (list, tuple)):
result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))
arrays = arrays[0]
dim += 1
return tuple(result)
def deepfirst(seq):
""" First element in a nested list
>>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
1
"""
if not isinstance(seq, (list, tuple)):
return seq
else:
return deepfirst(seq[0])
def ndimlist(seq):
if not isinstance(seq, (list, tuple)):
return 0
elif not seq:
return 1
else:
return 1 + ndimlist(seq[0])
def shapelist(a):
""" Get the shape of nested list """
if type(a) is list:
return tuple([len(a)] + list(shapelist(a[0])))
else:
return ()
def reshapelist(shape, seq):
""" Reshape iterator to nested shape
>>> reshapelist((2, 3), range(6))
[[0, 1, 2], [3, 4, 5]]
"""
if len(shape) == 1:
return list(seq)
else:
n = int(len(seq) / shape[0])
return [reshapelist(shape[1:], part) for part in partition(n, seq)]
def transposelist(arrays, axes, extradims=0):
""" Permute axes of nested list
>>> transposelist([[1,1,1],[1,1,1]], [2,1])
[[[1, 1], [1, 1], [1, 1]]]
>>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)
[[[[1], [1]], [[1], [1]], [[1], [1]]]]
"""
if len(axes) != ndimlist(arrays):
raise ValueError("Length of axes should equal depth of nested arrays")
if extradims < 0:
raise ValueError("`newdims` should be positive")
if len(axes) > len(set(axes)):
raise ValueError("`axes` should be unique")
ndim = max(axes) + 1
shape = shapelist(arrays)
newshape = [shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)]
result = list(core.flatten(arrays))
return reshapelist(newshape, result)
def concatenate3(arrays):
""" Recursive np.concatenate
Input should be a nested list of numpy arrays arranged in the order they
    should appear in the array itself. Each array should have the same number
    of dimensions as the desired output, and the nesting depth of the lists
    should match that number of dimensions.
>>> x = np.array([[1, 2]])
>>> concatenate3([[x, x, x], [x, x, x]])
array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])
>>> concatenate3([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
arrays = concrete(arrays)
ndim = ndimlist(arrays)
if not ndim:
return arrays
if not arrays:
return np.empty(0)
chunks = chunks_from_arrays(arrays)
shape = tuple(map(sum, chunks))
def dtype(x):
try:
return x.dtype
except AttributeError:
return type(x)
result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))
for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):
if hasattr(arr, 'ndim'):
while arr.ndim < ndim:
arr = arr[None, ...]
result[idx] = arr
return result
def concatenate_axes(arrays, axes):
""" Recursively call np.concatenate along axes """
if len(axes) != ndimlist(arrays):
raise ValueError("Length of axes should equal depth of nested arrays")
extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))
return concatenate3(transposelist(arrays, axes, extradims=extradims))
def to_hdf5(filename, *args, **kwargs):
""" Store arrays in HDF5 file
This saves several dask arrays into several datapaths in an HDF5 file.
It creates the necessary datasets and handles clean file opening/closing.
>>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP
or
>>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP
This can also be used as a method on a single Array
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
if len(args) == 1 and isinstance(args[0], dict):
data = args[0]
elif (len(args) == 2 and
isinstance(args[0], str) and
isinstance(args[1], Array)):
data = {args[0]: args[1]}
else:
raise ValueError("Please provide {'/data/path': array} dictionary")
chunks = kwargs.pop('chunks', True)
import h5py
with h5py.File(filename) as f:
dsets = [f.require_dataset(dp, shape=x.shape, dtype=x.dtype,
chunks=tuple([c[0] for c in x.chunks])
if chunks is True else chunks, **kwargs)
for dp, x in data.items()]
store(list(data.values()), dsets)
def interleave_none(a, b):
"""
>>> interleave_none([0, None, 2, None], [1, 3])
(0, 1, 2, 3)
"""
result = []
i = j = 0
n = len(a) + len(b)
while i + j < n:
if a[i] is not None:
result.append(a[i])
i += 1
else:
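            # a None placeholder in ``a`` is consumed together with the next
            # value from ``b``, so both cursors advance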
result.append(b[j])
i += 1
j += 1
return tuple(result)
def keyname(name, i, okey):
"""
>>> keyname('x', 3, [None, None, 0, 2])
('x', 3, 0, 2)
"""
return (name, i) + tuple(k for k in okey if k is not None)
def _vindex(x, *indexes):
""" Point wise slicing
This is equivalent to numpy slicing with multiple input lists
>>> x = np.arange(56).reshape((7, 8))
>>> x
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55]])
>>> d = from_array(x, chunks=(3, 4))
>>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])
>>> result.compute()
array([ 0, 9, 48, 7])
"""
indexes = [list(index) if index is not None else index for index in indexes]
bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]
bounds2 = [b for i, b in zip(indexes, bounds) if i is not None]
axis = _get_axis(indexes)
points = list()
for i, idx in enumerate(zip(*[i for i in indexes if i is not None])):
block_idx = [np.searchsorted(b, ind, 'right') - 1
for b, ind in zip(bounds2, idx)]
inblock_idx = [ind - bounds2[k][j]
for k, (ind, j) in enumerate(zip(idx, block_idx))]
points.append((i, tuple(block_idx), tuple(inblock_idx)))
per_block = groupby(1, points)
per_block = dict((k, v) for k, v in per_block.items() if v)
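    # ``points`` records, for every requested element, its output position,
    # the block it falls in, and its offset within that block; grouping by
    # block means each block only needs to be sliced once.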
other_blocks = list(product(*[list(range(len(c))) if i is None else [None]
for i, c in zip(indexes, x.chunks)]))
token = tokenize(x, indexes)
name = 'vindex-slice-' + token
full_slices = [slice(None, None) if i is None else None for i in indexes]
dsk = dict((keyname(name, i, okey),
(_vindex_transpose,
(_vindex_slice, (x.name,) + interleave_none(okey, key),
interleave_none(full_slices, list(zip(*pluck(2, per_block[key]))))),
axis))
for i, key in enumerate(per_block)
for okey in other_blocks)
if per_block:
dsk2 = dict((keyname('vindex-merge-' + token, 0, okey),
(_vindex_merge,
[list(pluck(0, per_block[key])) for key in per_block],
[keyname(name, i, okey) for i in range(len(per_block))]))
for okey in other_blocks)
else:
dsk2 = dict()
chunks = [c for i, c in zip(indexes, x.chunks) if i is None]
chunks.insert(0, (len(points),) if points else ())
chunks = tuple(chunks)
name = 'vindex-merge-' + token
dsk.update(dsk2)
return Array(sharedict.merge(x.dask, (name, dsk)), name, chunks, x.dtype)
def _get_axis(indexes):
""" Get axis along which point-wise slicing results lie
This is mostly a hack because I can't figure out NumPy's rule on this and
can't be bothered to go reading.
>>> _get_axis([[1, 2], None, [1, 2], None])
0
>>> _get_axis([None, [1, 2], [1, 2], None])
1
>>> _get_axis([None, None, [1, 2], [1, 2]])
2
"""
ndim = len(indexes)
indexes = [slice(None, None) if i is None else [0] for i in indexes]
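    # Apply the same pattern of list-versus-slice indexing to a tiny dummy
    # array; the axis where the result has length 1 is where NumPy places the
    # pointwise results.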
x = np.empty((2,) * ndim)
x2 = x[tuple(indexes)]
return x2.shape.index(1)
def _vindex_slice(block, points):
""" Pull out point-wise slices from block """
points = [p if isinstance(p, slice) else list(p) for p in points]
return block[tuple(points)]
def _vindex_transpose(block, axis):
""" Rotate block so that points are on the first dimension """
axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))
return block.transpose(axes)
def _vindex_merge(locations, values):
"""
>>> locations = [0], [2, 1]
>>> values = [np.array([[1, 2, 3]]),
... np.array([[10, 20, 30], [40, 50, 60]])]
>>> _vindex_merge(locations, values)
array([[ 1, 2, 3],
[40, 50, 60],
[10, 20, 30]])
"""
locations = list(map(list, locations))
values = list(values)
n = sum(map(len, locations))
shape = list(values[0].shape)
shape[0] = n
shape = tuple(shape)
dtype = values[0].dtype
x = np.empty(shape, dtype=dtype)
ind = [slice(None, None) for i in range(x.ndim)]
for loc, val in zip(locations, values):
ind[0] = loc
x[tuple(ind)] = val
return x
@wraps(np.array)
def array(x, dtype=None, ndmin=None):
    while ndmin is not None and x.ndim < ndmin:
x = x[None, :]
if dtype is not None and x.dtype != dtype:
x = x.astype(dtype)
return x
@wraps(np.cov)
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
# This was copied almost verbatim from np.cov
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X = X - X.mean(axis=1 - axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
@wraps(np.corrcoef)
def corrcoef(x, y=None, rowvar=1):
from .ufunc import sqrt
c = cov(x, y, rowvar)
if c.shape == ():
return c / c
d = diag(c)
d = d.reshape((d.shape[0], 1))
sqr_d = sqrt(d)
return (c / sqr_d) / sqr_d.T
def to_npy_stack(dirname, x, axis=0):
""" Write dask array to a stack of .npy files
This partitions the dask.array along one axis and stores each block along
that axis as a single .npy file in the specified directory
Examples
--------
>>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP
>>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP
$ tree data/
data/
|-- 0.npy
|-- 1.npy
|-- 2.npy
|-- info
    The ``.npy`` files store numpy arrays for ``x[0:2]``, ``x[2:4]``, and ``x[4:5]``
respectively, as is specified by the chunk size along the zeroth axis. The
info file stores the dtype, chunks, and axis information of the array.
You can load these stacks with the ``da.from_npy_stack`` function.
>>> y = da.from_npy_stack('data/') # doctest: +SKIP
See Also
--------
from_npy_stack
"""
chunks = tuple((c if i == axis else (sum(c),))
for i, c in enumerate(x.chunks))
xx = x.rechunk(chunks)
if not os.path.exists(dirname):
        os.mkdir(dirname)
meta = {'chunks': chunks, 'dtype': x.dtype, 'axis': axis}
with open(os.path.join(dirname, 'info'), 'wb') as f:
pickle.dump(meta, f)
name = 'to-npy-stack-' + str(uuid.uuid1())
dsk = dict(((name, i), (np.save, os.path.join(dirname, '%d.npy' % i), key))
for i, key in enumerate(core.flatten(xx._keys())))
Array._get(sharedict.merge(dsk, xx.dask), list(dsk))
def from_npy_stack(dirname, mmap_mode='r'):
""" Load dask array from stack of npy files
See ``da.to_npy_stack`` for docstring
Parameters
----------
dirname: string
Directory of .npy files
mmap_mode: (None or 'r')
Read data in memory map mode
"""
with open(os.path.join(dirname, 'info'), 'rb') as f:
info = pickle.load(f)
dtype = info['dtype']
chunks = info['chunks']
axis = info['axis']
name = 'from-npy-stack-%s' % dirname
keys = list(product([name], *[range(len(c)) for c in chunks]))
values = [(np.load, os.path.join(dirname, '%d.npy' % i), mmap_mode)
for i in range(len(chunks[axis]))]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype)
def _astype(x, astype_dtype=None, **kwargs):
return x.astype(astype_dtype, **kwargs)
@wraps(np.round)
def round(a, decimals=0):
return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)
@wraps(np.swapaxes)
def swapaxes(a, axis1, axis2):
if axis1 == axis2:
return a
if axis1 < 0:
axis1 = axis1 + a.ndim
if axis2 < 0:
axis2 = axis2 + a.ndim
ind = list(range(a.ndim))
out = list(ind)
out[axis1], out[axis2] = axis2, axis1
return atop(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2,
dtype=a.dtype)
@wraps(np.repeat)
def repeat(a, repeats, axis=None):
if axis is None:
if a.ndim == 1:
axis = 0
else:
raise NotImplementedError("Must supply an integer axis value")
if not isinstance(repeats, int):
raise NotImplementedError("Only integer valued repeats supported")
if repeats == 1:
return a
cchunks = np.cumsum((0,) + a.chunks[axis])
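    # Split every chunk along ``axis`` into smaller slabs so that repeating a
    # slab still produces a block of roughly the original chunk size; the
    # repeated slabs are concatenated back together at the end.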
slices = []
for c_start, c_stop in sliding_window(2, cchunks):
ls = np.linspace(c_start, c_stop, repeats).round(0)
for ls_start, ls_stop in sliding_window(2, ls):
if ls_start != ls_stop:
slices.append(slice(ls_start, ls_stop))
all_slice = slice(None, None, None)
slices = [(all_slice,) * axis + (s,) + (all_slice,) * (a.ndim - axis - 1)
for s in slices]
slabs = [a[slc] for slc in slices]
out = []
for slab in slabs:
chunks = list(slab.chunks)
assert len(chunks[axis]) == 1
chunks[axis] = (chunks[axis][0] * repeats,)
chunks = tuple(chunks)
result = slab.map_blocks(np.repeat, repeats, axis=axis, chunks=chunks,
dtype=slab.dtype)
out.append(result)
return concatenate(out, axis=axis)
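# Illustrative sketch of ``repeat`` (integer ``repeats`` and an explicit or
# inferable axis only, per the checks above):
# >>> import dask.array as da
# >>> x = da.arange(4, chunks=2)
# >>> repeat(x, 2, axis=0).compute()
# array([0, 0, 1, 1, 2, 2, 3, 3])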
@wraps(np.tile)
def tile(A, reps):
if not isinstance(reps, Integral):
raise NotImplementedError("Only integer valued `reps` supported.")
if reps < 0:
raise ValueError("Negative `reps` are not allowed.")
elif reps == 0:
return A[..., :0]
elif reps == 1:
return A
return concatenate(reps * [A], axis=-1)
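# Illustrative sketch of ``tile``: a scalar ``reps`` concatenates copies of
# the input along its last axis.
# >>> import dask.array as da
# >>> x = da.arange(3, chunks=3)
# >>> tile(x, 2).compute()
# array([0, 1, 2, 0, 1, 2])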
def slice_with_dask_array(x, index):
y = elemwise(getitem, x, index, dtype=x.dtype)
name = 'getitem-' + tokenize(x, index)
dsk = {(name, i): k
for i, k in enumerate(core.flatten(y._keys()))}
chunks = ((np.nan,) * y.npartitions,)
return Array(sharedict.merge(y.dask, (name, dsk)), name, chunks, x.dtype)
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/core.py",
"copies": "1",
"size": "125689",
"license": "bsd-3-clause",
"hash": 2454336980979084300,
"line_mean": 31.3523809524,
"line_max": 107,
"alpha_frac": 0.555689042,
"autogenerated": false,
"ratio": 3.5158745699180396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4571563611918039,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from bisect import bisect
from collections import Iterable, MutableMapping
from collections import Iterator
from functools import partial, wraps
import inspect
from itertools import product
from numbers import Number
import operator
from operator import add, getitem, mul
import os
import pickle
from threading import Lock
import uuid
import warnings
from toolz.curried import (pipe, partition, concat, pluck, join, first,
memoize, map, groupby, valmap, accumulate, merge,
reduce, interleave, sliding_window, assoc)
import numpy as np
from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..base import Base, tokenize, normalize_token
from ..utils import (deepmap, ignoring, concrete, is_integer,
IndexCallable, funcname, derived_from)
from ..compatibility import unicode, long, getargspec, zip_longest, apply
from ..optimize import cull
from .. import threaded, core
def getarray(a, b, lock=None):
""" Mimics getitem but includes call to np.asarray
>>> getarray([1, 2, 3, 4, 5], slice(1, 4))
array([2, 3, 4])
"""
if isinstance(b, tuple) and any(x is None for x in b):
b2 = tuple(x for x in b if x is not None)
b3 = tuple(None if x is None else slice(None, None)
for x in b if not isinstance(x, (int, long)))
return getarray(a, b2, lock)[b3]
if lock:
lock.acquire()
try:
c = a[b]
if type(c) != np.ndarray:
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
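# The first branch above also handles ``None`` (newaxis) entries: the array is
# sliced without them and the new axes are reinserted afterwards, e.g.
# >>> getarray(np.arange(10), (None, slice(2, 5)))
# array([[2, 3, 4]])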
def getarray_nofancy(a, b, lock=None):
""" A simple wrapper around ``getarray``.
Used to indicate to the optimization passes that the backend doesn't
support "fancy indexing"
"""
return getarray(a, b, lock=lock)
from .optimization import optimize
def slices_from_chunks(chunks):
""" Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in chunks]
shapes = product(*chunks)
starts = product(*cumdims)
return [tuple(slice(s, s + dim) for s, dim in zip(start, shape))
for start, shape in zip(starts, shapes)]
def getem(arr, chunks, shape=None, out_name=None, fancy=True, lock=False):
""" Dask getting various chunks from an array-like
>>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
"""
out_name = out_name or arr
chunks = normalize_chunks(chunks, shape)
keys = list(product([out_name], *[range(len(bds)) for bds in chunks]))
slices = slices_from_chunks(chunks)
getter = getarray if fancy else getarray_nofancy
if lock:
values = [(getter, arr, x, lock) for x in slices]
else:
values = [(getter, arr, x) for x in slices]
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,)),
consolidate=None):
""" Find block dimensions from arguments
Parameters
----------
argpairs: iterable
name, ijk index pairs
numblocks: dict
maps {name: number of blocks}
sentinels: iterable (optional)
values for singleton dimensions
consolidate: func (optional)
use this to reduce each set of common blocks into a smaller set
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
L = concat([zip(inds, dims) for (x, inds), (x, dims)
in join(first, argpairs, first, numblocks.items())])
g = groupby(0, L)
g = dict((k, set([d for i, d in v])) for k, v in g.items())
g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())
if consolidate:
return valmap(consolidate, g2)
if g2 and not set(map(len, g2.values())) == set([1]):
raise ValueError("Shapes do not align %s" % g)
return valmap(first, g2)
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.::
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
    yields an embarrassingly parallel communication pattern and is read as
$$ z_i = func(x_i, y_i) $$
More complex patterns may emerge, including multiple indices::
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
    Indices missing in the output but present in the inputs result in many
inputs being sent to one function (see examples).
Examples
--------
Simple embarrassing map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Pass ``concatenate=True`` to concatenate arrays ahead of time
>>> top(f, 'z', 'i', 'x', 'ij', 'y', 'ij', concatenate=True,
... numblocks={'x': (2, 2), 'y': (2, 2,)}) # doctest: +SKIP
{('z', 0): (f, (concatenate_axes, [('x', 0, 0), ('x', 0, 1)], (1,)),
(concatenate_axes, [('y', 0, 0), ('y', 0, 1)], (1,)))
('z', 1): (f, (concatenate_axes, [('x', 1, 0), ('x', 1, 1)], (1,)),
(concatenate_axes, [('y', 1, 0), ('y', 1, 1)], (1,)))}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
Support keyword arguments with apply
>>> def f(a, b=0): return a + b
>>> top(f, 'z', 'i', 'x', 'i', numblocks={'x': (2,)}, b=10) # doctest: +SKIP
{('z', 0): (apply, f, [('x', 0)], {'b': 10}),
('z', 1): (apply, f, [('x', 1)], {'b': 10})}
See Also
--------
atop
"""
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
argpairs = list(partition(2, arrind_pairs))
assert set(numblocks) == set(pluck(0, argpairs))
all_indices = pipe(argpairs, pluck(1), concat, set)
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
for k in new_axes:
dims[k] = 1
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
tups = lol_tuples((arg,), ind, kd, dummies)
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
if concatenate and isinstance(tups2, list):
axes = [n for n, i in enumerate(ind) if i in dummies]
tups2 = (concatenate_axes, tups2, axes)
args.append(tups2)
valtups.append(tuple(args))
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
if kwargs:
vals = [(apply, func, list(vt), kwargs) for vt in valtups]
else:
vals = [(func,) + vt for vt in valtups]
return dict(zip(keys, vals))
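# Minimal sketch of the graph ``top`` builds: with a single block per input
# there is exactly one task per output key.
# >>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
# ...     numblocks={'x': (1, 1), 'y': (1, 1)})
# yields {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0))}.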
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if not isinstance(arrays, (list, tuple)):
return arrays
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def map_blocks(func, *args, **kwargs):
""" Map a function across all blocks of a dask array
Parameters
----------
func: callable
Function to apply to every block in the array
args: dask arrays or constants
dtype: np.dtype
Datatype of resulting array
chunks: tuple (optional)
Chunk shape of resulting blocks if the function does not preserve shape
drop_axis: number or iterable (optional)
Dimensions lost by the function
new_axis: number or iterable (optional)
New dimensions created by the function
**kwargs:
Other keyword arguments to pass to function.
Values must be constants (not dask.arrays)
    You should also specify the chunks and dtype of the resulting array. If you
don't then we assume that the resulting array has the same block structure
as the input.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If function changes shape of the blocks then please provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
Map_blocks aligns blocks by block positions without regard to shape. In
the following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<..., shape=(20,), dtype=int64, chunksize=(2,)>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
Your block function can learn where in the array it is if it supports a
``block_id`` keyword argument. This will receive entries like (2, 0, 1),
the position of the block in the dask array.
>>> def func(block, block_id=None):
... pass
You may specify the name of the resulting task in the graph with the
optional ``name`` keyword argument.
>>> y = x.map_blocks(lambda x: x + 1, name='increment')
"""
if not callable(func):
msg = ("First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)")
raise TypeError(msg % type(func).__name__)
name = kwargs.pop('name', None)
name = name or '%s-%s' % (funcname(func), tokenize(func, args, **kwargs))
dtype = kwargs.pop('dtype', None)
chunks = kwargs.pop('chunks', None)
drop_axis = kwargs.pop('drop_axis', [])
new_axis = kwargs.pop('new_axis', [])
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis]
if drop_axis and new_axis:
raise ValueError("Can't specify drop_axis and new_axis together")
arrs = [a for a in args if isinstance(a, Array)]
args = [(i, a) for i, a in enumerate(args) if not isinstance(a, Array)]
argpairs = [(a.name, tuple(range(a.ndim))[::-1]) for a in arrs]
numblocks = {a.name: a.numblocks for a in arrs}
arginds = list(concat(argpairs))
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
try:
spec = getargspec(func)
block_id = ('block_id' in spec.args or
'block_id' in getattr(spec, 'kwonly_args', ()))
except:
block_id = False
if block_id:
kwargs['block_id'] = '__dummy__'
if args:
dsk = top(partial_by_order, name, out_ind, *arginds,
numblocks=numblocks, function=func, other=args,
**kwargs)
else:
dsk = top(func, name, out_ind, *arginds, numblocks=numblocks,
**kwargs)
# If func has block_id as an argument, add it to the kwargs for each call
if block_id:
for k in dsk.keys():
dsk[k] = dsk[k][:-1] + (assoc(dsk[k][-1], 'block_id', k[1:]),)
if len(arrs) == 1:
numblocks = list(arrs[0].numblocks)
else:
dims = broadcast_dimensions(argpairs, numblocks)
numblocks = [b for (_, b) in reversed(list(dims.items()))]
if drop_axis:
if any(numblocks[i] > 1 for i in drop_axis):
raise ValueError("Can't drop an axis with more than 1 block. "
"Please use `atop` instead.")
dsk = dict((tuple(k for i, k in enumerate(k)
if i - 1 not in drop_axis), v)
for k, v in dsk.items())
numblocks = [n for i, n in enumerate(numblocks) if i not in drop_axis]
elif new_axis:
dsk, old_dsk = dict(), dsk
for key in old_dsk:
new_key = list(key)
for i in new_axis:
new_key.insert(i + 1, 0)
dsk[tuple(new_key)] = old_dsk[key]
for i in sorted(new_axis):
numblocks.insert(i, 1)
if chunks:
if len(chunks) != len(numblocks):
raise ValueError("Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(numblocks)))
chunks2 = []
for i, (c, nb) in enumerate(zip(chunks, numblocks)):
if isinstance(c, tuple):
if not len(c) == nb:
raise ValueError("Dimension {0} has {1} blocks, "
"chunks specified with "
"{2} blocks".format(i, nb, len(c)))
chunks2.append(c)
else:
chunks2.append(nb * (c,))
else:
if len(arrs) == 1:
chunks2 = list(arrs[0].chunks)
else:
try:
chunks2 = list(broadcast_chunks(*[a.chunks for a in arrs]))
except:
raise ValueError("Arrays in `map_blocks` don't align, can't "
"infer output chunks. Please provide "
"`chunks` kwarg.")
if drop_axis:
chunks2 = [c for (i, c) in enumerate(chunks2) if i not in drop_axis]
elif new_axis:
for i in sorted(new_axis):
chunks2.insert(i, (1,))
chunks = tuple(chunks2)
return Array(merge(dsk, *[a.dask for a in arrs]), name, chunks, dtype)
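# Illustrative sketch of the ``block_id`` hook described in the docstring:
# each block sees its own position in the block grid.
# >>> import dask.array as da
# >>> x = da.arange(6, chunks=3)
# >>> def offset(block, block_id=None):
# ...     return block + 100 * block_id[0]
# >>> x.map_blocks(offset, dtype=x.dtype).compute()
# array([  0,   1,   2, 103, 104, 105])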
def broadcast_chunks(*chunkss):
""" Construct a chunks tuple that broadcasts many chunks tuples
>>> a = ((5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((5, 5),)
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((1,), (5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((3, 3,), (5, 5),)
>>> broadcast_chunks(a, b)
Traceback (most recent call last):
...
ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
"""
if len(chunkss) == 1:
return chunkss[0]
n = max(map(len, chunkss))
chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]
result = []
for i in range(n):
step1 = [c[i] for c in chunkss2]
if all(c == (1,) for c in step1):
step2 = step1
else:
step2 = [c for c in step1 if c != (1,)]
if len(set(step2)) != 1:
raise ValueError("Chunks do not align: %s" % str(step2))
result.append(step2[0])
return tuple(result)
@wraps(np.squeeze)
def squeeze(a, axis=None):
if 1 not in a.shape:
return a
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
b = a.map_blocks(partial(np.squeeze, axis=axis), dtype=a.dtype)
chunks = tuple(bd for bd in b.chunks if bd != (1,))
old_keys = list(product([b.name], *[range(len(bd)) for bd in b.chunks]))
new_keys = list(product([b.name], *[range(len(bd)) for bd in chunks]))
dsk = b.dask.copy()
for o, n in zip(old_keys, new_keys):
dsk[n] = dsk[o]
del dsk[o]
return Array(dsk, b.name, chunks, dtype=a.dtype)
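# Illustrative sketch: singleton dimensions are dropped and the chunk
# structure is rebuilt to match.
# >>> import dask.array as da
# >>> da.ones((1, 3, 1), chunks=(1, 3, 1)).squeeze().shape
# (3,)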
def topk(k, x):
""" The top k elements of an array
Returns the k greatest elements of the array in sorted order. Only works
on arrays of a single dimension.
This assumes that ``k`` is small. All results will be returned in a single
chunk.
Examples
--------
>>> x = np.array([5, 1, 3, 6])
>>> d = from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
"""
if x.ndim != 1:
raise ValueError("Topk only works on arrays of one dimension")
token = tokenize(k, x)
name = 'chunk.topk-' + token
dsk = dict(((name, i), (chunk.topk, k, key))
for i, key in enumerate(x._keys()))
name2 = 'topk-' + token
dsk[(name2, 0)] = (getitem, (np.sort, (np.concatenate, list(dsk))),
slice(-1, -k - 1, -1))
chunks = ((k,),)
return Array(merge(dsk, x.dask), name2, chunks, dtype=x.dtype)
def store(sources, targets, lock=True, compute=True, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or iterable of Arrays
targets: array-like or iterable of array-likes
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular ``threading.Lock`` object to be shared among all writes.
compute: boolean, optional
        If true, compute immediately; otherwise return a ``dask.delayed.Delayed``
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError("Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets)))
updates = [insert_to_ooc(tgt, src, lock=lock)
for tgt, src in zip(targets, sources)]
dsk = merge([src.dask for src in sources] + updates)
keys = [key for u in updates for key in u]
if compute:
Array._get(dsk, keys, **kwargs)
else:
from ..delayed import Delayed
name = 'store-' + tokenize(*keys)
dsk[name] = keys
return Delayed(name, [dsk])
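# Illustrative sketch (in-memory target; any object supporting numpy-style
# ``__setitem__`` works): store into a plain numpy array, or defer the writes
# with ``compute=False``, which returns a single Delayed value.
# >>> import numpy as np, dask.array as da
# >>> x = da.arange(10, chunks=5)
# >>> out = np.empty(10, dtype=x.dtype)
# >>> store(x, out)
# >>> deferred = store(x, out, compute=False)
# >>> deferred.compute()   # executes the writes now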
def blockdims_from_blockshape(shape, chunks):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
"""
if chunks is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
if not all(map(is_integer, chunks)):
raise ValueError("chunks can only contain integers.")
if not all(map(is_integer, shape)):
raise ValueError("shape can only contain integers.")
shape = tuple(map(int, shape))
chunks = tuple(map(int, chunks))
return tuple((bd,) * (d // bd) + ((d % bd,) if d % bd else ())
for d, bd in zip(shape, chunks))
def finalize(results):
if not results:
return concatenate3(results)
results2 = results
while isinstance(results2, (tuple, list)):
if len(results2) > 1:
return concatenate3(results)
else:
results2 = results2[0]
return unpack_singleton(results)
class Array(Base):
""" Parallel Dask Array
A parallel nd-array comprised of many numpy arrays arranged in a grid.
This constructor is for advanced uses only. For normal use see the
``da.from_array`` function.
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
See Also
--------
dask.array.from_array
"""
__slots__ = 'dask', 'name', '_chunks', '_dtype'
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dask, name, chunks, dtype=None, shape=None):
self.dask = dask
self.name = name
self._chunks = normalize_chunks(chunks, shape)
if self._chunks is None:
raise ValueError(chunks_none_error_message)
if dtype is not None:
dtype = np.dtype(dtype)
self._dtype = dtype
@property
def _args(self):
return (self.dask, self.name, self.chunks, self.dtype)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self.name, self._chunks, self._dtype = state
@property
def numblocks(self):
return tuple(map(len, self.chunks))
@property
def npartitions(self):
return reduce(mul, self.numblocks, 1)
@property
def shape(self):
return tuple(map(sum, self.chunks))
def _get_chunks(self):
return self._chunks
def _set_chunks(self, chunks):
raise TypeError("Can not set chunks directly\n\n"
"Please use the rechunk method instead:\n"
" x.rechunk(%s)" % str(chunks))
chunks = property(_get_chunks, _set_chunks, "chunks property")
def __len__(self):
return sum(self.chunks[0])
@property
@memoize(key=lambda args, kwargs: (id(args[0]), args[0].name, args[0].chunks))
def dtype(self):
if self._dtype is not None:
return self._dtype
if self.shape:
return self[(0,) * self.ndim].compute().dtype
else:
return self.compute().dtype
def __repr__(self):
"""
>>> import dask.array as da
>>> da.ones((10, 10), chunks=(5, 5), dtype='i4')
dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5)>
"""
chunksize = str(tuple(c[0] if c else 0 for c in self.chunks))
name = self.name if len(self.name) < 10 else self.name[:7] + '...'
return ("dask.array<%s, shape=%s, dtype=%s, chunksize=%s>" %
(name, self.shape, self._dtype, chunksize))
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
""" Number of elements in array """
return reduce(mul, self.shape, 1)
@property
def nbytes(self):
""" Number of bytes in array """
return self.size * self.dtype.itemsize
def _keys(self, *args):
if not args:
try:
return self._cached_keys
except AttributeError:
pass
if not self.chunks:
return [(self.name,)]
ind = len(args)
if ind + 1 == self.ndim:
result = [(self.name,) + args + (i,)
for i in range(self.numblocks[ind])]
else:
result = [self._keys(*(args + (i,)))
for i in range(self.numblocks[ind])]
if not args:
self._cached_keys = result
return result
__array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
@property
def _elemwise(self):
return elemwise
@wraps(store)
def store(self, target, **kwargs):
return store([self], [target], **kwargs)
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
return to_hdf5(filename, datapath, self, **kwargs)
def to_dask_dataframe(self, columns=None):
""" Convert dask Array to dask Dataframe
Parameters
----------
columns: list or string
list of column names if DataFrame, single string if Series
See Also
--------
dask.dataframe.from_dask_array
"""
from ..dataframe import from_dask_array
return from_dask_array(self, columns=columns)
def cache(self, store=None, **kwargs):
""" Evaluate and cache array
Parameters
----------
store: MutableMapping or ndarray-like
Place to put computed and cached chunks
kwargs:
Keyword arguments to pass on to ``get`` function for scheduling
Examples
--------
        This triggers evaluation and stores the result in either
1. An ndarray object supporting setitem (see da.store)
2. A MutableMapping like a dict or chest
        It then returns a new, semantically equivalent dask array that points
        to this store.
>>> import dask.array as da
>>> x = da.arange(5, chunks=2)
>>> y = 2*x + 1
>>> z = y.cache() # triggers computation
>>> y.compute() # Does entire computation
array([1, 3, 5, 7, 9])
>>> z.compute() # Just pulls from store
array([1, 3, 5, 7, 9])
You might base a cache off of an array like a numpy array or
h5py.Dataset.
>>> cache = np.empty(5, dtype=x.dtype)
>>> z = y.cache(store=cache)
>>> cache
array([1, 3, 5, 7, 9])
Or one might use a MutableMapping like a dict or chest
>>> cache = dict()
>>> z = y.cache(store=cache)
>>> cache # doctest: +SKIP
{('x', 0): array([1, 3]),
('x', 1): array([5, 7]),
('x', 2): array([9])}
"""
warnings.warn("Deprecation Warning: The `cache` method is deprecated, "
"and will be removed in the next release. To achieve "
"the same behavior, either write to disk or use "
"`Client.persist`, from `dask.distributed`.")
if store is not None and hasattr(store, 'shape'):
self.store(store)
return from_array(store, chunks=self.chunks)
if store is None:
try:
from chest import Chest
store = Chest()
except ImportError:
if self.nbytes <= 1e9:
store = dict()
else:
msg = ("No out-of-core storage found."
"Either:\n"
"1. Install ``chest``, an out-of-core dictionary\n"
"2. Provide an on-disk array like an h5py.Dataset")
raise ValueError(msg) # pragma: no cover
if isinstance(store, MutableMapping):
name = 'cache-' + tokenize(self)
dsk = dict(((name, k[1:]), (operator.setitem, store, (tuple, list(k)), k))
for k in core.flatten(self._keys()))
Array._get(merge(dsk, self.dask), list(dsk.keys()), **kwargs)
dsk2 = dict((k, (operator.getitem, store, (tuple, list(k))))
for k in store)
return Array(dsk2, self.name, chunks=self.chunks, dtype=self._dtype)
def __int__(self):
return int(self.compute())
def __bool__(self):
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def __float__(self):
return float(self.compute())
def __complex__(self):
return complex(self.compute())
def __getitem__(self, index):
out = 'getitem-' + tokenize(self, index)
# Field access, e.g. x['a'] or x[['a', 'b']]
if (isinstance(index, (str, unicode)) or
(isinstance(index, list) and
all(isinstance(i, (str, unicode)) for i in index))):
if self._dtype is not None:
if isinstance(index, (str, unicode)):
dt = self._dtype[index]
else:
dt = np.dtype([(name, self._dtype[name]) for name in index])
else:
dt = None
if dt is not None and dt.shape:
new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))
chunks = self.chunks + tuple((i,) for i in dt.shape)
return self.map_blocks(getitem, index, dtype=dt.base, name=out,
chunks=chunks, new_axis=new_axis)
else:
return self.map_blocks(getitem, index, dtype=dt, name=out)
# Slicing
if not isinstance(index, tuple):
index = (index,)
if any(isinstance(i, Array) for i in index):
raise NotImplementedError("Indexing with a dask Array")
if all(isinstance(i, slice) and i == slice(None) for i in index):
return self
dsk, chunks = slice_array(out, self.name, self.chunks, index)
if len(dsk) < self.npartitions / 2: # significant reduction in graph
needed = core.get_dependencies(self.dask, task=list(dsk.values()))
dsk2, _ = cull(self.dask, needed)
dsk2.update(dsk)
else:
dsk2 = merge(self.dask, dsk)
return Array(dsk2, out, chunks, dtype=self._dtype)
def _vindex(self, key):
if (not isinstance(key, tuple) or
not len([k for k in key if isinstance(k, (np.ndarray, list))]) >= 2 or
not all(isinstance(k, (np.ndarray, list)) or k == slice(None, None)
for k in key)):
msg = ("vindex expects only lists and full slices\n"
"At least two entries must be a list\n"
"For other combinations try doing normal slicing first, followed\n"
"by vindex slicing. Got: \n\t%s")
raise IndexError(msg % str(key))
if any((isinstance(k, np.ndarray) and k.ndim != 1) or
(isinstance(k, list) and k and isinstance(k[0], list))
for k in key):
raise IndexError("vindex does not support multi-dimensional keys\n"
"Got: %s" % str(key))
if len(set(len(k) for k in key if isinstance(k, (list, np.ndarray)))) != 1:
raise IndexError("All indexers must have the same length, got\n"
"\t%s" % str(key))
key = key + (slice(None, None),) * (self.ndim - len(key))
key = [i if isinstance(i, list) else
i.tolist() if isinstance(i, np.ndarray) else
None for i in key]
return _vindex(self, *key)
@property
def vindex(self):
return IndexCallable(self._vindex)
@wraps(np.dot)
def dot(self, other):
return tensordot(self, other,
axes=((self.ndim - 1,), (other.ndim - 2,)))
@property
def A(self):
return self
@property
def T(self):
return transpose(self)
@derived_from(np.ndarray)
def transpose(self, *axes):
if not axes:
axes = None
elif len(axes) == 1 and isinstance(axes[0], Iterable):
axes = axes[0]
return transpose(self, axes=axes)
@wraps(np.ravel)
def ravel(self):
return ravel(self)
flatten = ravel
@wraps(np.reshape)
def reshape(self, *shape):
if len(shape) == 1 and not isinstance(shape[0], Number):
shape = shape[0]
return reshape(self, shape)
@wraps(topk)
def topk(self, k):
return topk(k, self)
def astype(self, dtype, **kwargs):
"""Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
"""
# Scalars don't take `casting` or `copy` kwargs - as such we only pass
# them to `map_blocks` if specified by user (different than defaults).
extra = set(kwargs) - {'casting', 'copy'}
if extra:
raise TypeError("astype does not take the following keyword "
"arguments: {0!s}".format(list(extra)))
casting = kwargs.get('casting', 'unsafe')
copy = kwargs.get('copy', True)
dtype = np.dtype(dtype)
if self._dtype is not None:
if self._dtype == dtype:
return self
elif not np.can_cast(self._dtype, dtype, casting=casting):
raise TypeError("Cannot cast array from {0!r} to {1!r}"
" according to the rule "
"{2!r}".format(self._dtype, dtype, casting))
name = 'astype-' + tokenize(self, dtype, casting, copy)
return self.map_blocks(_astype, dtype=dtype, name=name,
astype_dtype=dtype, **kwargs)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.invert, self)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __pos__(self):
return self
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
@wraps(np.any)
def any(self, axis=None, keepdims=False, split_every=None):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.all)
def all(self, axis=None, keepdims=False, split_every=None):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.min)
def min(self, axis=None, keepdims=False, split_every=None):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.max)
def max(self, axis=None, keepdims=False, split_every=None):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims, split_every=split_every)
@wraps(np.argmin)
def argmin(self, axis=None, split_every=None):
from .reductions import argmin
return argmin(self, axis=axis, split_every=split_every)
@wraps(np.argmax)
def argmax(self, axis=None, split_every=None):
from .reductions import argmax
return argmax(self, axis=axis, split_every=split_every)
@wraps(np.sum)
def sum(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import sum
return sum(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.prod)
def prod(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import prod
return prod(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.mean)
def mean(self, axis=None, dtype=None, keepdims=False, split_every=None):
from .reductions import mean
return mean(self, axis=axis, dtype=dtype, keepdims=keepdims,
split_every=split_every)
@wraps(np.std)
def std(self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
from .reductions import std
return std(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every)
@wraps(np.var)
def var(self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
from .reductions import var
return var(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every)
def moment(self, order, axis=None, dtype=None, keepdims=False, ddof=0,
split_every=None):
"""Calculate the nth centralized moment.
Parameters
----------
order : int
Order of the moment that is returned, must be >= 2.
axis : int, optional
Axis along which the central moment is computed. The default is to
compute the moment of the flattened array.
dtype : data-type, optional
Type to use in computing the moment. For arrays of integer type the
default is float64; for arrays of float types it is the same as the
array type.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
N - ddof, where N represents the number of elements. By default
ddof is zero.
Returns
-------
moment : ndarray
References
----------
.. [1] Pebay, Philippe (2008), "Formulas for Robust, One-Pass Parallel
Computation of Covariances and Arbitrary-Order Statistical Moments"
(PDF), Technical Report SAND2008-6212, Sandia National Laboratories
"""
from .reductions import moment
return moment(self, order, axis=axis, dtype=dtype, keepdims=keepdims,
ddof=ddof, split_every=split_every)
def vnorm(self, ord=None, axis=None, keepdims=False, split_every=None):
""" Vector norm """
from .reductions import vnorm
return vnorm(self, ord=ord, axis=axis, keepdims=keepdims,
split_every=split_every)
@wraps(map_blocks)
def map_blocks(self, func, *args, **kwargs):
return map_blocks(func, self, *args, **kwargs)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
The number of cells that each block should share with its neighbors
If a tuple or dict this can be different per axis
boundary: str, tuple, dict
how to handle the boundaries. Values include 'reflect',
'periodic', 'nearest', 'none', or any constant value like 0 or
np.nan
trim: bool
Whether or not to trim the excess after the map function. Set this
to false if your mapping function does this for you.
**kwargs:
Other keyword arguments valid in ``map_blocks``
Examples
--------
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
>>> import dask.array as da
>>> x = np.arange(16).reshape((4, 4))
>>> d = da.from_array(x, chunks=(2, 2))
>>> d.map_overlap(lambda x: x + x.size, depth=1).compute()
array([[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]])
>>> func = lambda x: x + x.size
>>> depth = {0: 1, 1: 1}
>>> boundary = {0: 'reflect', 1: 'none'}
>>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE
array([[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27]])
"""
from .ghost import map_overlap
return map_overlap(self, func, depth, boundary, trim, **kwargs)
def cumsum(self, axis, dtype=None):
""" See da.cumsum for docstring """
from .reductions import cumsum
return cumsum(self, axis, dtype)
def cumprod(self, axis, dtype=None):
""" See da.cumprod for docstring """
from .reductions import cumprod
return cumprod(self, axis, dtype)
@wraps(squeeze)
def squeeze(self):
return squeeze(self)
def rechunk(self, chunks):
""" See da.rechunk for docstring """
from .rechunk import rechunk
return rechunk(self, chunks)
@property
def real(self):
from .ufunc import real
return real(self)
@property
def imag(self):
from .ufunc import imag
return imag(self)
def conj(self):
from .ufunc import conj
return conj(self)
@wraps(np.clip)
def clip(self, min=None, max=None):
from .ufunc import clip
return clip(self, min, max)
def view(self, dtype, order='C'):
""" Get a view of the array as a new data type
Parameters
----------
dtype:
The dtype by which to view the array
order: string
'C' or 'F' (Fortran) ordering
This reinterprets the bytes of the array under a new dtype. If that
        dtype does not have the same itemsize as the original dtype then the shape
will change.
Beware that both numpy and dask.array can behave oddly when taking
shape-changing views of arrays under Fortran ordering. Under some
versions of NumPy this function will fail when taking shape-changing
views of Fortran ordered arrays if the first dimension has chunks of
size one.
"""
dtype = np.dtype(dtype)
mult = self.dtype.itemsize / dtype.itemsize
if order == 'C':
ascontiguousarray = np.ascontiguousarray
chunks = self.chunks[:-1] + (tuple(ensure_int(c * mult)
for c in self.chunks[-1]),)
elif order == 'F':
ascontiguousarray = np.asfortranarray
chunks = ((tuple(ensure_int(c * mult) for c in self.chunks[0]), ) +
self.chunks[1:])
else:
raise ValueError("Order must be one of 'C' or 'F'")
out = elemwise(ascontiguousarray, self, dtype=self.dtype)
out = elemwise(np.ndarray.view, out, dtype, dtype=dtype)
out._chunks = chunks
return out
@wraps(np.swapaxes)
def swapaxes(self, axis1, axis2):
return swapaxes(self, axis1, axis2)
@wraps(np.round)
def round(self, decimals=0):
return round(self, decimals=decimals)
def copy(self):
"""
        Copy array. This is a no-op for dask.arrays, which are immutable.
"""
return self
def to_delayed(self):
""" Convert Array into dask Delayed objects
Returns an array of values, one value per chunk.
See Also
--------
dask.array.from_delayed
"""
from ..delayed import Delayed
return np.array(deepmap(lambda k: Delayed(k, [self.dask]), self._keys()),
dtype=object)
@wraps(np.repeat)
def repeat(self, repeats, axis=None):
return repeat(self, repeats, axis=axis)
def ensure_int(f):
i = int(f)
if i != f:
raise ValueError("Could not coerce %f to integer" % f)
return i
normalize_token.register(Array, lambda a: a.name)
def normalize_chunks(chunks, shape=None):
""" Normalize chunks to tuple of tuples
>>> normalize_chunks((2, 2), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(4, 6)) # Idempotent
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks([[2, 2], [3, 3]]) # Cleans up lists to tuples
((2, 2), (3, 3))
>>> normalize_chunks(10, shape=(30, 5)) # Supports integer inputs
((10, 10, 10), (5,))
>>> normalize_chunks((), shape=(0, 0)) # respects null dimensions
((), ())
"""
if chunks is None:
raise ValueError(chunks_none_error_message)
if isinstance(chunks, list):
chunks = tuple(chunks)
if isinstance(chunks, Number):
chunks = (chunks,) * len(shape)
if not chunks and shape and all(s == 0 for s in shape):
chunks = ((),) * len(shape)
if shape and len(chunks) != len(shape):
if not (len(shape) == 1 and sum(chunks) == shape[0]):
raise ValueError(
"Chunks and shape must be of the same length/dimension. "
"Got chunks=%s, shape=%s" % (chunks, shape))
if shape is not None:
chunks = tuple(c if c is not None else s for c, s in zip(chunks, shape))
if chunks and shape is not None:
chunks = sum((blockdims_from_blockshape((s,), (c,))
if not isinstance(c, (tuple, list)) else (c,)
for s, c in zip(shape, chunks)), ())
return tuple(map(tuple, chunks))
def from_array(x, chunks, name=None, lock=False, fancy=True):
""" Create dask array from something that looks like an array
Input must have a ``.shape`` and support numpy-style slicing.
Parameters
----------
x : array_like
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions
like ((1000, 1000, 500), (400, 400)).
name : str, optional
The key name to use for the array. Defaults to a hash of ``x``.
lock : bool or Lock, optional
If ``x`` doesn't support concurrent reads then provide a lock here, or
pass in True to have dask.array create one for you.
fancy : bool, optional
If ``x`` doesn't support fancy indexing (e.g. indexing with lists or
arrays) then set to False. Default is True.
Examples
--------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP
If your underlying datastore does not support concurrent reads then include
the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
arrays to coordinate around the same lock.
>>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP
"""
chunks = normalize_chunks(chunks, x.shape)
if len(chunks) != len(x.shape):
raise ValueError("Input array has %d dimensions but the supplied "
"chunks has only %d dimensions" %
(len(x.shape), len(chunks)))
if tuple(map(sum, chunks)) != x.shape:
raise ValueError("Chunks do not add up to shape. "
"Got chunks=%s, shape=%s" % (chunks, x.shape))
token = tokenize(x, chunks)
original_name = (name or 'array-') + 'original-' + token
name = name or 'array-' + token
if lock is True:
lock = Lock()
dsk = getem(original_name, chunks, out_name=name, fancy=fancy, lock=lock)
return Array(merge({original_name: x}, dsk), name, chunks, dtype=x.dtype)
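# Concrete sketch with an in-memory numpy source (anything exposing ``.shape``
# and numpy-style slicing works the same way):
# >>> x = np.arange(1000).reshape(100, 10)
# >>> d = from_array(x, chunks=(50, 5))
# >>> d.chunks
# ((50, 50), (5, 5))
# >>> bool((d.compute() == x).all())
# True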
def from_delayed(value, shape, dtype=None, name=None):
""" Create a dask array from a dask delayed value
This routine is useful for constructing dask arrays in an ad-hoc fashion
using dask delayed, particularly when combined with stack and concatenate.
The dask array will consist of a single chunk.
Examples
--------
>>> from dask import do
>>> value = do(np.ones)(5)
>>> array = from_delayed(value, (5,), dtype=float)
>>> array
dask.array<from-va..., shape=(5,), dtype=float64, chunksize=(5,)>
>>> array.compute()
array([ 1., 1., 1., 1., 1.])
"""
name = name or 'from-value-' + tokenize(value, shape, dtype)
dsk = {(name,) + (0,) * len(shape): value.key}
dsk.update(value.dask)
chunks = tuple((d,) for d in shape)
return Array(dsk, name, chunks, dtype)
def from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):
""" Create dask array in a single block by calling a function
Calling the provided function with func(*args, **kwargs) should return a
NumPy array of the indicated shape and dtype.
Examples
--------
>>> a = from_func(np.arange, (3,), np.int64, args=(3,))
>>> a.compute()
array([0, 1, 2])
This works particularly well when coupled with dask.array functions like
concatenate and stack:
>>> arrays = [from_func(np.array, (), args=(n,)) for n in range(5)]
>>> stack(arrays).compute()
array([0, 1, 2, 3, 4])
"""
name = name or 'from_func-' + tokenize(func, shape, dtype, args, kwargs)
if args or kwargs:
func = partial(func, *args, **kwargs)
dsk = {(name,) + (0,) * len(shape): (func,)}
chunks = tuple((i,) for i in shape)
return Array(dsk, name, chunks, dtype)
def common_blockdim(blockdims):
""" Find the common block dimensions from the list of block dimensions
    Currently only implements the simplest possible heuristic: the common
    block-dimension is the only one that does not fully span a dimension.
    This is a conservative choice that allows us to avoid potentially very
    expensive rechunking.
    Assumes that the elements of the input block dimensions all have the same
    sum (i.e., that they correspond to dimensions of the same size).
Examples
--------
>>> common_blockdim([(3,), (2, 1)])
(2, 1)
>>> common_blockdim([(1, 2), (2, 1)])
(1, 1, 1)
>>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP
Traceback (most recent call last):
...
ValueError: Chunks do not align
"""
non_trivial_dims = set([d for d in blockdims if len(d) > 1])
if len(non_trivial_dims) == 1:
return first(non_trivial_dims)
if len(non_trivial_dims) == 0:
return max(blockdims, key=first)
if len(set(map(sum, non_trivial_dims))) > 1:
raise ValueError("Chunks do not add up to same value", blockdims)
# We have multiple non-trivial chunks on this axis
# e.g. (5, 2) and (4, 3)
# We create a single chunk tuple with the same total length
# that evenly divides both, e.g. (4, 1, 2)
# To accomplish this we walk down all chunk tuples together, finding the
# smallest element, adding it to the output, and subtracting it from all
    # other elements, removing any element that reaches zero. We stop once we have
# burned through all of the chunk tuples.
# For efficiency's sake we reverse the lists so that we can pop off the end
rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]
total = sum(first(non_trivial_dims))
i = 0
out = []
while i < total:
m = min(c[-1] for c in rchunks)
out.append(m)
for c in rchunks:
c[-1] -= m
if c[-1] == 0:
c.pop()
i += m
return tuple(out)
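# Worked sketch of the merging walk above: two chunkings of the same total
# length are merged into their finest common refinement.
# >>> common_blockdim([(5, 2), (4, 3)])
# (4, 1, 2)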
def unify_chunks(*args, **kwargs):
"""
Unify chunks across a sequence of arrays
Parameters
----------
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
Examples
--------
>>> import dask.array as da
>>> x = da.ones(10, chunks=((5, 2, 3),))
>>> y = da.ones(10, chunks=((2, 3, 5),))
>>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')
>>> chunkss
{'i': (2, 3, 2, 3)}
>>> x = da.ones((100, 10), chunks=(20, 5))
>>> y = da.ones((10, 100), chunks=(4, 50))
>>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk')
>>> chunkss # doctest: +SKIP
{'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}
Returns
-------
chunkss : dict
Map like {index: chunks}.
arrays : list
List of rechunked arrays.
See Also
--------
common_blockdim
"""
warn = kwargs.get('warn', True)
arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
nameinds = [(a.name, i) for a, i in arginds]
blockdim_dict = dict((a.name, a.chunks) for a, _ in arginds)
chunkss = broadcast_dimensions(nameinds, blockdim_dict,
consolidate=common_blockdim)
max_parts = max(arg.npartitions for arg in args[::2])
nparts = np.prod(list(map(len, chunkss.values())))
if warn and nparts >= max_parts * 10:
warnings.warn("Increasing number of chunks by factor of %d" %
(nparts / max_parts))
arrays = [a.rechunk(tuple(chunkss[j] if a.shape[n] > 1 else 1
for n, j in enumerate(i)))
for a, i in arginds]
return chunkss, arrays
def atop(func, out_ind, *args, **kwargs):
""" Tensor operation: Generalized inner and outer products
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``atop`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Many dask.array operations are special cases of atop including elementwise,
broadcasting, reductions, tensordot, and transpose.
Parameters
----------
func: callable
Function to apply to individual tuples of blocks
out_ind: iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
**kwargs: dict
Extra keyword arguments to pass to function
concatenate: bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks: dict
Dictionary mapping index to function to be applied to chunk sizes
new_axes: dict, keyword only
New indexes and their dimension lengths
Examples
--------
2D embarrassingly parallel operation from two arrays, x, and y.
>>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij') # z = x + y # doctest: +SKIP
Outer product multiplying x by y, two 1-d vectors
>>> z = atop(operator.mul, 'ij', x, 'i', y, 'j') # doctest: +SKIP
z = x.T
>>> z = atop(np.transpose, 'ji', x, 'ij') # doctest: +SKIP
    The transpose case above is illustrative because it does the same transposition
both on each in-memory block by calling ``np.transpose`` and on the order
of the blocks themselves, by switching the order of the index ``ij -> ji``.
We can compose these same patterns with more variables and more complex
in-memory functions
z = X + Y.T
>>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji') # doctest: +SKIP
    Any index, like ``i``, missing from the output index is interpreted as a
contraction (note that this differs from Einstein convention; repeated
indices do not imply contraction.) In the case of a contraction the passed
function should expect an iterable of blocks on any array that holds that
index. To receive arrays concatenated along contracted dimensions instead
pass ``concatenate=True``.
Inner product multiplying x by y, two 1-d vectors
>>> def sequence_dot(x_blocks, y_blocks):
... result = 0
... for x, y in zip(x_blocks, y_blocks):
... result += x.dot(y)
... return result
>>> z = atop(sequence_dot, '', x, 'i', y, 'i') # doctest: +SKIP
Add new single-chunk dimensions with the ``new_axes=`` keyword, including
the length of the new dimension. New dimensions will always be in a single
chunk.
>>> def f(x):
... return x[:, None] * np.ones((1, 5))
>>> z = atop(f, 'az', x, 'a', new_axes={'z': 5}) # doctest: +SKIP
If the applied function changes the size of each chunk you can specify this
    with an ``adjust_chunks={...}`` dictionary holding a function for each index
that modifies the dimension size in that index.
>>> def double(x):
... return np.concatenate([x, x])
>>> y = atop(double, 'ij', x, 'ij', adjust_chunks={'i': lambda n: 2 * n}) # doctest: +SKIP
See Also
--------
top - dict formulation of this function, contains most logic
"""
out = kwargs.pop('name', None) # May be None at this point
token = kwargs.pop('token', None)
dtype = kwargs.pop('dtype', None)
adjust_chunks = kwargs.pop('adjust_chunks', None)
new_axes = kwargs.get('new_axes', {})
chunkss, arrays = unify_chunks(*args)
for k, v in new_axes.items():
chunkss[k] = (v,)
arginds = list(zip(arrays, args[1::2]))
numblocks = dict([(a.name, a.numblocks) for a, _ in arginds])
argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
# Finish up the name
if not out:
out = '%s-%s' % (token or funcname(func),
tokenize(func, out_ind, argindsstr, dtype, **kwargs))
dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks, **kwargs)
dsks = [a.dask for a, _ in arginds]
chunks = [chunkss[i] for i in out_ind]
if adjust_chunks:
for i, ind in enumerate(out_ind):
if ind in adjust_chunks:
if callable(adjust_chunks[ind]):
chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))
elif isinstance(adjust_chunks[ind], int):
chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])
elif isinstance(adjust_chunks[ind], (tuple, list)):
chunks[i] = tuple(adjust_chunks[ind])
else:
raise NotImplementedError(
"adjust_chunks values must be callable, int, or tuple")
chunks = tuple(chunks)
return Array(merge(dsk, *dsks), out, chunks, dtype=dtype)
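# Illustrative sketch (not part of the original dask source): a minimal use of
# ``atop`` for a blocked elementwise addition.  It assumes ``np``, ``add`` and
# ``from_array`` are available at module scope, as they are elsewhere in this file.
def _example_atop():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_atop()  # doctest: +SKIP
    array([[ 2.,  2.],
           [ 2.,  2.]])
    """
    x = from_array(np.ones((2, 2)), chunks=(1, 2))
    y = from_array(np.ones((2, 2)), chunks=(1, 2))
    # 'ij' on both operands and on the output makes this a pure elementwise op
    return atop(add, 'ij', x, 'ij', y, 'ij', dtype=x.dtype).compute()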
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
def stack(seq, axis=0):
"""
Stack arrays along a new axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along a new dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis + 1
if axis > ndim:
raise ValueError("Axis must not be greater than number of dimensions"
"\nData has %d dimensions, but got axis=%d" %
(ndim, axis))
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq))
_, seq = unify_chunks(*uc_args)
assert len(set(a.chunks for a in seq)) == 1 # same chunks
chunks = (seq[0].chunks[:axis] + ((1,) * n,) + seq[0].chunks[axis:])
names = [a.name for a in seq]
name = 'stack-' + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [(names[key[axis + 1]], ) + key[1:axis + 1] + key[axis + 2:]
for key in keys]
values = [(getitem, inp, (slice(None, None, None),) * axis +
(None, ) + (slice(None, None, None), ) * (ndim - axis))
for inp in inputs]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
else:
dt = None
return Array(dsk2, name, chunks, dtype=dt)
def concatenate(seq, axis=0):
"""
Concatenate arrays along an existing axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also
--------
stack
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis
if axis >= ndim:
msg = ("Axis must be less than than number of dimensions"
"\nData has %d dimensions, but got axis=%d")
raise ValueError(msg % (ndim, axis))
inds = [list(range(ndim)) for i in range(n)]
for i, ind in enumerate(inds):
ind[axis] = -(i + 1)
uc_args = list(concat(zip(seq, inds)))
_, seq = unify_chunks(*uc_args, warn=False)
bds = [a.chunks for a in seq]
chunks = (seq[0].chunks[:axis] + (sum([bd[axis] for bd in bds], ()), ) +
seq[0].chunks[axis + 1:])
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
seq = [x.astype(dt) for x in seq]
else:
dt = None
names = [a.name for a in seq]
name = 'concatenate-' + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
values = [(names[bisect(cum_dims, key[axis + 1]) - 1],) + key[1:axis + 1] +
(key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1], ) +
key[axis + 2:] for key in keys]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, * [a.dask for a in seq])
return Array(dsk2, name, chunks, dtype=dt)
def atleast_3d(x):
if x.ndim == 1:
return x[None, :, None]
elif x.ndim == 2:
return x[:, :, None]
elif x.ndim > 2:
return x
else:
raise NotImplementedError()
def atleast_2d(x):
if x.ndim == 1:
return x[None, :]
elif x.ndim > 1:
return x
else:
raise NotImplementedError()
@wraps(np.vstack)
def vstack(tup):
tup = tuple(atleast_2d(x) for x in tup)
return concatenate(tup, axis=0)
@wraps(np.hstack)
def hstack(tup):
if all(x.ndim == 1 for x in tup):
return concatenate(tup, axis=0)
else:
return concatenate(tup, axis=1)
@wraps(np.dstack)
def dstack(tup):
tup = tuple(atleast_3d(x) for x in tup)
return concatenate(tup, axis=2)
@wraps(np.take)
def take(a, indices, axis=0):
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
if isinstance(a, np.ndarray) and isinstance(indices, Array):
return _take_dask_array_from_numpy(a, indices, axis)
else:
return a[(slice(None),) * axis + (indices,)]
@wraps(np.compress)
def compress(condition, a, axis=None):
if axis is None:
raise NotImplementedError("Must select axis for compression")
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
condition = np.array(condition, dtype=bool)
if condition.ndim != 1:
raise ValueError("Condition must be one dimensional")
if len(condition) < a.shape[axis]:
condition = condition.copy()
condition.resize(a.shape[axis])
slc = ((slice(None),) * axis + (condition, ) +
(slice(None),) * (a.ndim - axis - 1))
return a[slc]
def _take_dask_array_from_numpy(a, indices, axis):
assert isinstance(a, np.ndarray)
assert isinstance(indices, Array)
return indices.map_blocks(lambda block: np.take(a, block, axis),
chunks=indices.chunks,
dtype=a.dtype)
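# Illustrative sketch (not part of the original dask source): ``take`` with a
# plain numpy array and dask indices goes through ``_take_dask_array_from_numpy``
# above.  Assumes ``np`` and ``from_array`` are available at module scope.
def _example_take():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_take()  # doctest: +SKIP
    array([10, 30, 50])
    """
    values = np.arange(10) * 10                          # in-memory data
    indices = from_array(np.array([1, 3, 5]), chunks=2)  # blocked indices
    return take(values, indices, axis=0).compute()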
@wraps(np.transpose)
def transpose(a, axes=None):
if axes:
if len(axes) != a.ndim:
raise ValueError("axes don't match array")
else:
axes = tuple(range(a.ndim))[::-1]
return atop(partial(np.transpose, axes=axes),
axes,
a, tuple(range(a.ndim)), dtype=a._dtype)
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, int):
left_axes = (left_axes,)
if isinstance(right_axes, int):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
if len(left_axes) > 1:
raise NotImplementedError("Simultaneous Contractions of multiple "
"indices not yet supported")
if isinstance(lhs, np.ndarray):
chunks = [(d,) for d in lhs.shape]
chunks[left_axes[0]] = rhs.chunks[right_axes[0]]
lhs = from_array(lhs, chunks=chunks)
if isinstance(rhs, np.ndarray):
chunks = [(d,) for d in rhs.shape]
chunks[right_axes[0]] = lhs.chunks[left_axes[0]]
rhs = from_array(rhs, chunks=chunks)
if lhs._dtype is not None and rhs._dtype is not None:
dt = np.promote_types(lhs._dtype, rhs._dtype)
else:
dt = None
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
right_index[r] = left_index[l]
intermediate = atop(np.tensordot, out_index,
lhs, left_index,
rhs, right_index, dtype=dt,
axes=(left_axes, right_axes))
int_index = list(out_index)
for l in left_axes:
out_index.remove(left_index[l])
return atop(sum, out_index, intermediate, int_index, dtype=dt)
@wraps(np.dot)
def dot(a, b):
return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))
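# Illustrative sketch (not part of the original dask source): a blocked matrix
# product via ``dot``/``tensordot``, checked against the in-memory input.
# Assumes ``np`` and ``from_array`` are available at module scope.
def _example_dot():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_dot()  # doctest: +SKIP
    True
    """
    data = np.arange(16).reshape(4, 4)
    a = from_array(data, chunks=(2, 2))
    identity = from_array(np.eye(4), chunks=(2, 2))
    # multiplying by the identity should reproduce the original data
    return bool(np.allclose(dot(a, identity).compute(), data))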
def insert_to_ooc(out, arr, lock=True):
if lock is True:
lock = Lock()
def store(x, index, lock):
if lock:
lock.acquire()
try:
out[index] = np.asanyarray(x)
finally:
if lock:
lock.release()
return None
slices = slices_from_chunks(arr.chunks)
name = 'store-%s' % arr.name
dsk = dict(((name,) + t[1:], (store, t, slc, lock))
for t, slc in zip(core.flatten(arr._keys()), slices))
return dsk
def asarray(array):
"""Coerce argument into a dask array
>>> x = np.arange(3)
>>> asarray(x)
dask.array<asarray..., shape=(3,), dtype=int64, chunksize=(3,)>
"""
if not isinstance(array, Array):
name = 'asarray-' + tokenize(array)
        if not isinstance(getattr(array, 'shape', None), Iterable):
array = np.asarray(array)
array = from_array(array, chunks=array.shape, name=name)
return array
def partial_by_order(*args, **kwargs):
"""
>>> partial_by_order(5, function=add, other=[(1, 10)])
15
"""
function = kwargs.pop('function')
other = kwargs.pop('other')
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return function(*args2, **kwargs)
def is_scalar_for_elemwise(arg):
"""
>>> is_scalar_for_elemwise(42)
True
>>> is_scalar_for_elemwise('foo')
True
>>> is_scalar_for_elemwise(True)
True
>>> is_scalar_for_elemwise(np.array(42))
True
>>> is_scalar_for_elemwise([1, 2, 3])
True
>>> is_scalar_for_elemwise(np.array([1, 2, 3]))
False
>>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))
False
>>> is_scalar_for_elemwise(np.dtype('i4'))
True
"""
return (np.isscalar(arg) or
not isinstance(getattr(arg, 'shape', None), Iterable) or
isinstance(arg, np.dtype) or
(isinstance(arg, np.ndarray) and arg.ndim == 0))
def broadcast_shapes(*shapes):
"""Determines output shape from broadcasting arrays.
Parameters
----------
shapes : tuples
The shapes of the arguments.
Returns
-------
output_shape : tuple
Raises
------
ValueError
If the input shapes cannot be successfully broadcast together.
"""
if len(shapes) == 1:
return shapes[0]
out = []
for sizes in zip_longest(*map(reversed, shapes), fillvalue=1):
dim = max(sizes)
if any(i != 1 and i != dim for i in sizes):
raise ValueError("operands could not be broadcast together with "
"shapes {0}".format(' '.join(map(str, shapes))))
out.append(dim)
return tuple(reversed(out))
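# Illustrative sketch (not part of the original dask source): the pure shape
# arithmetic performed by ``broadcast_shapes`` mirrors NumPy's broadcasting
# rules, aligning trailing dimensions and stretching size-1 dimensions.
def _example_broadcast_shapes():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_broadcast_shapes()  # doctest: +SKIP
    (8, 7, 6)
    """
    return broadcast_shapes((8, 1, 6), (7, 1))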
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
Examples
--------
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See Also
--------
atop
"""
if not set(['name', 'dtype']).issuperset(kwargs):
msg = "%s does not take the following keyword arguments %s"
raise TypeError(msg % (op.__name__, str(sorted(set(kwargs) - set(['name', 'dtype'])))))
shapes = [getattr(arg, 'shape', ()) for arg in args]
shapes = [s if isinstance(s, Iterable) else () for s in shapes]
out_ndim = len(broadcast_shapes(*shapes)) # Raises ValueError if dimensions mismatch
expr_inds = tuple(range(out_ndim))[::-1]
arrays = [asarray(a) for a in args if not is_scalar_for_elemwise(a)]
other = [(i, a) for i, a in enumerate(args) if is_scalar_for_elemwise(a)]
if 'dtype' in kwargs:
dt = kwargs['dtype']
elif any(a._dtype is None for a in arrays):
dt = None
else:
# We follow NumPy's rules for dtype promotion, which special cases
# scalars and 0d ndarrays (which it considers equivalent) by using
# their values to compute the result dtype:
# https://github.com/numpy/numpy/issues/6240
# We don't inspect the values of 0d dask arrays, because these could
# hold potentially very expensive calculations.
vals = [np.empty((1,) * a.ndim, dtype=a.dtype)
if not is_scalar_for_elemwise(a) else a
for a in args]
try:
dt = op(*vals).dtype
except AttributeError:
dt = None
name = kwargs.get('name', None) or '%s-%s' % (funcname(op),
tokenize(op, dt, *args))
if other:
return atop(partial_by_order, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name, function=op, other=other,
token=funcname(op))
else:
return atop(op, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name)
@wraps(np.around)
def around(x, decimals=0):
return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)
def isnull(values):
""" pandas.isnull for dask arrays """
import pandas as pd
return elemwise(pd.isnull, values, dtype='bool')
def notnull(values):
""" pandas.notnull for dask arrays """
return ~isnull(values)
@wraps(numpy_compat.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(numpy_compat.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype='bool')
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
where_error_message = """
The dask.array version of where only handles the three argument case.
da.where(x > 0, x, 0)
and not the single argument case
da.where(x > 0)
This is because dask.array operations must be able to infer the shape of their
outputs prior to execution. The number of positive elements of x requires
execution. See the ``np.where`` docstring for examples and the following link
for a more thorough explanation:
http://dask.pydata.org/en/latest/array-overview.html#construct
""".strip()
chunks_none_error_message = """
You must specify a chunks= keyword argument.
This specifies the chunksize of your array blocks.
See the following documentation page for details:
http://dask.pydata.org/en/latest/array-creation.html#chunks
""".strip()
@wraps(np.where)
def where(condition, x=None, y=None):
if x is None or y is None:
raise TypeError(where_error_message)
return choose(condition, [y, x])
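# Illustrative sketch (not part of the original dask source): the supported
# three-argument form of ``where`` described in ``where_error_message`` above.
# Assumes ``np`` and ``from_array`` are available at module scope.
def _example_where():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_where()  # doctest: +SKIP
    array([0, 0, 0, 3, 4, 5])
    """
    x = from_array(np.arange(6), chunks=3)
    # keep x where the condition holds, otherwise substitute 0
    return where(x > 2, x, 0).compute()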
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes, trim_excess=False):
if (not trim_excess and
not all(bd % div == 0 for i, div in axes.items()
for bd in x.chunks[i])):
msg = "Coarsening factor does not align with block dimensions"
raise ValueError(msg)
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = 'coarsen-' + tokenize(reduction, x, axes, trim_excess)
dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes,
trim_excess))
for key in core.flatten(x._keys()))
chunks = tuple(tuple(int(bd // axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.chunks))
if x._dtype is not None:
dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
else:
dt = None
return Array(merge(x.dask, dsk), name, chunks, dtype=dt)
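# Illustrative sketch (not part of the original dask source): block-wise
# downsampling with ``coarsen``; pairs of neighbouring elements along axis 0
# are reduced with ``np.sum``.  Assumes ``np`` and ``from_array`` at module scope.
def _example_coarsen():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_coarsen()  # doctest: +SKIP
    array([ 1,  5,  9, 13])
    """
    x = from_array(np.arange(8), chunks=4)
    # chunk sizes (4, 4) divide evenly by the coarsening factor 2
    return coarsen(np.sum, x, {0: 2}).compute()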
def split_at_breaks(array, breaks, axis=0):
""" Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@wraps(np.insert)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
if not -arr.ndim <= axis < arr.ndim:
raise IndexError('axis %r is out of bounds for an array of dimension '
'%s' % (axis, arr.ndim))
if axis < 0:
axis += arr.ndim
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
'da.insert only implemented for monotonic ``obj`` argument')
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, 'ndim', 0) == 0:
# we need to turn values into a dask array
name = 'values-' + tokenize(values)
dtype = getattr(values, 'dtype', type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(len(obj) if axis == n else s
for n, s in enumerate(arr.shape))
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd)
in enumerate(zip(arr.chunks,
values.chunks)))
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
@wraps(chunk.broadcast_to)
def broadcast_to(x, shape):
shape = tuple(shape)
ndim_new = len(shape) - x.ndim
if ndim_new < 0 or any(new != old
for new, old in zip(shape[ndim_new:], x.shape)
if old != 1):
raise ValueError('cannot broadcast shape %s to shape %s'
% (x.shape, shape))
name = 'broadcast_to-' + tokenize(x, shape)
chunks = (tuple((s,) for s in shape[:ndim_new]) +
tuple(bd if old > 1 else (new,)
for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])))
dsk = dict(((name,) + (0,) * ndim_new + key[1:],
(chunk.broadcast_to, key, shape[:ndim_new] +
tuple(bd[i] for i, bd in zip(key[1:], chunks[ndim_new:]))))
for key in core.flatten(x._keys()))
return Array(merge(dsk, x.dask), name, chunks, dtype=x.dtype)
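# Illustrative sketch (not part of the original dask source): ``broadcast_to``
# adds a leading axis without copying block data.  Assumes ``np`` and
# ``from_array`` are available at module scope.
def _example_broadcast_to():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_broadcast_to()  # doctest: +SKIP
    (4, 3)
    """
    x = from_array(np.arange(3), chunks=3)   # shape (3,)
    return broadcast_to(x, (4, 3)).compute().shape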
@wraps(np.ravel)
def ravel(array):
return reshape(array, (-1,))
@wraps(np.reshape)
def reshape(array, shape):
from .slicing import sanitize_index
shape = tuple(map(sanitize_index, shape))
known_sizes = [s for s in shape if s != -1]
if len(known_sizes) < len(shape):
        if len(shape) - len(known_sizes) > 1:
raise ValueError('can only specify one unknown dimension')
missing_size = sanitize_index(array.size / reduce(mul, known_sizes, 1))
shape = tuple(missing_size if s == -1 else s for s in shape)
if reduce(mul, shape, 1) != array.size:
raise ValueError('total size of new array must be unchanged')
    # ensure the same number of leading dimensions of size 1, to simplify the
    # logic below
leading_ones_diff = 0
for size in array.shape:
if size != 1:
break
leading_ones_diff += 1
for size in shape:
if size != 1:
break
leading_ones_diff -= 1
if leading_ones_diff > 0:
array = array[(0,) * leading_ones_diff]
elif leading_ones_diff < 0:
array = array[(np.newaxis,) * -leading_ones_diff]
# leading dimensions with the same size can be ignored in the reshape
ndim_same = 0
for old_size, new_size in zip(array.shape, shape):
if old_size != new_size:
break
ndim_same += 1
if any(len(c) != 1 for c in array.chunks[ndim_same + 1:]):
raise ValueError('dask.array.reshape requires that reshaped '
'dimensions after the first contain at most one chunk')
if ndim_same == len(shape):
chunks = array.chunks[:ndim_same]
elif ndim_same == array.ndim:
chunks = (array.chunks[:ndim_same] +
tuple((c,) for c in shape[ndim_same:]))
else:
trailing_size_before = reduce(mul, array.shape[ndim_same + 1:], 1)
trailing_size_after = reduce(mul, shape[ndim_same + 1:], 1)
ndim_same_chunks, remainders = zip(
*(divmod(c * trailing_size_before, trailing_size_after)
for c in array.chunks[ndim_same]))
if any(remainder != 0 for remainder in remainders):
raise ValueError('dask.array.reshape requires that the first '
'reshaped dimension can be evenly divided into '
'new chunks')
chunks = (array.chunks[:ndim_same] + (ndim_same_chunks, ) +
tuple((c, ) for c in shape[ndim_same + 1:]))
name = 'reshape-' + tokenize(array, shape)
dsk = {}
prev_index_count = min(ndim_same + 1, array.ndim, len(shape))
extra_zeros = len(shape) - prev_index_count
for key in core.flatten(array._keys()):
index = key[1:]
valid_index = index[:prev_index_count]
new_key = (name,) + valid_index + (0,) * extra_zeros
new_shape = (tuple(chunk[i] for i, chunk in zip(valid_index, chunks)) +
shape[prev_index_count:])
dsk[new_key] = (np.reshape, key, new_shape)
return Array(merge(dsk, array.dask), name, chunks, dtype=array.dtype)
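# Illustrative sketch (not part of the original dask source): ``reshape`` keeps
# chunking only on the first reshaped dimension, so the trailing axis below is
# held in a single chunk.  Assumes ``np`` and ``from_array`` at module scope.
def _example_reshape():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_reshape()  # doctest: +SKIP
    (2, 8)
    """
    x = from_array(np.arange(16).reshape(4, 4), chunks=(2, 4))
    return reshape(x, (2, 8)).compute().shape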
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = 'offset_' + func.__name__
return _offset
@wraps(np.fromfunction)
def fromfunction(func, chunks=None, shape=None, dtype=None):
if chunks:
chunks = normalize_chunks(chunks, shape)
name = 'fromfunction-' + tokenize(func, chunks, shape, dtype)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
offsets = list(product(*aggdims))
shapes = list(product(*chunks))
values = [(np.fromfunction, offset_func(func, offset), shp)
for offset, shp in zip(offsets, shapes)]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype=dtype)
@wraps(np.unique)
def unique(x):
name = 'unique-' + x.name
dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
parts = Array._get(merge(dsk, x.dask), list(dsk.keys()))
return np.unique(np.concatenate(parts))
@wraps(np.bincount)
def bincount(x, weights=None, minlength=None):
if minlength is None:
raise TypeError("Must specify minlength argument in da.bincount")
assert x.ndim == 1
if weights is not None:
assert weights.chunks == x.chunks
# Call np.bincount on each block, possibly with weights
token = tokenize(x, weights, minlength)
name = 'bincount-' + token
if weights is not None:
dsk = dict(((name, i),
(np.bincount, (x.name, i), (weights.name, i), minlength))
for i, _ in enumerate(x._keys()))
dtype = np.bincount([1], weights=[1]).dtype
else:
dsk = dict(((name, i), (np.bincount, (x.name, i), None, minlength))
for i, _ in enumerate(x._keys()))
dtype = np.bincount([]).dtype
# Sum up all of the intermediate bincounts per block
name = 'bincount-sum-' + token
dsk[(name, 0)] = (np.sum, list(dsk), 0)
chunks = ((minlength,),)
dsk.update(x.dask)
if weights is not None:
dsk.update(weights.dask)
return Array(dsk, name, chunks, dtype)
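# Illustrative sketch (not part of the original dask source): ``bincount`` with
# the mandatory ``minlength`` argument; per-block bincounts are summed into a
# single result.  Assumes ``np`` and ``from_array`` at module scope.
def _example_bincount():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_bincount()  # doctest: +SKIP
    array([1, 2, 1])
    """
    x = from_array(np.array([0, 1, 1, 2]), chunks=2)
    return bincount(x, minlength=3).compute()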
@wraps(np.digitize)
def digitize(a, bins, right=False):
bins = np.asarray(bins)
dtype = np.digitize([0], bins, right=False).dtype
return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
"""
Blocked variant of numpy.histogram.
Follows the signature of numpy.histogram exactly with the following
exceptions:
- Either an iterable specifying the ``bins`` or the number of ``bins``
and a ``range`` argument is required as computing ``min`` and ``max``
over blocked arrays is an expensive operation that must be performed
explicitly.
- ``weights`` must be a dask.array.Array with the same block structure
as ``a``.
Examples
--------
Using number of bins and range:
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array(np.arange(10000), chunks=10)
>>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
>>> bins
array([ 0., 1000., 2000., 3000., 4000., 5000., 6000.,
7000., 8000., 9000., 10000.])
>>> h.compute()
array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])
Explicitly specifying the bins:
>>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
>>> bins
array([ 0, 5000, 10000])
>>> h.compute()
array([5000, 5000])
"""
    if bins is None or (range is None and not np.iterable(bins)):
        raise ValueError('dask.array.histogram requires either an iterable '
                         'of bin edges, or both bins and range to be defined.')
if weights is not None and weights.chunks != a.chunks:
raise ValueError('Input array and weights must have the same '
'chunked structure')
if not np.iterable(bins):
bin_token = bins
mn, mx = range
if mn == mx:
mn -= 0.5
mx += 0.5
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else:
bin_token = bins
token = tokenize(a, bin_token, range, normed, weights, density)
nchunks = len(list(core.flatten(a._keys())))
chunks = ((1,) * nchunks, (len(bins) - 1,))
name = 'histogram-sum-' + token
# Map the histogram to all bins
def block_hist(x, weights=None):
return np.histogram(x, bins, weights=weights)[0][np.newaxis]
if weights is None:
dsk = dict(((name, i, 0), (block_hist, k))
for i, k in enumerate(core.flatten(a._keys())))
dtype = np.histogram([])[0].dtype
else:
a_keys = core.flatten(a._keys())
w_keys = core.flatten(weights._keys())
dsk = dict(((name, i, 0), (block_hist, k, w))
for i, (k, w) in enumerate(zip(a_keys, w_keys)))
dsk.update(weights.dask)
dtype = weights.dtype
dsk.update(a.dask)
mapped = Array(dsk, name, chunks, dtype=dtype)
n = mapped.sum(axis=0)
# We need to replicate normed and density options from numpy
if density is not None:
if density:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / db / n.sum(), bins
else:
return n, bins
else:
# deprecated, will be removed from Numpy 2.0
if normed:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / (n * db).sum(), bins
else:
return n, bins
def eye(N, chunks, M=None, k=0, dtype=float):
"""
Return a 2-D Array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
chunks: int
chunk size of resulting blocks
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : Array of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
"""
if not isinstance(chunks, int):
raise ValueError('chunks must be an int')
    token = tokenize(N, chunks, M, k, dtype)
name_eye = 'eye-' + token
eye = {}
if M is None:
M = N
vchunks = [chunks] * (N // chunks)
if N % chunks != 0:
vchunks.append(N % chunks)
hchunks = [chunks] * (M // chunks)
if M % chunks != 0:
hchunks.append(M % chunks)
for i, vchunk in enumerate(vchunks):
for j, hchunk in enumerate(hchunks):
if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks:
eye[name_eye, i, j] = (np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype)
else:
eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype)
return Array(eye, name_eye, shape=(N, M),
chunks=(chunks, chunks), dtype=dtype)
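# Illustrative sketch (not part of the original dask source): ``eye`` with an
# off-centre diagonal, compared against its NumPy counterpart.  Assumes ``np``
# is available at module scope.
def _example_eye():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_eye()  # doctest: +SKIP
    True
    """
    x = eye(5, chunks=2, k=1)
    return bool(np.allclose(x.compute(), np.eye(5, k=1)))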
@wraps(np.diag)
def diag(v):
name = 'diag-' + tokenize(v)
if isinstance(v, np.ndarray):
if v.ndim == 1:
chunks = ((v.shape[0],), (v.shape[0],))
dsk = {(name, 0, 0): (np.diag, v)}
elif v.ndim == 2:
chunks = ((min(v.shape),),)
dsk = {(name, 0): (np.diag, v)}
else:
raise ValueError("Array must be 1d or 2d only")
return Array(dsk, name, chunks, dtype=v.dtype)
if not isinstance(v, Array):
raise TypeError("v must be a dask array or numpy array, "
"got {0}".format(type(v)))
if v.ndim != 1:
if v.chunks[0] == v.chunks[1]:
dsk = dict(((name, i), (np.diag, row[i])) for (i, row)
in enumerate(v._keys()))
dsk.update(v.dask)
return Array(dsk, name, (v.chunks[0],), dtype=v.dtype)
else:
raise NotImplementedError("Extracting diagonals from non-square "
"chunked arrays")
chunks_1d = v.chunks[0]
blocks = v._keys()
dsk = v.dask.copy()
for i, m in enumerate(chunks_1d):
for j, n in enumerate(chunks_1d):
key = (name, i, j)
if i == j:
dsk[key] = (np.diag, blocks[i])
else:
dsk[key] = (np.zeros, (m, n))
return Array(dsk, name, (chunks_1d, chunks_1d), dtype=v._dtype)
def triu(m, k=0):
"""
    Upper triangle of an array with elements below the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
        Diagonal below which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
triu : ndarray, shape (M, N)
Upper triangle of `m`, of same shape and data-type as `m`.
See Also
--------
tril : lower triangle of an array
"""
if m.ndim != 2:
raise ValueError('input must be 2 dimensional')
if m.shape[0] != m.shape[1]:
raise NotImplementedError('input must be a square matrix')
if m.chunks[0][0] != m.chunks[1][0]:
        msg = ('chunks must be square. '
               'Use the .rechunk method to change the chunk sizes.')
raise NotImplementedError(msg)
rdim = len(m.chunks[0])
hdim = len(m.chunks[1])
chunk = m.chunks[0][0]
token = tokenize(m, k)
name = 'triu-' + token
dsk = {}
for i in range(rdim):
for j in range(hdim):
if chunk * (j - i + 1) < k:
dsk[(name, i, j)] = (np.zeros, (m.chunks[0][i], m.chunks[1][j]))
elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):
dsk[(name, i, j)] = (np.triu, (m.name, i, j), k - (chunk * (j - i)))
else:
dsk[(name, i, j)] = (m.name, i, j)
dsk.update(m.dask)
return Array(dsk, name, shape=m.shape, chunks=m.chunks, dtype=m.dtype)
def tril(m, k=0):
"""
Lower triangle of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, M)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, M)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : upper triangle of an array
"""
if m.ndim != 2:
raise ValueError('input must be 2 dimensional')
if m.shape[0] != m.shape[1]:
raise NotImplementedError('input must be a square matrix')
if not len(set(m.chunks[0] + m.chunks[1])) == 1:
        msg = ('All chunks must be square. '
               'Use the .rechunk method to change the chunk sizes.')
raise ValueError(msg)
rdim = len(m.chunks[0])
hdim = len(m.chunks[1])
chunk = m.chunks[0][0]
token = tokenize(m, k)
name = 'tril-' + token
dsk = {}
for i in range(rdim):
for j in range(hdim):
if chunk * (j - i + 1) < k:
dsk[(name, i, j)] = (m.name, i, j)
elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):
dsk[(name, i, j)] = (np.tril, (m.name, i, j), k - (chunk * (j - i)))
else:
dsk[(name, i, j)] = (np.zeros, (m.chunks[0][i], m.chunks[1][j]))
dsk.update(m.dask)
return Array(dsk, name, shape=m.shape, chunks=m.chunks, dtype=m.dtype)
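# Illustrative sketch (not part of the original dask source): the blocked
# ``triu``/``tril`` agree with their NumPy counterparts on square chunks.
# Assumes ``np`` and ``from_array`` are available at module scope.
def _example_triangles():  # pragma: no cover - illustrative sketch only
    """
    >>> _example_triangles()  # doctest: +SKIP
    True
    """
    data = np.arange(16).reshape(4, 4)
    x = from_array(data, chunks=(2, 2))
    upper_ok = np.allclose(triu(x, k=1).compute(), np.triu(data, k=1))
    lower_ok = np.allclose(tril(x, k=-1).compute(), np.tril(data, k=-1))
    return bool(upper_ok and lower_ok)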
def chunks_from_arrays(arrays):
""" Chunks tuple from nested list of arrays
>>> x = np.array([1, 2])
>>> chunks_from_arrays([x, x])
((2, 2),)
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x], [x]])
((1, 1), (2,))
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x, x]])
((1,), (2, 2))
>>> chunks_from_arrays([1, 1])
((1, 1),)
"""
if not arrays:
return ()
result = []
dim = 0
def shape(x):
try:
return x.shape
except AttributeError:
return (1,)
while isinstance(arrays, (list, tuple)):
result.append(tuple(shape(deepfirst(a))[dim] for a in arrays))
arrays = arrays[0]
dim += 1
return tuple(result)
def deepfirst(seq):
""" First element in a nested list
>>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
1
"""
if not isinstance(seq, (list, tuple)):
return seq
else:
return deepfirst(seq[0])
def ndimlist(seq):
if not isinstance(seq, (list, tuple)):
return 0
elif not seq:
return 1
else:
return 1 + ndimlist(seq[0])
def concatenate3(arrays):
""" Recursive np.concatenate
    Input should be a nested list of numpy arrays arranged in the order they
    should appear in the final array.  Each array should have the same number
    of dimensions as the desired output, and the nesting depth of the lists
    should match that number of dimensions.
>>> x = np.array([[1, 2]])
>>> concatenate3([[x, x, x], [x, x, x]])
array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])
>>> concatenate3([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
arrays = concrete(arrays)
ndim = ndimlist(arrays)
if not ndim:
return arrays
if not arrays:
return np.empty(0)
chunks = chunks_from_arrays(arrays)
shape = tuple(map(sum, chunks))
def dtype(x):
try:
return x.dtype
except AttributeError:
return type(x)
result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))
for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):
if hasattr(arr, 'ndim'):
while arr.ndim < ndim:
arr = arr[None, ...]
result[idx] = arr
return result
def concatenate_axes(arrays, axes):
""" Recurseively call np.concatenate along axes
TODO: This performs many copies. We should be able to do this in one
TODO: Merge logic on concatenate3 with this
"""
if len(axes) != ndimlist(arrays):
raise ValueError("Length of axes should equal depth of nested arrays")
if len(axes) > 1:
arrays = [concatenate_axes(a, axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def to_hdf5(filename, *args, **kwargs):
""" Store arrays in HDF5 file
This saves several dask arrays into several datapaths in an HDF5 file.
It creates the necessary datasets and handles clean file opening/closing.
>>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP
or
>>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP
This can also be used as a method on a single Array
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
if len(args) == 1 and isinstance(args[0], dict):
data = args[0]
elif (len(args) == 2 and
isinstance(args[0], str) and
isinstance(args[1], Array)):
data = {args[0]: args[1]}
else:
raise ValueError("Please provide {'/data/path': array} dictionary")
chunks = kwargs.pop('chunks', True)
import h5py
with h5py.File(filename) as f:
dsets = [f.require_dataset(dp, shape=x.shape, dtype=x.dtype,
chunks=tuple([c[0] for c in x.chunks])
if chunks is True else chunks, **kwargs)
for dp, x in data.items()]
store(list(data.values()), dsets)
def interleave_none(a, b):
"""
>>> interleave_none([0, None, 2, None], [1, 3])
(0, 1, 2, 3)
"""
result = []
i = j = 0
n = len(a) + len(b)
while i + j < n:
if a[i] is not None:
result.append(a[i])
i += 1
else:
result.append(b[j])
i += 1
j += 1
return tuple(result)
def keyname(name, i, okey):
"""
>>> keyname('x', 3, [None, None, 0, 2])
('x', 3, 0, 2)
"""
return (name, i) + tuple(k for k in okey if k is not None)
def _vindex(x, *indexes):
""" Point wise slicing
This is equivalent to numpy slicing with multiple input lists
>>> x = np.arange(56).reshape((7, 8))
>>> x
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55]])
>>> d = from_array(x, chunks=(3, 4))
>>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])
>>> result.compute()
array([ 0, 9, 48, 7])
"""
indexes = [list(index) if index is not None else index for index in indexes]
bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]
bounds2 = [b for i, b in zip(indexes, bounds) if i is not None]
axis = _get_axis(indexes)
points = list()
for i, idx in enumerate(zip(*[i for i in indexes if i is not None])):
block_idx = [np.searchsorted(b, ind, 'right') - 1
for b, ind in zip(bounds2, idx)]
inblock_idx = [ind - bounds2[k][j]
for k, (ind, j) in enumerate(zip(idx, block_idx))]
points.append((i, tuple(block_idx), tuple(inblock_idx)))
per_block = groupby(1, points)
per_block = dict((k, v) for k, v in per_block.items() if v)
other_blocks = list(product(*[list(range(len(c))) if i is None else [None]
for i, c in zip(indexes, x.chunks)]))
token = tokenize(x, indexes)
name = 'vindex-slice-' + token
full_slices = [slice(None, None) if i is None else None for i in indexes]
dsk = dict((keyname(name, i, okey),
(_vindex_transpose,
(_vindex_slice, (x.name,) + interleave_none(okey, key),
interleave_none(full_slices, list(zip(*pluck(2, per_block[key]))))),
axis))
for i, key in enumerate(per_block)
for okey in other_blocks)
if per_block:
dsk2 = dict((keyname('vindex-merge-' + token, 0, okey),
(_vindex_merge,
[list(pluck(0, per_block[key])) for key in per_block],
[keyname(name, i, okey) for i in range(len(per_block))]))
for okey in other_blocks)
else:
dsk2 = dict()
chunks = [c for i, c in zip(indexes, x.chunks) if i is None]
chunks.insert(0, (len(points),) if points else ())
chunks = tuple(chunks)
return Array(merge(x.dask, dsk, dsk2), 'vindex-merge-' + token, chunks, x.dtype)
def _get_axis(indexes):
""" Get axis along which point-wise slicing results lie
This is mostly a hack because I can't figure out NumPy's rule on this and
can't be bothered to go reading.
>>> _get_axis([[1, 2], None, [1, 2], None])
0
>>> _get_axis([None, [1, 2], [1, 2], None])
1
>>> _get_axis([None, None, [1, 2], [1, 2]])
2
"""
ndim = len(indexes)
indexes = [slice(None, None) if i is None else [0] for i in indexes]
x = np.empty((2,) * ndim)
x2 = x[tuple(indexes)]
return x2.shape.index(1)
def _vindex_slice(block, points):
""" Pull out point-wise slices from block """
points = [p if isinstance(p, slice) else list(p) for p in points]
return block[tuple(points)]
def _vindex_transpose(block, axis):
""" Rotate block so that points are on the first dimension """
axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))
return block.transpose(axes)
def _vindex_merge(locations, values):
"""
>>> locations = [0], [2, 1]
>>> values = [np.array([[1, 2, 3]]),
... np.array([[10, 20, 30], [40, 50, 60]])]
>>> _vindex_merge(locations, values)
array([[ 1, 2, 3],
[40, 50, 60],
[10, 20, 30]])
"""
locations = list(map(list, locations))
values = list(values)
n = sum(map(len, locations))
shape = list(values[0].shape)
shape[0] = n
shape = tuple(shape)
dtype = values[0].dtype
x = np.empty(shape, dtype=dtype)
ind = [slice(None, None) for i in range(x.ndim)]
for loc, val in zip(locations, values):
ind[0] = loc
x[tuple(ind)] = val
return x
@wraps(np.array)
def array(x, dtype=None, ndmin=0):
while x.ndim < ndmin:
x = x[None, :]
if dtype is not None and x.dtype != dtype:
x = x.astype(dtype)
return x
@wraps(np.cov)
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
# This was copied almost verbatim from np.cov
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X = X - X.mean(axis=1 - axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
@wraps(np.corrcoef)
def corrcoef(x, y=None, rowvar=1):
from .ufunc import sqrt
c = cov(x, y, rowvar)
if c.shape == ():
return c / c
d = diag(c)
d = d.reshape((d.shape[0], 1))
sqr_d = sqrt(d)
return (c / sqr_d) / sqr_d.T
def to_npy_stack(dirname, x, axis=0):
""" Write dask array to a stack of .npy files
This partitions the dask.array along one axis and stores each block along
that axis as a single .npy file in the specified directory
Examples
--------
>>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP
>>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP
$ tree data/
data/
|-- 0.npy
|-- 1.npy
|-- 2.npy
|-- info
The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``
respectively, as is specified by the chunk size along the zeroth axis. The
info file stores the dtype, chunks, and axis information of the array.
You can load these stacks with the ``da.from_npy_stack`` function.
>>> y = da.from_npy_stack('data/') # doctest: +SKIP
See Also
--------
from_npy_stack
"""
chunks = tuple((c if i == axis else (sum(c),))
for i, c in enumerate(x.chunks))
xx = x.rechunk(chunks)
if not os.path.exists(dirname):
        os.mkdir(dirname)
meta = {'chunks': chunks, 'dtype': x.dtype, 'axis': axis}
with open(os.path.join(dirname, 'info'), 'wb') as f:
pickle.dump(meta, f)
name = 'to-npy-stack-' + str(uuid.uuid1())
dsk = dict(((name, i), (np.save, os.path.join(dirname, '%d.npy' % i), key))
for i, key in enumerate(core.flatten(xx._keys())))
Array._get(merge(dsk, xx.dask), list(dsk))
def from_npy_stack(dirname, mmap_mode='r'):
""" Load dask array from stack of npy files
See ``da.to_npy_stack`` for docstring
Parameters
----------
dirname: string
Directory of .npy files
mmap_mode: (None or 'r')
Read data in memory map mode
"""
with open(os.path.join(dirname, 'info'), 'rb') as f:
info = pickle.load(f)
dtype = info['dtype']
chunks = info['chunks']
axis = info['axis']
name = 'from-npy-stack-%s' % dirname
keys = list(product([name], *[range(len(c)) for c in chunks]))
values = [(np.load, os.path.join(dirname, '%d.npy' % i), mmap_mode)
for i in range(len(chunks[axis]))]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype)
def _astype(x, astype_dtype=None, **kwargs):
return x.astype(astype_dtype, **kwargs)
@wraps(np.round)
def round(a, decimals=0):
return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)
@wraps(np.swapaxes)
def swapaxes(a, axis1, axis2):
if axis1 == axis2:
return a
ind = list(range(a.ndim))
out = list(ind)
out[axis1], out[axis2] = axis2, axis1
return atop(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2,
dtype=a._dtype)
@wraps(np.repeat)
def repeat(a, repeats, axis=None):
if axis is None:
if a.ndim == 1:
axis = 0
else:
raise NotImplementedError("Must supply an integer axis value")
if not isinstance(repeats, int):
raise NotImplementedError("Only integer valued repeats supported")
if repeats == 1:
return a
cchunks = np.cumsum((0,) + a.chunks[axis])
slices = []
for c_start, c_stop in sliding_window(2, cchunks):
ls = np.linspace(c_start, c_stop, repeats).round(0)
for ls_start, ls_stop in sliding_window(2, ls):
if ls_start != ls_stop:
slices.append(slice(ls_start, ls_stop))
all_slice = slice(None, None, None)
slices = [(all_slice,) * axis + (s,) + (all_slice,) * (a.ndim - axis - 1)
for s in slices]
slabs = [a[slc] for slc in slices]
out = []
for slab in slabs:
chunks = list(slab.chunks)
assert len(chunks[axis]) == 1
chunks[axis] = (chunks[axis][0] * repeats,)
chunks = tuple(chunks)
result = slab.map_blocks(np.repeat, repeats, axis=axis, chunks=chunks,
dtype=slab._dtype)
out.append(result)
return concatenate(out, axis=axis)
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/array/core.py",
"copies": "1",
"size": "119688",
"license": "mit",
"hash": -1324116425722364000,
"line_mean": 31.3568532036,
"line_max": 107,
"alpha_frac": 0.55546922,
"autogenerated": false,
"ratio": 3.511765741447098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9566170705351671,
"avg_score": 0.00021285121908535385,
"num_lines": 3699
} |
from __future__ import absolute_import, division, print_function
from blaze.compute.spark import *
from blaze.compatibility import skip
from blaze.expr.table import *
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
data2 = [['Alice', 'Austin'],
['Bob', 'Boston']]
try:
from pyspark import SparkContext
sc = SparkContext("local", "Simple App")
rdd = sc.parallelize(data)
rdd2 = sc.parallelize(data2)
except ImportError:
pass
t = TableSymbol('t', '{name: string, amount: int, id: int}')
t2 = TableSymbol('t2', '{name: string, city: string}')
#Web Commons Graph Example data
data_idx = [['A', 1],
['B', 2],
['C', 3]]
data_arc = [[1, 3],
[2, 3],
[3, 1]]
t_idx = TableSymbol('idx', '{name: string, node_id: int32}')
t_arc = TableSymbol('arc', '{node_out: int32, node_id: int32}')
def test_spark_table():
assert compute(t, rdd) == rdd
def test_spark_projection():
assert compute(t['name'], rdd).collect() == [row[0] for row in data]
def test_spark_multicols_projection():
result = compute(t[['amount', 'name']], rdd).collect()
expected = [(100, 'Alice'), (200, 'Bob'), (50, 'Alice')]
print(result)
print(expected)
assert result == expected
inc = lambda x: x + 1
reduction_exprs = [
t['amount'].sum(),
t['amount'].min(),
t['amount'].max(),
t['amount'].nunique(),
t['name'].nunique(),
t['amount'].count(),
(t['amount'] > 150).any(),
(t['amount'] > 150).all(),
t['amount'].mean(),
t['amount'].var(),
t['amount'].std()]
def test_spark_reductions():
for expr in reduction_exprs:
result = compute(expr, rdd)
expected = compute(expr, data)
if not result == expected:
print(result)
print(expected)
if isinstance(result, float):
assert abs(result - expected) < 0.001
else:
assert result == expected
exprs = [
t['amount'],
t['amount'] == 100,
t[t['name'] == 'Alice'],
t[t['amount'] == 0],
t[t['amount'] > 150],
t['amount'] + t['id'],
t['amount'] % t['id'],
exp(t['amount']),
By(t, t['name'], t['amount'].sum()),
By(t, t['name'], (t['amount'] + 1).sum()),
(t['amount'] * 1).label('foo'),
t.map(lambda _, amt, id: amt + id),
t['amount'].map(inc)]
def test_spark_basic():
check_exprs_against_python(exprs, data, rdd)
def check_exprs_against_python(exprs, data, rdd):
any_bad = False
for expr in exprs:
result = compute(expr, rdd).collect()
expected = list(compute(expr, data))
if not result == expected:
any_bad = True
print("Expression:", expr)
print("Spark:", result)
print("Python:", expected)
assert not any_bad
def test_spark_big_by():
tbig = TableSymbol('tbig', '{name: string, sex: string[1], amount: int, id: int}')
big_exprs = [
By(tbig, tbig[['name', 'sex']], tbig['amount'].sum()),
By(tbig, tbig[['name', 'sex']], (tbig['id'] + tbig['amount']).sum())]
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
rddbig = sc.parallelize(databig)
check_exprs_against_python(big_exprs, databig, rddbig)
def test_spark_head():
assert list(compute(t.head(1), rdd)) == list(compute(t.head(1), data))
def test_spark_sort():
check_exprs_against_python([
t.sort('amount'),
t.sort('amount', ascending=True),
t.sort(['amount', 'id'])], data, rdd)
def test_spark_distinct():
assert set(compute(t['name'].distinct(), rdd).collect()) == \
set(['Alice', 'Bob'])
def test_spark_join():
joined = Join(t, t2, 'name')
expected = [['Alice', 100, 1, 'Austin'],
['Bob', 200, 2, 'Boston'],
['Alice', 50, 3, 'Austin']]
result = compute(joined, rdd, rdd2).collect()
assert all(i in expected for i in result)
def test_spark_groupby():
rddidx = sc.parallelize(data_idx)
rddarc = sc.parallelize(data_arc)
joined = Join(t_arc, t_idx, "node_id")
    result_blaze = compute(joined, {t_arc: rddarc, t_idx: rddidx})
t = By(joined, joined['name'], joined['node_id'].count())
a = compute(t, {t_arc: rddarc, t_idx:rddidx})
in_degree = dict(a.collect())
assert in_degree == {'A': 1, 'C': 2}
def test_spark_multi_level_rowfunc_works():
expr = t['amount'].map(lambda x: x + 1)
assert compute(expr, rdd).collect() == [x[1] + 1 for x in data]
@skip("Spark not yet fully supported")
def test_jaccard():
data_idx_j = sc.parallelize([['A', 1],['B', 2],['C', 3],['D', 4],['E', 5],['F', 6]])
data_arc_j = sc.parallelize([[1, 3],[2, 3],[4, 3],[5, 3],[3, 1],[2, 1],[5, 1],[1, 6],[2, 6],[4, 6]])
#The tables we need to work with
    t_idx_j = TableSymbol('idx_j', '{name: string, node_id: int32}')  # Index of sites
    t_arc_j = TableSymbol('arc_j', '{node_out: int32, node_id: int32}')  # Links between sites
    t_sel_j = TableSymbol('sel_j', '{name: string}')  # A Selection table for just site names
join_names = Join(t_arc_j, t_idx_j, "node_id")
user_selected = Join(join_names, t_sel_j, "name")
proj_of_nodes = user_selected[['node_out', 'node_id']]
node_selfjoin = Join(proj_of_nodes, proj_of_nodes.relabel(
{'node_id':'node_other'}), "node_out")
#Filter here to get (a,b) node pairs where a < b
flter = node_selfjoin[ node_selfjoin['node_id'] < node_selfjoin['node_other']]
gby = By(flter, flter[['node_id', 'node_other']], flter['node_out'].count())
indeg_joined = Join(t_arc, t_idx, 'node_id')
indeg_t = By(indeg_joined, indeg_joined['node_id'], indeg_joined['node_id'].count())
#### Now we actually do the computation on the graph:
# The subset we care about
data_sel_j = sc.parallelize([['C'],['F']])
    shared_neighbor_num = compute(gby, {t_sel_j: data_sel_j, t_arc_j: data_arc_j, t_idx_j: data_idx_j})
indeg = compute(indeg_t, {t_arc_j: data_arc_j, t_idx_j:data_idx_j})
indeg_py = dict(indeg.collect())
shared_neighbor_py = shared_neighbor_num.collect()
assert shared_neighbor_py == [((3, 6), 3)]
assert indeg_py == {1: 3, 3: 4, 6: 3}
def test_spark_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert compute(expr, rdd).collect() == [(row[0], row[1] * 2) for row in data]
def test_spark_into():
from blaze.api.into import into
seq = [1, 2, 3]
assert isinstance(into(rdd, seq), RDD)
assert into([], into(rdd, seq)) == seq
| {
"repo_name": "aterrel/blaze",
"path": "blaze/compute/tests/test_spark.py",
"copies": "1",
"size": "6697",
"license": "bsd-3-clause",
"hash": 6247682430380929000,
"line_mean": 28.8973214286,
"line_max": 104,
"alpha_frac": 0.5556219203,
"autogenerated": false,
"ratio": 3.070609812012838,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4126231732312838,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from blaze.compute.sql import compute, computefull, select
from blaze.expr.table import *
import sqlalchemy
import sqlalchemy as sa
from blaze.compatibility import skip
from blaze.utils import unique
t = TableSymbol('t', '{name: string, amount: int, id: int}')
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
)
tbig = TableSymbol('tbig', '{name: string, sex: string[1], amount: int, id: int}')
sbig = sa.Table('accountsbig', metadata,
sa.Column('name', sa.String),
sa.Column('sex', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
)
def normalize(s):
return ' '.join(s.strip().split())
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s)) == str(s.c.amount == 100)
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(computefull(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s)) == str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s)) == str(s.c.amount * s.c.id)
assert str(computefull(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
L = TableSymbol('L', '{name: string, amount: int}')
R = TableSymbol('R', '{name: string, id: int}')
joined = Join(L, R, 'name')
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
assert list(result.c.keys()) == list(joined.columns)
def test_unary_op():
assert str(compute(exp(t['amount']), s)) == str(sa.func.exp(s.c.amount))
def test_neg():
assert str(compute(-t['amount'], s)) == str(-s.c.amount)
def test_reductions():
assert str(compute(sum(t['amount']), s)) == \
str(sa.sql.functions.sum(s.c.amount))
assert str(compute(mean(t['amount']), s)) == \
str(sa.sql.func.avg(s.c.amount))
assert str(compute(count(t['amount']), s)) == \
str(sa.sql.func.count(s.c.amount))
assert 'amount' == compute(sum(t['amount']), s).name
def test_distinct():
result = str(compute(Distinct(t['amount']), s))
assert 'distinct' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.distinct(s.c.amount))
def test_nunique():
result = str(compute(nunique(t['amount']), s))
assert 'distinct' in result.lower()
assert 'count' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.sql.func.count(sa.distinct(s.c.amount)))
@skip("Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
assert str(compute(any(t['amount'] > 150), s)) == \
str(sa.sql.functions.any(s.c.amount > 150))
def test_by():
expr = By(t, t['name'], t['amount'].sum())
result = compute(expr, s)
expected = sa.select([s.c.name,
sa.sql.functions.sum(s.c.amount).label('amount')]
).group_by(s.c.name)
assert str(result) == str(expected)
def test_by_head():
t2 = t.head(100)
expr = By(t2, t2['name'], t2['amount'].sum())
result = compute(expr, s)
s2 = select(s).limit(100)
expected = sa.select([s2.c.name,
sa.sql.functions.sum(s2.c.amount).label('amount')]
).group_by(s2.c.name)
assert str(result) == str(expected)
def test_by_two():
expr = By(tbig, tbig[['name', 'sex']], tbig['amount'].sum())
result = compute(expr, sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.amount).label('amount')])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_by_three():
result = compute(By(tbig,
tbig[['name', 'sex']],
(tbig['id'] + tbig['amount']).sum()),
sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.id+ sbig.c.amount)])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_join_projection():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = TableSymbol('L', '{name: string, amount: int}')
R = TableSymbol('R', '{name: string, id: int}')
want = Join(L, R, 'name')[['amount', 'id']]
result = compute(want, {L: lhs, R: rhs})
print(result)
assert 'JOIN' in str(result)
assert result.c.keys() == ['amount', 'id']
assert 'amounts.name = ids.name' in str(result)
def test_sort():
assert str(compute(t.sort('amount'), s)) == \
str(select(s).order_by(s.c.amount))
assert str(compute(t.sort('amount', ascending=False), s)) == \
str(select(s).order_by(sqlalchemy.desc(s.c.amount)))
def test_head():
assert str(compute(t.head(2), s)) == str(select(s).limit(2))
def test_label():
assert str(compute((t['amount'] * 10).label('foo'), s)) == \
str((s.c.amount * 10).label('foo'))
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), s)
expected = select([s.c.name.label('NAME'), s.c.amount, s.c.id.label('ID')])
assert str(result) == str(expected)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
| {
"repo_name": "aterrel/blaze",
"path": "blaze/compute/tests/test_sql.py",
"copies": "1",
"size": "7263",
"license": "bsd-3-clause",
"hash": 252467099755268400,
"line_mean": 29.0123966942,
"line_max": 82,
"alpha_frac": 0.5591353435,
"autogenerated": false,
"ratio": 3.2511190689346465,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43102544124346465,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from blaze.expr import Selection, Head, Field, Projection, ReLabel, ElemWise
from blaze.expr import Label, Distinct, By, Reduction, Like, Slice
from blaze.expr import std, var, count, mean, nunique, sum
from blaze.expr import eval_str, Expr
from blaze.expr.optimize import lean_projection
from collections import Iterator
import datashape
import bcolz
import math
from .chunks import ChunkIndexable
from ..compatibility import builtins
from ..dispatch import dispatch
from ..api import into
__all__ = ['bcolz']
COMFORTABLE_MEMORY_SIZE = 1e9
@dispatch((bcolz.carray, bcolz.ctable))
def discover(data):
return datashape.from_numpy(data.shape, data.dtype)
@dispatch(Selection, bcolz.ctable)
def compute_up(expr, data, **kwargs):
if data.nbytes < COMFORTABLE_MEMORY_SIZE:
return compute_up(expr, data[:], **kwargs)
s = eval_str(expr.predicate._expr)
try:
return data.where(s)
except (NotImplementedError, NameError, AttributeError):
# numexpr may not be able to handle the predicate
return compute_up(expr, into(Iterator, data), **kwargs)
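# NOTE: the registration below reuses the exact signature of the one above;
# with multipledispatch the later definition wins, so the numexpr-backed
# ``data.where`` fast path above is effectively shadowed.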
@dispatch(Selection, bcolz.ctable)
def compute_up(expr, data, **kwargs):
if data.nbytes < COMFORTABLE_MEMORY_SIZE:
return compute_up(expr, data[:], **kwargs)
return compute_up(expr, into(Iterator, data), **kwargs)
@dispatch(Head, (bcolz.carray, bcolz.ctable))
def compute_up(expr, data, **kwargs):
return data[:expr.n]
@dispatch(Field, bcolz.ctable)
def compute_up(expr, data, **kwargs):
return data[expr._name]
@dispatch(Projection, bcolz.ctable)
def compute_up(expr, data, **kwargs):
return data[expr.fields]
@dispatch(sum, (bcolz.carray, bcolz.ctable))
def compute_up(expr, data, **kwargs):
return data.sum()
@dispatch(count, (bcolz.ctable, bcolz.carray))
def compute_up(expr, data, **kwargs):
return len(data)
@dispatch(mean, bcolz.carray)
def compute_up(expr, ba, **kwargs):
return ba.sum() / ba.len
@dispatch(var, bcolz.carray)
def compute_up(expr, ba, chunksize=2**20, **kwargs):
n = ba.len
E_X_2 = builtins.sum((chunk * chunk).sum() for chunk in chunks(ba))
E_X = float(ba.sum())
return (E_X_2 - (E_X * E_X) / n) / (n - expr.unbiased)
@dispatch(std, bcolz.carray)
def compute_up(expr, ba, **kwargs):
result = compute_up(expr._child.var(unbiased=expr.unbiased), ba, **kwargs)
return math.sqrt(result)
@dispatch((ReLabel, Label), (bcolz.carray, bcolz.ctable))
def compute_up(expr, b, **kwargs):
raise NotImplementedError()
@dispatch((ElemWise, Distinct, By, nunique, Like), (bcolz.carray, bcolz.ctable))
def compute_up(expr, data, **kwargs):
if data.nbytes < COMFORTABLE_MEMORY_SIZE:
return compute_up(expr, data[:], **kwargs)
return compute_up(expr, iter(data), **kwargs)
@dispatch(nunique, bcolz.carray)
def compute_up(expr, data, **kwargs):
return len(set(data))
@dispatch(Reduction, (bcolz.carray, bcolz.ctable))
def compute_up(expr, data, **kwargs):
if data.nbytes < COMFORTABLE_MEMORY_SIZE:
return compute_up(expr, data[:], **kwargs)
return compute_up(expr, ChunkIndexable(data), **kwargs)
@dispatch(Slice, (bcolz.carray, bcolz.ctable))
def compute_up(expr, x, **kwargs):
return x[expr.index]
@dispatch((bcolz.carray, bcolz.ctable))
def chunks(b, chunksize=2**15):
start = 0
n = b.len
while start < n:
yield b[start:start + chunksize]
start += chunksize
@dispatch((bcolz.carray, bcolz.ctable), int)
def get_chunk(b, i, chunksize=2**15):
start = chunksize * i
stop = chunksize * (i + 1)
return b[start:stop]
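# Illustrative sketch only (not part of the original module): how ``chunks``
# and ``get_chunk`` slice a carray into plain in-memory numpy blocks.  The
# array contents and the tiny chunksize are assumptions chosen for the demo.
def _example_chunking():
    import numpy as np
    a = bcolz.carray(np.arange(10))
    pieces = list(chunks(a, chunksize=4))            # blocks of 4, 4 and 2 items
    assert [len(p) for p in pieces] == [4, 4, 2]
    assert list(get_chunk(a, 1, chunksize=4)) == [4, 5, 6, 7]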
@dispatch(Expr, (bcolz.ctable, bcolz.carray))
def optimize(expr, _):
return lean_projection(expr)
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/bcolz.py",
"copies": "1",
"size": "3765",
"license": "bsd-3-clause",
"hash": -7659563887643042000,
"line_mean": 25.8928571429,
"line_max": 80,
"alpha_frac": 0.6852589641,
"autogenerated": false,
"ratio": 2.9786392405063293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41638982046063294,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .callbacks import Callback
from timeit import default_timer
from numbers import Number
import sys
overhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4
class Cache(Callback):
""" Use cache for computation
Examples
--------
>>> cache = Cache(1e9) # doctest: +SKIP
The cache can be used locally as a context manager around ``compute`` or
``get`` calls:
>>> with cache: # doctest: +SKIP
... result = x.compute()
You can also register a cache globally, so that it works for all
computations:
>>> cache.register() # doctest: +SKIP
>>> cache.unregister() # doctest: +SKIP
"""
def __init__(self, cache, *args, **kwargs):
try:
import cachey
except ImportError as ex:
raise ImportError('Cache requires cachey, "{ex}" problem '
'importing'.format(ex=str(ex)))
self._nbytes = cachey.nbytes
if isinstance(cache, Number):
cache = cachey.Cache(cache, *args, **kwargs)
else:
assert not args and not kwargs
self.cache = cache
self.starttimes = dict()
def _start(self, dsk):
self.durations = dict()
overlap = set(dsk) & set(self.cache.data)
for key in overlap:
dsk[key] = self.cache.data[key]
def _pretask(self, key, dsk, state):
self.starttimes[key] = default_timer()
def _posttask(self, key, value, dsk, state, id):
duration = default_timer() - self.starttimes[key]
deps = state['dependencies'][key]
if deps:
duration += max(self.durations.get(k, 0) for k in deps)
self.durations[key] = duration
nb = self._nbytes(value) + overhead + sys.getsizeof(key) * 4
self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)
def _finish(self, dsk, state, errored):
self.starttimes.clear()
self.durations.clear()
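# Illustrative sketch only (not part of the original module): typical wiring of
# the Cache callback.  Assumes cachey is installed and dask.array is importable;
# the ones/sum workload is an arbitrary stand-in for any dask computation.
def _example_cache_usage():
    import dask.array as da
    x = da.ones((1000, 1000), chunks=(100, 100)).sum()
    cache = Cache(1e9)            # budget of roughly 1e9 bytes, managed by cachey
    with cache:                   # cache intermediates for this compute only
        local_result = x.compute()
    cache.register()              # or opt in for every computation ...
    global_result = x.compute()
    cache.unregister()            # ... and opt back out again
    return local_result, global_result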
| {
"repo_name": "cowlicks/dask",
"path": "dask/cache.py",
"copies": "14",
"size": "2058",
"license": "bsd-3-clause",
"hash": 7075139131965660000,
"line_mean": 30.1818181818,
"line_max": 76,
"alpha_frac": 0.5777453839,
"autogenerated": false,
"ratio": 3.853932584269663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .callbacks import Callback
from timeit import default_timer
from numbers import Number
import sys
try:
import cachey
except ImportError:
pass
overhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4
class Cache(Callback):
""" Use cache for computation
Examples
--------
>>> cache = Cache(1e9) # available bytes
>>> with cache: # use as a context manager around get/compute calls
... result = x.compute() # doctest: +SKIP
>>> cache.register() # or use globally
>>> cache.unregister()
"""
def __init__(self, cache, *args, **kwargs):
if isinstance(cache, Number):
cache = cachey.Cache(cache, *args, **kwargs)
else:
assert not args and not kwargs
self.cache = cache
self.starttimes = dict()
def _start(self, dsk):
self.durations = dict()
overlap = set(dsk) & set(self.cache.data)
for key in overlap:
dsk[key] = self.cache.data[key]
def _pretask(self, key, dsk, state):
self.starttimes[key] = default_timer()
def _posttask(self, key, value, dsk, state, id):
duration = default_timer() - self.starttimes[key]
deps = state['dependencies'][key]
if deps:
duration += max(self.durations.get(k, 0) for k in deps)
self.durations[key] = duration
nb = cachey.nbytes(value) + overhead + sys.getsizeof(key) * 4
self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)
def _finish(self, dsk, state, errored):
self.starttimes.clear()
self.durations.clear()
| {
"repo_name": "pombredanne/dask",
"path": "dask/cache.py",
"copies": "2",
"size": "1689",
"license": "bsd-3-clause",
"hash": -8952820828883038000,
"line_mean": 27.6271186441,
"line_max": 79,
"alpha_frac": 0.5985790409,
"autogenerated": false,
"ratio": 3.655844155844156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 59
} |
from __future__ import absolute_import, division, print_function
from click import testing
import pytest
import six
import sys
from app.cli import app as app_cli
class Result(testing.Result):
def __init__(self, *args, **kw):
self.__allow_exceptions = kw.pop('allow_exception_access', False)
self.__exception = None
self.__exc_info = None
super(Result, self).__init__(*args, **kw)
@property
def exception(self):
assert self.__allow_exceptions, \
('In order to access exception information,'
' you must explicitly set catch_exceptions when calling the cli')
return self.__exception
@exception.setter
def exception(self, value):
self.__exception = value
@property
def exc_info(self):
assert self.__allow_exceptions, \
('In order to access exception information,'
' you must explicitly set catch_exceptions when calling the cli')
return self.__exc_info
@exc_info.setter
def exc_info(self, value):
self.__exc_info = value
def __repr__(self):
return '<Result %s>' % (self.__exception and repr(self.__exception) or 'okay', )
@classmethod
def from_upstream(cls, r, allow_exception_access):
d = r.__dict__.copy()
d['allow_exception_access'] = allow_exception_access
return Result(**d)
@pytest.fixture
def cli(request):
def invoke(*args, **kw):
__tracebackhide__ = True
runner = testing.CliRunner()
exit_code = kw.pop('exit_code', 0)
try:
catch_exceptions = kw.pop('catch_exceptions')
explicit = True
except KeyError:
catch_exceptions = (request.config.getvalue('verbose') <= 0)
explicit = False
assert not kw, 'unhandled kw args: %s' % (kw, )
args = ('--home', './tests/fixtures') + args
r = runner.invoke(app_cli, args, catch_exceptions=catch_exceptions)
if isinstance(exit_code, six.string_types) and (exit_code.lower() == 'ignore'):
pass
else:
if not r.exit_code == exit_code:
print('%r\nOutput was:' % r, file=sys.stderr)
sys.stderr.write(r.output)
raise AssertionError('Wanted exit code %s but got %s (see stderr for more)' % (exit_code, r.exit_code))
return Result.from_upstream(r, allow_exception_access=explicit)
return invoke
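# Illustrative sketch only (not part of the original conftest): how a test
# would consume the ``cli`` fixture above.  Passing catch_exceptions explicitly
# unlocks Result.exception / Result.exc_info, and exit_code='ignore' skips the
# exit-code assertion.  '--help' is click's built-in flag; the application's
# own subcommands are not shown here, so none are assumed.
def _example_cli_test(cli):
    result = cli('--help', exit_code='ignore', catch_exceptions=True)
    assert result.output  # help text was printed
    assert result.exception is None or isinstance(result.exception, SystemExit)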
| {
"repo_name": "dcwangmit01/options-screener",
"path": "tests/conftest.py",
"copies": "1",
"size": "2471",
"license": "mit",
"hash": -4700947750841535000,
"line_mean": 29.8875,
"line_max": 119,
"alpha_frac": 0.5940914609,
"autogenerated": false,
"ratio": 4.077557755775578,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003866615594143684,
"num_lines": 80
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict, Iterator, Mapping
import decimal
from datetime import date, datetime, timedelta
from functools import partial
import itertools
import numbers
import warnings
from datashape.predicates import (
isscalar,
iscollection,
isrecord,
istabular,
_dimensions,
)
from odo import odo
from odo.compatibility import unicode
import numpy as np
import pandas as pd
import toolz
from toolz import first, unique, assoc
from toolz.utils import no_default
from ..compatibility import basestring
from ..expr import (
BoundSymbol,
Cast,
Expr,
Field,
Join,
Literal,
Symbol,
symbol,
)
from ..dispatch import dispatch
from ..types import iscoretype
__all__ = ['compute', 'compute_up']
base = numbers.Number, basestring, date, datetime, timedelta, type(None)
@dispatch(Expr, object)
def pre_compute(leaf, data, scope=None, **kwargs):
""" Transform data prior to calling ``compute`` """
return data
@dispatch(Expr, object)
def post_compute(expr, result, scope=None):
""" Effects after the computation is complete """
return result
@dispatch(Expr, object)
def optimize(expr, data):
""" Optimize expression to be computed on data """
return expr
@dispatch(object, object)
def compute_up(a, b, **kwargs):
raise NotImplementedError("Blaze does not know how to compute "
"expression of type `%s` on data of type `%s`"
% (type(a).__name__, type(b).__name__))
@dispatch(Cast, object)
def compute_up(c, b, **kwargs):
# cast only works on the expression system and does not affect the
# computation
return b
@dispatch(base)
def compute_up(a, **kwargs):
return a
@dispatch((list, tuple))
def compute_up(seq, scope=None, **kwargs):
return type(seq)(compute(item, scope or {}, **kwargs) for item in seq)
@dispatch(object)
def compute_down(expr, **kwargs):
""" Compute the expression on the entire inputs
inputs match up to leaves of the expression
"""
raise NotImplementedError()
def issubtype(a, b):
""" A custom issubclass """
if issubclass(a, b):
return True
if issubclass(a, (tuple, list, set)) and issubclass(b, Iterator):
return True
if issubclass(b, (tuple, list, set)) and issubclass(a, Iterator):
return True
return False
def type_change(old, new):
""" Was there a significant type change between old and new data?
>>> type_change([1, 2], [3, 4])
False
>>> type_change([1, 2], [3, [1,2,3]])
True
Some special cases exist, like no type change from list to Iterator
>>> type_change([[1, 2]], [iter([1, 2])])
False
"""
if all(isinstance(x, base) for x in old + new):
return False
if len(old) != len(new):
return True
new_types = list(map(type, new))
old_types = list(map(type, old))
return not all(map(issubtype, new_types, old_types))
def top_then_bottom_then_top_again_etc(expr, scope, **kwargs):
""" Compute expression against scope
Does the following interpreter strategy:
1. Try compute_down on the entire expression
2. Otherwise compute_up from the leaves until we experience a type change
(e.g. data changes from dict -> pandas DataFrame)
3. Re-optimize expression and re-pre-compute data
4. Go to step 1
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i4')])
>>> e = s.amount.sum() + 1
>>> top_then_bottom_then_top_again_etc(e, {s: data})
601
See Also
--------
bottom_up_until_type_break -- uses this for bottom-up traversal
top_to_bottom -- older version
bottom_up -- older version still
"""
# 0. Base case: expression is in dict, return associated data
if expr in scope:
return scope[expr]
if not hasattr(expr, '_leaves'):
return expr
leaf_data = (
scope.get(leaf) for leaf in expr._leaves()
if not isinstance(leaf, Literal)
)
# 1. See if we have a direct computation path with compute_down
try:
return compute_down(expr, *leaf_data, **kwargs)
except NotImplementedError:
pass
# 2. Compute from the bottom until there is a data type change
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
# 3. Re-optimize data and expressions
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
if pre_compute_:
scope3 = {
e: pre_compute_(e, datum, **assoc(kwargs, 'scope', scope2))
for e, datum in scope2.items()
}
else:
scope3 = scope2
if optimize_:
try:
expr3 = optimize_(expr2, *[scope3[leaf]
for leaf in expr2._leaves()])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
scope4 = dict((e._subs(_d), d) for e, d in scope3.items())
except NotImplementedError:
expr3 = expr2
scope4 = scope3
else:
expr3 = expr2
scope4 = scope3
# 4. Repeat
if expr.isidentical(expr3):
raise NotImplementedError("Don't know how to compute:\n"
"type(expr): %s\n"
"expr: %s\n"
"data: %s" % (type(expr3), expr3, scope4))
else:
return top_then_bottom_then_top_again_etc(expr3, scope4, **kwargs)
_names = ('leaf_%d' % i for i in itertools.count(1))
_leaf_cache = {}
_used_tokens = defaultdict(set)
def _reset_leaves():
_leaf_cache.clear()
_used_tokens.clear()
def makeleaf(expr):
""" Name of a new leaf replacement for this expression
>>> _reset_leaves()
>>> t = symbol('t', '{x: int, y: int, z: int}')
>>> makeleaf(t) == t
True
>>> makeleaf(t.x)
<`x` symbol; dshape='int32'>
>>> makeleaf(t.x + 1)
<`x` symbol; dshape='int64'>
>>> makeleaf(t.y + 1)
<`y` symbol; dshape='int64'>
>>> makeleaf(t.x).isidentical(makeleaf(t.x + 1))
False
>>> from blaze import sin, cos
>>> x = symbol('x', 'real')
>>> makeleaf(cos(x)**2).isidentical(sin(x) ** 2)
False
>>> makeleaf(t) is t # makeleaf passes on Symbols
True
"""
name = expr._name or '_'
if expr in _leaf_cache:
return _leaf_cache[expr]
if isinstance(expr, Symbol): # Idempotent on symbols
_used_tokens[name].add(expr._token)
_leaf_cache[expr] = expr
return expr
used_for_name = _used_tokens[name]
for token in itertools.count():
if token not in used_for_name:
break
result = symbol(name, expr.dshape, token)
used_for_name.add(token)
_leaf_cache[expr] = result
return result
def data_leaves(expr, scope):
return [scope[leaf] for leaf in expr._leaves()]
def bottom_up_until_type_break(expr, scope, **kwargs):
""" Traverse bottom up until data changes significantly
Parameters
----------
expr: Expression
Expression to compute
scope: dict
namespace matching leaves of expression to data
Returns
-------
expr: Expression
New expression with lower subtrees replaced with leaves
scope: dict
New scope with entries for those leaves
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i8')])
This computation completes without changing type. We get back a leaf
symbol and a computational result
>>> e = (s.amount + 1).distinct()
>>> bottom_up_until_type_break(e, {s: data}) # doctest: +SKIP
(amount, {amount: array([101, 201, 301])})
    This computation has a type change midstream (``array`` to ``int``), so we
stop and get the unfinished computation.
>>> e = s.amount.sum() + 1
>>> bottom_up_until_type_break(e, {s: data})
(amount_sum + 1, {<`amount_sum` symbol; dshape='int64'>: 600})
"""
# 0. Base case. Return if expression is in scope
if expr in scope:
leaf = makeleaf(expr)
return leaf, {leaf: scope[expr]}
inputs = list(unique(expr._inputs))
# 1. Recurse down the tree, calling this function on children
# (this is the bottom part of bottom up)
exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs)
for i in inputs])
# 2. Form new (much shallower) expression and new (more computed) scope
new_scope = toolz.merge(new_scopes)
new_expr = expr._subs({
i: e for i, e in zip(inputs, exprs) if not i.isidentical(e)
})
old_expr_leaves = expr._leaves()
old_data_leaves = [scope.get(leaf) for leaf in old_expr_leaves]
# 3. If the leaves have changed substantially then stop
key = lambda x: str(type(x))
if type_change(sorted(new_scope.values(), key=key),
sorted(old_data_leaves, key=key)):
return new_expr, new_scope
# 4. Otherwise try to do some actual work
try:
leaf = makeleaf(expr)
_data = [new_scope[i] for i in new_expr._inputs]
except KeyError:
return new_expr, new_scope
try:
return leaf, {leaf: compute_up(new_expr, *_data, scope=new_scope,
**kwargs)}
except NotImplementedError:
return new_expr, new_scope
def swap_resources_into_scope(expr, scope):
""" Translate interactive expressions into normal abstract expressions
Interactive Blaze expressions link to data on their leaves. From the
expr/compute perspective, this is a hack. We push the resources onto the
scope and return simple unadorned expressions instead.
Examples
--------
>>> from blaze import data
>>> t = data([1, 2, 3], dshape='3 * int32', name='t')
>>> swap_resources_into_scope(t.head(2), {})
{<'list' data; _name='t', dshape='3 * int32'>: [1, 2, 3]}
"""
return toolz.merge(expr._resources(), scope)
@dispatch((object, type, str, unicode), BoundSymbol)
def into(a, b, **kwargs):
return into(a, b.data, **kwargs)
@dispatch((object, type, str, unicode), Expr)
def into(a, b, **kwargs):
result = compute(b, return_type='native', **kwargs)
kwargs['dshape'] = b.dshape
return into(a, result, **kwargs)
Expr.__iter__ = into(Iterator)
@dispatch(Expr)
def compute(expr, **kwargs):
resources = expr._resources()
if not resources:
raise ValueError("No data resources found")
else:
return compute(expr, resources, **kwargs)
@dispatch(Expr, Mapping)
def compute(expr, d, return_type=no_default, **kwargs):
"""Compute expression against data sources.
Parameters
----------
expr : Expr
The blaze expression to compute.
d : any
The data source to compute expression on.
return_type : {'native', 'core', type}, optional
Type to return data as. Defaults to 'native' but will be changed
to 'core' in version 0.11. 'core' forces the computation into a core
type. 'native' returns the result as is from the respective backend's
``post_compute``. If a type is passed, it will odo the result into the
type before returning.
Examples
--------
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> list(compute(deadbeats, {t: data}))
['Bob', 'Charlie']
"""
_reset_leaves()
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
post_compute_ = kwargs.get('post_compute', post_compute)
d2 = swap_resources_into_scope(expr, d)
if pre_compute_:
d3 = dict(
(e, pre_compute_(e, dat, **kwargs))
for e, dat in d2.items()
if e in expr
)
else:
d3 = d2
if optimize_:
try:
expr2 = optimize_(expr, *[v for e, v in d3.items() if e in expr])
_d = dict(zip(expr._leaves(), expr2._leaves()))
d4 = dict((e._subs(_d), d) for e, d in d3.items())
except NotImplementedError:
expr2 = expr
d4 = d3
else:
expr2 = expr
d4 = d3
result = top_then_bottom_then_top_again_etc(expr2, d4, **kwargs)
if post_compute_:
result = post_compute_(expr2, result, scope=d4)
# return the backend's native response
if return_type is no_default:
msg = ("The default behavior of compute will change in version >= 0.11"
" where the `return_type` parameter will default to 'core'.")
warnings.warn(msg, DeprecationWarning)
# return result as a core type
# (python type, pandas Series/DataFrame, numpy array)
elif return_type == 'core':
result = coerce_core(result, expr.dshape)
# user specified type
elif isinstance(return_type, type):
result = into(return_type, result, dshape=expr2.dshape)
elif return_type != 'native':
raise ValueError(
"Invalid return_type passed to compute: {}".format(return_type),
)
return result
@compute.register(Expr, object)
def compute_single_object(expr, o, **kwargs):
""" Compute against single input
Assumes that only one Symbol exists in expression
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> # list(compute(deadbeats, {t: data}))
>>> list(compute(deadbeats, data))
['Bob', 'Charlie']
"""
resources = expr._resources()
ts = set(expr._leaves()) - set(resources)
if not ts and o in resources.values():
# the data is already bound to an expression
return compute(expr, **kwargs)
if len(ts) == 1:
return compute(expr, {first(ts): o}, **kwargs)
else:
raise ValueError("Give compute dictionary input, got %s" % str(o))
@dispatch(Field, Mapping)
def compute_up(expr, data, **kwargs):
return data[expr._name]
@compute_up.register(Join, object, object)
def join_dataframe_to_selectable(expr, lhs, rhs, scope=None, **kwargs):
lexpr, rexpr = expr._leaves()
return compute(
expr,
{
lexpr: odo(lhs, pd.DataFrame, dshape=lexpr.dshape),
rexpr: odo(rhs, pd.DataFrame, dshape=rexpr.dshape)
},
**kwargs
)
def coerce_to(typ, x, odo_kwargs=None):
try:
return typ(x)
except TypeError:
return odo(x, typ, **(odo_kwargs or {}))
def coerce_scalar(result, dshape, odo_kwargs=None):
dshape = str(dshape)
coerce_ = partial(coerce_to, x=result, odo_kwargs=odo_kwargs)
if 'float' in dshape:
return coerce_(float)
if 'decimal' in dshape:
return coerce_(decimal.Decimal)
elif 'int' in dshape:
return coerce_(int)
elif 'bool' in dshape:
return coerce_(bool)
elif 'datetime' in dshape:
return coerce_(pd.Timestamp)
elif 'date' in dshape:
return coerce_(date)
elif 'timedelta' in dshape:
return coerce_(timedelta)
else:
return result
def coerce_core(result, dshape, odo_kwargs=None):
"""Coerce data to a core data type."""
if iscoretype(result):
return result
elif isscalar(dshape):
result = coerce_scalar(result, dshape, odo_kwargs=odo_kwargs)
elif istabular(dshape) and isrecord(dshape.measure):
result = into(pd.DataFrame, result, **(odo_kwargs or {}))
elif iscollection(dshape):
dim = _dimensions(dshape)
if dim == 1:
result = into(pd.Series, result, **(odo_kwargs or {}))
elif dim > 1:
result = into(np.ndarray, result, **(odo_kwargs or {}))
else:
msg = "Expr with dshape dimensions < 1 should have been handled earlier: dim={}"
raise ValueError(msg.format(str(dim)))
else:
msg = "Expr does not evaluate to a core return type"
raise ValueError(msg)
return result
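# Illustrative sketch only (not part of the original module): end-to-end use of
# ``compute`` with ``return_type``.  Assumes the pandas backend has been
# registered (importing ``blaze`` does that); the sample frame is made up.
def _example_compute_return_type():
    s = symbol('s', 'var * {name: string, amount: int}')
    df = pd.DataFrame({'name': ['Alice', 'Bob'], 'amount': [100, 200]})
    # 'core' funnels the backend-native result through coerce_core above,
    # yielding a pandas Series for a 1-d collection.
    result = compute(s.amount * 2, {s: df}, return_type='core')
    assert list(result) == [200, 400]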
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/core.py",
"copies": "3",
"size": "16504",
"license": "bsd-3-clause",
"hash": -8049783747023529000,
"line_mean": 28.3665480427,
"line_max": 92,
"alpha_frac": 0.5974309258,
"autogenerated": false,
"ratio": 3.6626719928983578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000730528798488849,
"num_lines": 562
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict, MutableMapping
from operator import getitem, add
from datetime import datetime
from time import time
from ..core import istask, ishashable
class Store(MutableMapping):
""" Store - A storage of data and computation
Examples
--------
Store data like a dictionary
>>> import dask.store as ds
>>> s = ds.Store()
>>> s['x'] = 10
>>> s['x']
10
Also store computation on that data
>>> s['y'] = (add, 'x', 5)
Accessing these keys results in computations. Results may be cached for
reuse.
>>> s['y']
15
Design
------
A Store maintains the following state
dsk: dict
A dask to define all computation
cache: dict-like
Stores both ground data and cached intermediate values
data: set
The keys in the cache that can not be removed for correctness.
compute_time: dict:: {key: float}
dict mapping the time it took to compute each key
access_times: dict:: {key: [datetimes]}
The times at which a key was accessed
"""
def __init__(self, cache=None):
self.dsk = dict()
if cache is None:
cache = dict()
self.cache = cache
self.data = set()
self.compute_time = dict()
self.access_times = defaultdict(list)
def __setitem__(self, key, value):
if key in self.dsk:
if (self.dsk[key] == value or
self.dsk[key] == (getitem, self.cache, key) and
self.cache[key] == value):
return
else:
raise KeyError("Can not overwrite data")
if istask(value):
self.dsk[key] = value
else:
self.cache[key] = value
self.dsk[key] = (getitem, self.cache, key)
self.data.add(key)
def __getitem__(self, key):
if isinstance(key, list):
return (self[item] for item in key)
if not ishashable(key):
return key
if key not in self.dsk:
return key
self.access_times[key].append(datetime.now())
if key in self.cache:
return self.cache[key]
task = self.dsk[key]
func, args = task[0], task[1:]
if func == getitem and args[0] is self.cache:
return self.cache[args[1]]
args = [self[arg] for arg in args]
start = time()
result = func(*args)
end = time()
self.cache[key] = result
self.compute_time[key] = end - start
return result
def __len__(self):
return len(self.dsk)
def __iter__(self):
return iter(self.dsk)
def __delitem__(self, key):
raise ValueError("Dask Store does not support deletion")
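# Illustrative sketch only (not part of the original module): the Store behaves
# as its docstring describes -- plain values act like dict entries, tasks are
# computed lazily on first access and then served from the cache.
def _example_store_usage():
    s = Store()
    s['x'] = 10                   # ground data, recorded in s.data
    s['y'] = (add, 'x', 5)        # a task; nothing is computed yet
    assert s['x'] == 10
    assert s['y'] == 15           # computed on access, then cached
    assert 'y' in s.compute_time  # timing recorded for the computed key
    return s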
| {
"repo_name": "mikegraham/dask",
"path": "dask/store/core.py",
"copies": "3",
"size": "2849",
"license": "bsd-3-clause",
"hash": 5843506564196268000,
"line_mean": 24.2123893805,
"line_max": 76,
"alpha_frac": 0.5605475605,
"autogenerated": false,
"ratio": 4.04113475177305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6101682312273049,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from datetime import datetime, timedelta
from django.utils import timezone
from sentry.constants import STATUS_CHOICES
from sentry.models import EventUser, User
from sentry.search.base import ANY
from sentry.utils.auth import find_users
class InvalidQuery(Exception):
pass
def get_user_tag(project, key, value):
# TODO(dcramer): do something with case of multiple matches
try:
lookup = EventUser.attr_from_keyword(key)
euser = EventUser.objects.filter(
project=project,
**{lookup: value}
)[0]
except (KeyError, IndexError):
return '{}:{}'.format(key, value)
return euser.tag_value
def parse_datetime_range(value):
try:
flag, count, interval = value[0], int(value[1:-1]), value[-1]
except (ValueError, TypeError):
raise InvalidQuery('{} is not a valid datetime query'.format(value))
if flag not in ('+', '-'):
raise InvalidQuery('{} is not a valid datetime query'.format(value))
if interval == 'h':
delta = timedelta(hours=count)
elif interval == 'w':
delta = timedelta(days=count * 7)
elif interval == 'd':
delta = timedelta(days=count)
elif interval == 'm':
delta = timedelta(minutes=count)
else:
raise InvalidQuery('{} is not a valid datetime query'.format(value))
if flag == '-':
return (timezone.now() - delta, None)
else:
return (None, timezone.now() - delta)
def parse_datetime_comparison(value):
    # TODO(dcramer): currently inclusivity is not controllable by the query,
    # as the from date is always inclusive and the to date is always exclusive
if value[:2] in ('>=', '=>'):
return (parse_datetime_value(value[2:])[0], None)
if value[:2] in ('<=', '=<'):
return (None, parse_datetime_value(value[2:])[0])
if value[:1] in ('>'):
return (parse_datetime_value(value[1:])[0], None)
if value[:1] in ('<'):
return (None, parse_datetime_value(value[1:])[0])
if value[0] == '=':
return parse_datetime_value(value[1:])
raise InvalidQuery('{} is not a valid datetime query'.format(value))
def parse_datetime_value(value):
try:
return _parse_datetime_value(value)
except ValueError:
raise InvalidQuery('{} is not a valid datetime query'.format(value))
def _parse_datetime_value(value):
# timezones are not supported and are assumed UTC
if value[-1] == 'Z':
value = value[:-1]
value_len = len(value)
if value_len in (8, 10):
value = datetime.strptime(value, '%Y-%m-%d').replace(
tzinfo=timezone.utc,
)
return [value, value + timedelta(days=1)]
elif value[4] == '-':
try:
value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S').replace(
tzinfo=timezone.utc,
)
except ValueError:
value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f').replace(
tzinfo=timezone.utc,
)
else:
value = datetime.utcfromtimestamp(float(value)).replace(
tzinfo=timezone.utc,
)
return [value - timedelta(minutes=5), value + timedelta(minutes=6)]
def parse_datetime_expression(value):
# result must be (from inclusive, to exclusive)
if value.startswith(('-', '+')):
return parse_datetime_range(value)
if value.startswith(('>', '<', '=', '<=', '>=')):
return parse_datetime_comparison(value)
return parse_datetime_value(value)
def get_date_params(value, from_field, to_field):
date_from, date_to = parse_datetime_expression(value)
result = {}
if date_from:
result.update({
from_field: date_from,
'{}_inclusive'.format(from_field): True,
})
if date_to:
result.update({
to_field: date_to,
'{}_inclusive'.format(to_field): False,
})
return result
def tokenize_query(query):
"""
Tokenizes a standard Sentry search query.
>>> query = 'is:resolved foo bar tag:value'
>>> tokenize_query(query)
{
'is': ['resolved'],
'query': ['foo', 'bar'],
'tag': ['value'],
}
"""
results = defaultdict(list)
tokens = query.split(' ')
tokens_iter = iter(tokens)
for token in tokens_iter:
# ignore empty tokens
if not token:
continue
if ':' not in token:
results['query'].append(token)
continue
key, value = token.split(':', 1)
if not value:
results['query'].append(token)
continue
if value[0] == '"':
nvalue = value
while nvalue[-1] != '"':
try:
nvalue = tokens_iter.next()
except StopIteration:
break
value = '%s %s' % (value, nvalue)
if value.endswith('"'):
value = value[1:-1]
else:
value = value[1:]
results[key].append(value)
return dict(results)
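# Illustrative sketch only (not part of the original module): quoted values
# containing spaces are stitched back together from the whitespace-split
# tokens before the surrounding quotes are stripped.
def _example_tokenize_quoted():
    assert tokenize_query('user:"foo bar" is:resolved') == {
        'user': ['foo bar'],
        'is': ['resolved'],
    }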
def parse_query(project, query, user):
# TODO(dcramer): handle query being wrapped in quotes
tokens = tokenize_query(query)
results = {'tags': {}, 'query': []}
for key, token_list in tokens.iteritems():
for value in token_list:
if key == 'query':
results['query'].append(value)
elif key == 'is':
if value == 'unassigned':
results['unassigned'] = True
elif value == 'assigned':
results['unassigned'] = False
else:
try:
results['status'] = STATUS_CHOICES[value]
except KeyError:
pass
elif key == 'assigned':
if value == 'me':
results['assigned_to'] = user
else:
try:
results['assigned_to'] = find_users(value)[0]
except IndexError:
# XXX(dcramer): hacky way to avoid showing any results when
# an invalid user is entered
results['assigned_to'] = User(id=0)
elif key == 'bookmarks':
if value == 'me':
results['bookmarked_by'] = user
else:
try:
results['bookmarked_by'] = find_users(value)[0]
except IndexError:
# XXX(dcramer): hacky way to avoid showing any results when
# an invalid user is entered
results['bookmarked_by'] = User(id=0)
elif key == 'first-release':
results['first_release'] = value
elif key == 'release':
results['tags']['sentry:release'] = value
elif key == 'user':
if ':' in value:
comp, value = value.split(':', 1)
else:
comp = 'id'
results['tags']['sentry:user'] = get_user_tag(
project, comp, value)
elif key == 'has':
if value == 'user':
value = 'sentry:user'
elif value == 'release':
value = 'sentry:release'
results['tags'][value] = ANY
elif key == 'age':
results.update(get_date_params(value, 'age_from', 'age_to'))
elif key.startswith('user.'):
results['tags']['sentry:user'] = get_user_tag(
project, key.split('.', 1)[1], value)
elif key == 'event.timestamp':
results.update(get_date_params(value, 'date_from', 'date_to'))
else:
results['tags'][key] = value
results['query'] = ' '.join(results['query'])
return results
| {
"repo_name": "nicholasserra/sentry",
"path": "src/sentry/search/utils.py",
"copies": "2",
"size": "8105",
"license": "bsd-3-clause",
"hash": -2312231317154480600,
"line_mean": 31.42,
"line_max": 83,
"alpha_frac": 0.5196792104,
"autogenerated": false,
"ratio": 4.279303062302007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009523809523809523,
"num_lines": 250
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from datetime import datetime, timedelta
import six
from django.db import DataError
from django.utils import timezone
from sentry.constants import STATUS_CHOICES
from sentry.models import EventUser, KEYWORD_MAP, Release, Team, User
from sentry.search.base import ANY
from sentry.utils.auth import find_users
from sentry.utils.compat import map
class InvalidQuery(Exception):
pass
def get_user_tag(projects, key, value):
# TODO(dcramer): do something with case of multiple matches
try:
lookup = EventUser.attr_from_keyword(key)
euser = EventUser.objects.filter(
project_id__in=[p.id for p in projects], **{lookup: value}
)[0]
except (KeyError, IndexError):
return u"{}:{}".format(key, value)
except DataError:
raise InvalidQuery(u"malformed '{}:' query '{}'.".format(key, value))
return euser.tag_value
def parse_status_value(value):
if value in STATUS_CHOICES:
return STATUS_CHOICES[value]
if value in STATUS_CHOICES.values():
return value
raise ValueError("Invalid status value")
def parse_duration(value, interval):
try:
value = float(value)
except ValueError:
raise InvalidQuery(u"{} is not a valid duration value".format(value))
if interval == "ms":
delta = timedelta(milliseconds=value)
elif interval == "s":
delta = timedelta(seconds=value)
elif interval in ["min", "m"]:
delta = timedelta(minutes=value)
elif interval in ["hr", "h"]:
delta = timedelta(hours=value)
elif interval in ["day", "d"]:
delta = timedelta(days=value)
elif interval in ["wk", "w"]:
delta = timedelta(days=value * 7)
else:
raise InvalidQuery(
u"{} is not a valid duration type, must be ms, s, min, m, hr, h, day, d, wk or w".format(
interval
)
)
return delta.total_seconds() * 1000.0
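# Illustrative sketch only (not part of the original module): parse_duration
# normalises every supported unit to a float number of milliseconds.
def _example_parse_duration():
    assert parse_duration('250', 'ms') == 250.0
    assert parse_duration('1.5', 's') == 1500.0
    assert parse_duration('2', 'min') == 120000.0
    assert parse_duration('1', 'd') == 86400000.0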
def parse_percentage(value):
try:
value = float(value)
except ValueError:
raise InvalidQuery(u"{} is not a valid percentage value".format(value))
return value / 100
def parse_datetime_range(value):
try:
flag, count, interval = value[0], int(value[1:-1]), value[-1]
except (ValueError, TypeError, IndexError):
raise InvalidQuery(u"{} is not a valid datetime query".format(value))
if flag not in ("+", "-"):
raise InvalidQuery(u"{} is not a valid datetime query".format(value))
if interval == "h":
delta = timedelta(hours=count)
elif interval == "w":
delta = timedelta(days=count * 7)
elif interval == "d":
delta = timedelta(days=count)
elif interval == "m":
delta = timedelta(minutes=count)
else:
raise InvalidQuery(u"{} is not a valid datetime query".format(value))
if flag == "-":
return ((timezone.now() - delta, True), None)
else:
return (None, (timezone.now() - delta, True))
DATE_FORMAT = "%Y-%m-%d"
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
DATETIME_FORMAT_MICROSECONDS = "%Y-%m-%dT%H:%M:%S.%f"
def parse_unix_timestamp(value):
return datetime.utcfromtimestamp(float(value)).replace(tzinfo=timezone.utc)
def parse_datetime_string(value):
# timezones are not supported and are assumed UTC
if value[-1:] == "Z":
value = value[:-1]
if len(value) >= 6 and value[-6] == "+":
value = value[:-6]
for format in [DATETIME_FORMAT_MICROSECONDS, DATETIME_FORMAT, DATE_FORMAT]:
try:
return datetime.strptime(value, format).replace(tzinfo=timezone.utc)
except ValueError:
pass
try:
return parse_unix_timestamp(value)
except ValueError:
pass
raise InvalidQuery(u"{} is not a valid ISO8601 date query".format(value))
def parse_datetime_comparison(value):
if value[:2] == ">=":
return ((parse_datetime_string(value[2:]), True), None)
if value[:2] == "<=":
return (None, (parse_datetime_string(value[2:]), True))
if value[:1] == ">":
return ((parse_datetime_string(value[1:]), False), None)
if value[:1] == "<":
return (None, (parse_datetime_string(value[1:]), False))
raise InvalidQuery(u"{} is not a valid datetime query".format(value))
def parse_datetime_value(value):
# timezones are not supported and are assumed UTC
if value[-1:] == "Z":
value = value[:-1]
if len(value) >= 6 and value[-6] == "+":
value = value[:-6]
result = None
# A value that only specifies the date (without a time component) should be
# expanded to an interval that spans the entire day.
try:
result = datetime.strptime(value, DATE_FORMAT).replace(tzinfo=timezone.utc)
except ValueError:
pass
else:
return ((result, True), (result + timedelta(days=1), False))
# A value that contains the time should converted to an interval.
for format in [DATETIME_FORMAT, DATETIME_FORMAT_MICROSECONDS]:
try:
result = datetime.strptime(value, format).replace(tzinfo=timezone.utc)
except ValueError:
pass
else:
break # avoid entering the else clause below
else:
try:
result = parse_unix_timestamp(value)
except ValueError:
pass
if result is None:
raise InvalidQuery(u"{} is not a valid datetime query".format(value))
return ((result - timedelta(minutes=5), True), (result + timedelta(minutes=6), False))
def parse_datetime_expression(value):
if value.startswith(("-", "+")):
return parse_datetime_range(value)
elif value.startswith((">", "<", "<=", ">=")):
return parse_datetime_comparison(value)
else:
return parse_datetime_value(value)
def get_date_params(value, from_field, to_field):
date_from, date_to = parse_datetime_expression(value)
result = {}
if date_from is not None:
date_from_value, date_from_inclusive = date_from
result.update(
{from_field: date_from_value, u"{}_inclusive".format(from_field): date_from_inclusive}
)
if date_to is not None:
date_to_value, date_to_inclusive = date_to
result.update(
{to_field: date_to_value, u"{}_inclusive".format(to_field): date_to_inclusive}
)
return result
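# Illustrative sketch only (not part of the original module): a bare date
# expands to the whole-day interval [date, date + 1 day), and get_date_params
# fills in the matching *_inclusive flags.
def _example_get_date_params():
    params = get_date_params('2016-01-02', 'age_from', 'age_to')
    assert params['age_from'].day == 2 and params['age_from_inclusive'] is True
    assert params['age_to'].day == 3 and params['age_to_inclusive'] is False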
def parse_team_value(projects, value, user):
return Team.objects.filter(
slug__iexact=value[1:], projectteam__project__in=projects
).first() or Team(id=0)
def parse_actor_value(projects, value, user):
if value.startswith("#"):
return parse_team_value(projects, value, user)
return parse_user_value(value, user)
def parse_user_value(value, user):
if value == "me":
return user
try:
return find_users(value)[0]
except IndexError:
# XXX(dcramer): hacky way to avoid showing any results when
# an invalid user is entered
return User(id=0)
def get_latest_release(projects, environments, organization_id=None):
if organization_id is None:
project = projects[0]
if hasattr(project, "organization_id"):
organization_id = project.organization_id
else:
return ""
release_qs = Release.objects.filter(organization_id=organization_id, projects__in=projects)
if environments:
release_qs = release_qs.filter(
releaseprojectenvironment__environment__id__in=[
environment.id for environment in environments
]
)
return (
release_qs.extra(select={"sort": "COALESCE(date_released, date_added)"})
.order_by("-sort")
.values_list("version", flat=True)[:1]
.get()
)
def parse_release(value, projects, environments, organization_id=None):
if value == "latest":
try:
return get_latest_release(projects, environments, organization_id)
except Release.DoesNotExist:
# Should just get no results here, so return an empty release name.
return ""
else:
return value
numeric_modifiers = [
(
">=",
lambda field, value: {
u"{}_lower".format(field): value,
u"{}_lower_inclusive".format(field): True,
},
),
(
"<=",
lambda field, value: {
u"{}_upper".format(field): value,
u"{}_upper_inclusive".format(field): True,
},
),
(
">",
lambda field, value: {
u"{}_lower".format(field): value,
u"{}_lower_inclusive".format(field): False,
},
),
(
"<",
lambda field, value: {
u"{}_upper".format(field): value,
u"{}_upper_inclusive".format(field): False,
},
),
]
def get_numeric_field_value(field, raw_value, type=int):
try:
for modifier, function in numeric_modifiers:
if raw_value.startswith(modifier):
return function(field, type(raw_value[len(modifier) :]))
else:
return {field: type(raw_value)}
except ValueError:
msg = u'"{}" could not be converted to a number.'.format(raw_value)
raise InvalidQuery(msg)
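# Illustrative sketch only (not part of the original module): numeric fields
# accept either a plain value or a comparison prefix, which expands into the
# *_lower / *_upper bound keys produced by numeric_modifiers above.
def _example_numeric_field():
    assert get_numeric_field_value('times_seen', '10') == {'times_seen': 10}
    assert get_numeric_field_value('times_seen', '>=5') == {
        'times_seen_lower': 5,
        'times_seen_lower_inclusive': True,
    }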
def tokenize_query(query):
"""
Tokenizes a standard Sentry search query.
Example:
>>> query = 'is:resolved foo bar tag:value'
>>> tokenize_query(query)
{
'is': ['resolved'],
'query': ['foo', 'bar'],
'tag': ['value'],
}
Has a companion implementation in static/app/utils/tokenizeSearch.tsx
"""
result = defaultdict(list)
query_params = defaultdict(list)
tokens = split_query_into_tokens(query)
for token in tokens:
if token.upper() in ["OR", "AND"] or token.strip("()") == "":
continue
state = "query"
for idx, char in enumerate(token):
next_char = token[idx + 1] if idx < len(token) - 1 else None
if idx == 0 and char in ('"', "'", ":"):
break
if char == ":":
if next_char in (":", " "):
state = "query"
else:
state = "tags"
break
query_params[state].append(token)
if "query" in query_params:
result["query"] = map(format_query, query_params["query"])
for tag in query_params["tags"]:
key, value = format_tag(tag)
result[key].append(value)
return dict(result)
def format_tag(tag):
"""
    Splits a tag on ':', removes enclosing quotes and grouping parens if
    present, and returns both sides of the split as strings
Example:
>>> format_tag('user:foo')
'user', 'foo'
    >>> format_tag('user:"foo bar"')
'user', 'foo bar'
"""
idx = tag.index(":")
key = tag[:idx].lstrip("(").strip('"')
value = tag[idx + 1 :].rstrip(")").strip('"')
return key, value
def format_query(query):
"""
Strips enclosing quotes and grouping parens from queries if present.
Example:
>>> format_query('"user:foo bar"')
'user:foo bar'
"""
return query.strip('"()')
def split_query_into_tokens(query):
"""
Splits query string into tokens for parsing by 'tokenize_query'.
    Returns a list of strings
Rules:
Split on whitespace
Unless
- inside enclosing quotes -> 'user:"foo bar"'
- end of last word is a ':' -> 'user: foo'
Example:
    >>> split_query_into_tokens('user:foo user: bar user"foo bar" foo bar') =>
['user:foo', 'user: bar', 'user"foo bar"', 'foo', 'bar']
"""
tokens = []
token = ""
quote_enclosed = False
quote_type = None
end_of_prev_word = None
for idx, char in enumerate(query):
next_char = query[idx + 1] if idx < len(query) - 1 else None
token += char
if next_char and not char.isspace() and next_char.isspace():
end_of_prev_word = char
if char.isspace() and not quote_enclosed and end_of_prev_word != ":":
if not token.isspace():
tokens.append(token.strip(" "))
token = ""
if char in ("'", '"'):
if not quote_enclosed or quote_type == char:
quote_enclosed = not quote_enclosed
if quote_enclosed:
quote_type = char
if not token.isspace():
tokens.append(token.strip(" "))
return tokens
def parse_query(projects, query, user, environments):
# TODO(dcramer): handle query being wrapped in quotes
tokens = tokenize_query(query)
results = {"tags": {}, "query": []}
for key, token_list in six.iteritems(tokens):
for value in token_list:
if key == "query":
results["query"].append(value)
elif key == "is":
if value == "unassigned":
results["unassigned"] = True
elif value == "assigned":
results["unassigned"] = False
elif value == "inbox":
results["inbox"] = True
elif value == "linked":
results["linked"] = True
elif value == "unlinked":
results["linked"] = False
else:
try:
results["status"] = STATUS_CHOICES[value]
except KeyError:
raise InvalidQuery(u"'is:' had unknown status code '{}'.".format(value))
elif key == "assigned":
results["assigned_to"] = parse_actor_value(projects, value, user)
elif key == "bookmarks":
results["bookmarked_by"] = parse_user_value(value, user)
elif key == "subscribed":
results["subscribed_by"] = parse_user_value(value, user)
elif key in ("first-release", "firstRelease"):
results["first_release"] = parse_release(value, projects, environments)
elif key == "release":
results["tags"]["sentry:release"] = parse_release(value, projects, environments)
elif key == "dist":
results["tags"]["sentry:dist"] = value
elif key == "user":
if ":" in value:
comp, value = value.split(":", 1)
else:
comp = "id"
results["tags"]["sentry:user"] = get_user_tag(projects, comp, value)
elif key == "has":
if value == "user":
value = "sentry:user"
elif value == "release":
value = "sentry:release"
# `has:x` query should not take precedence over `x:value` queries
if value not in results["tags"]:
results["tags"][value] = ANY
elif key in ("age", "firstSeen"):
results.update(get_date_params(value, "age_from", "age_to"))
elif key in ("last_seen", "lastSeen"):
results.update(get_date_params(value, "last_seen_from", "last_seen_to"))
elif key == "activeSince":
results.update(get_date_params(value, "active_at_from", "active_at_to"))
elif key.startswith("user."):
results["tags"]["sentry:user"] = get_user_tag(projects, key.split(".", 1)[1], value)
elif key == "event.timestamp":
results.update(get_date_params(value, "date_from", "date_to"))
elif key == "timesSeen":
results.update(get_numeric_field_value("times_seen", value))
else:
results["tags"][key] = value
results["query"] = " ".join(results["query"])
return results
def convert_user_tag_to_query(key, value):
"""
Converts a user tag to a query string that can be used to search for that
user. Returns None if not a user tag.
"""
if key == "user" and ":" in value:
sub_key, value = value.split(":", 1)
if KEYWORD_MAP.get_key(sub_key, None):
return 'user.%s:"%s"' % (sub_key, value.replace('"', '\\"'))
| {
"repo_name": "beeftornado/sentry",
"path": "src/sentry/search/utils.py",
"copies": "1",
"size": "16272",
"license": "bsd-3-clause",
"hash": -5698983795134993000,
"line_mean": 31.2857142857,
"line_max": 101,
"alpha_frac": 0.5664945919,
"autogenerated": false,
"ratio": 3.9931288343558284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004874581785756994,
"num_lines": 504
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from functools import wraps
from .decorators import singleton
from .util import disambiguate
@singleton
class Registry(object):
""" Stores labels for classes of objects. Ensures uniqueness
The registry ensures that labels for objects of the same "group"
are unique, and disambiguates as necessary. By default,
    object types are used to group, but anything can be used as a group
Registry is a singleton, and thus all instances of Registry
share the same information
Usage:
>>> r = Registry()
>>> x, y, z = 3, 4, 5
>>> w = list()
>>> r.register(x, 'Label')
'Label'
>>> r.register(y, 'Label') # duplicate label disambiguated
'Label_01'
>>> r.register(w, 'Label') # uniqueness only enforced within groups
'Label'
>>> r.register(z, 'Label', group=int) # put z in integer registry
'Label_02'
"""
def __init__(self):
self._registry = defaultdict(dict)
self._disable = False
    def register(self, obj, label, group=None):
        """ Register label with object (possibly disambiguating)
:param obj: The object to label
:param label: The desired label
:param group: (optional) use the registry for group (default=type(obj))
:rtype: str
*Returns*
The disambiguated label
"""
group = group or type(obj)
reg = self._registry[group]
has_obj = obj in reg
has_label = label in reg.values()
label_is_obj = has_label and has_obj and reg[obj] == label
if has_label and (not label_is_obj):
values = set(reg.values())
if has_obj:
values.remove(reg[obj])
if not self._disable:
label = disambiguate(label, values)
reg[obj] = label
return label
def unregister(self, obj, group=None):
group = group or type(obj)
reg = self._registry[group]
if obj in reg:
reg.pop(obj)
def clear(self):
""" Reset registry, clearing all stored values """
self._registry = defaultdict(dict)
def disable(func):
""" Decorator to temporarily disable disambiguation """
@wraps(func)
def wrapper(*args, **kwargs):
r = Registry()
old = r._disable
r._disable = True
try:
return func(*args, **kwargs)
finally:
r._disable = old
return wrapper
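# Illustrative sketch only (not part of the original module): with the
# ``disable`` decorator active, duplicate labels within a group come back
# verbatim instead of being disambiguated with a numeric suffix.
def _example_disable():
    @disable
    def register_many(objects):
        r = Registry()
        return [r.register(obj, 'data') for obj in objects]
    labels = register_many([object(), object()])
    assert labels == ['data', 'data']
    Registry().clear()            # tidy up the shared singleton state
    return labels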
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/registry.py",
"copies": "1",
"size": "2587",
"license": "bsd-3-clause",
"hash": 4472989306474761700,
"line_mean": 26.2315789474,
"line_max": 79,
"alpha_frac": 0.5867800541,
"autogenerated": false,
"ratio": 4.20650406504065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.529328411914065,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from collections import defaultdict
from operator import add, iadd, mul, imul
from itertools import product, cycle, chain
import sys
import pytest
from cycler import cycler, Cycler, concat
if sys.version_info < (3,):
from itertools import izip as zip
range = xrange # noqa
str = unicode # noqa
def _cycler_helper(c, length, keys, values):
assert len(c) == length
assert len(c) == len(list(c))
assert c.keys == set(keys)
for k, vals in zip(keys, values):
for v, v_target in zip(c, vals):
assert v[k] == v_target
def _cycles_equal(c1, c2):
assert list(c1) == list(c2)
assert c1 == c2
@pytest.mark.parametrize('c', [cycler(c='rgb'),
cycler(c=list('rgb')),
cycler(cycler(c='rgb'))],
ids=['from string',
'from list',
'from cycler'])
def test_creation(c):
_cycler_helper(c, 3, ['c'], [['r', 'g', 'b']])
def test_add():
c1 = cycler(c='rgb')
c2 = cycler(lw=range(3))
# addition
_cycler_helper(c1 + c2, 3, ['c', 'lw'], [list('rgb'), range(3)])
_cycler_helper(c2 + c1, 3, ['c', 'lw'], [list('rgb'), range(3)])
_cycles_equal(c2 + c1, c1 + c2)
def test_add_len_mismatch():
# miss-matched add lengths
c1 = cycler(c='rgb')
c3 = cycler(lw=range(15))
with pytest.raises(ValueError):
c1 + c3
with pytest.raises(ValueError):
c3 + c1
def test_prod():
c1 = cycler(c='rgb')
c2 = cycler(lw=range(3))
c3 = cycler(lw=range(15))
# multiplication
target = zip(*product(list('rgb'), range(3)))
_cycler_helper(c1 * c2, 9, ['c', 'lw'], target)
target = zip(*product(range(3), list('rgb')))
_cycler_helper(c2 * c1, 9, ['lw', 'c'], target)
target = zip(*product(range(15), list('rgb')))
_cycler_helper(c3 * c1, 45, ['lw', 'c'], target)
def test_inplace():
c1 = cycler(c='rgb')
c2 = cycler(lw=range(3))
c2 += c1
_cycler_helper(c2, 3, ['c', 'lw'], [list('rgb'), range(3)])
c3 = cycler(c='rgb')
c4 = cycler(lw=range(3))
c3 *= c4
target = zip(*product(list('rgb'), range(3)))
_cycler_helper(c3, 9, ['c', 'lw'], target)
def test_constructor():
c1 = cycler(c='rgb')
c2 = cycler(ec=c1)
_cycler_helper(c1 + c2, 3, ['c', 'ec'], [['r', 'g', 'b']] * 2)
c3 = cycler(c=c1)
_cycler_helper(c3 + c2, 3, ['c', 'ec'], [['r', 'g', 'b']] * 2)
# Using a non-string hashable
c4 = cycler(1, range(3))
_cycler_helper(c4 + c1, 3, [1, 'c'], [range(3), ['r', 'g', 'b']])
# addition using cycler()
_cycler_helper(cycler(c='rgb', lw=range(3)),
3, ['c', 'lw'], [list('rgb'), range(3)])
_cycler_helper(cycler(lw=range(3), c='rgb'),
3, ['c', 'lw'], [list('rgb'), range(3)])
# Purposely mixing them
_cycler_helper(cycler(c=range(3), lw=c1),
3, ['c', 'lw'], [range(3), list('rgb')])
def test_failures():
c1 = cycler(c='rgb')
c2 = cycler(c=c1)
pytest.raises(ValueError, add, c1, c2)
pytest.raises(ValueError, iadd, c1, c2)
pytest.raises(ValueError, mul, c1, c2)
pytest.raises(ValueError, imul, c1, c2)
pytest.raises(TypeError, iadd, c2, 'aardvark')
pytest.raises(TypeError, imul, c2, 'aardvark')
c3 = cycler(ec=c1)
pytest.raises(ValueError, cycler, c=c2 + c3)
def test_simplify():
c1 = cycler(c='rgb')
c2 = cycler(ec=c1)
for c in [c1 * c2, c2 * c1, c1 + c2]:
_cycles_equal(c, c.simplify())
def test_multiply():
c1 = cycler(c='rgb')
_cycler_helper(2 * c1, 6, ['c'], ['rgb' * 2])
c2 = cycler(ec=c1)
c3 = c1 * c2
_cycles_equal(2 * c3, c3 * 2)
def test_mul_fails():
c1 = cycler(c='rgb')
pytest.raises(TypeError, mul, c1, 2.0)
pytest.raises(TypeError, mul, c1, 'a')
pytest.raises(TypeError, mul, c1, [])
def test_getitem():
c1 = cycler(3, range(15))
widths = list(range(15))
for slc in (slice(None, None, None),
slice(None, None, -1),
slice(1, 5, None),
slice(0, 5, 2)):
_cycles_equal(c1[slc], cycler(3, widths[slc]))
def test_fail_getime():
c1 = cycler(lw=range(15))
pytest.raises(ValueError, Cycler.__getitem__, c1, 0)
pytest.raises(ValueError, Cycler.__getitem__, c1, [0, 1])
def _repr_tester_helper(rpr_func, cyc, target_repr):
test_repr = getattr(cyc, rpr_func)()
assert str(test_repr) == str(target_repr)
def test_repr():
c = cycler(c='rgb')
    # Using an identifier that would not be valid as a kwarg
c2 = cycler('3rd', range(3))
c_sum_rpr = "(cycler('c', ['r', 'g', 'b']) + cycler('3rd', [0, 1, 2]))"
c_prod_rpr = "(cycler('c', ['r', 'g', 'b']) * cycler('3rd', [0, 1, 2]))"
_repr_tester_helper('__repr__', c + c2, c_sum_rpr)
_repr_tester_helper('__repr__', c * c2, c_prod_rpr)
sum_html = (
"<table>"
"<th>'3rd'</th><th>'c'</th>"
"<tr><td>0</td><td>'r'</td></tr>"
"<tr><td>1</td><td>'g'</td></tr>"
"<tr><td>2</td><td>'b'</td></tr>"
"</table>")
prod_html = (
"<table>"
"<th>'3rd'</th><th>'c'</th>"
"<tr><td>0</td><td>'r'</td></tr>"
"<tr><td>1</td><td>'r'</td></tr>"
"<tr><td>2</td><td>'r'</td></tr>"
"<tr><td>0</td><td>'g'</td></tr>"
"<tr><td>1</td><td>'g'</td></tr>"
"<tr><td>2</td><td>'g'</td></tr>"
"<tr><td>0</td><td>'b'</td></tr>"
"<tr><td>1</td><td>'b'</td></tr>"
"<tr><td>2</td><td>'b'</td></tr>"
"</table>")
_repr_tester_helper('_repr_html_', c + c2, sum_html)
_repr_tester_helper('_repr_html_', c * c2, prod_html)
def test_call():
c = cycler(c='rgb')
c_cycle = c()
assert isinstance(c_cycle, cycle)
j = 0
for a, b in zip(2 * c, c_cycle):
j += 1
assert a == b
assert j == len(c) * 2
def test_copying():
# Just about everything results in copying the cycler and
# its contents (shallow). This set of tests is intended to make sure
# of that. Our iterables will be mutable for extra fun!
i1 = [1, 2, 3]
i2 = ['r', 'g', 'b']
# For more mutation fun!
i3 = [['y', 'g'], ['b', 'k']]
c1 = cycler('c', i1)
c2 = cycler('lw', i2)
c3 = cycler('foo', i3)
c_before = (c1 + c2) * c3
i1.pop()
i2.append('cyan')
i3[0].append('blue')
c_after = (c1 + c2) * c3
assert c1 == cycler('c', [1, 2, 3])
assert c2 == cycler('lw', ['r', 'g', 'b'])
assert c3 == cycler('foo', [['y', 'g', 'blue'], ['b', 'k']])
assert c_before == (cycler(c=[1, 2, 3], lw=['r', 'g', 'b']) *
cycler('foo', [['y', 'g', 'blue'], ['b', 'k']]))
assert c_after == (cycler(c=[1, 2, 3], lw=['r', 'g', 'b']) *
cycler('foo', [['y', 'g', 'blue'], ['b', 'k']]))
# Make sure that changing the key for a specific cycler
# doesn't break things for a composed cycler
c = (c1 + c2) * c3
c4 = cycler('bar', c3)
assert c == (cycler(c=[1, 2, 3], lw=['r', 'g', 'b']) *
cycler('foo', [['y', 'g', 'blue'], ['b', 'k']]))
assert c3 == cycler('foo', [['y', 'g', 'blue'], ['b', 'k']])
assert c4 == cycler('bar', [['y', 'g', 'blue'], ['b', 'k']])
def test_keychange():
c1 = cycler('c', 'rgb')
c2 = cycler('lw', [1, 2, 3])
c3 = cycler('ec', 'yk')
c3.change_key('ec', 'edgecolor')
assert c3 == cycler('edgecolor', c3)
c = c1 + c2
c.change_key('lw', 'linewidth')
# Changing a key in one cycler should have no
# impact in the original cycler.
assert c2 == cycler('lw', [1, 2, 3])
assert c == c1 + cycler('linewidth', c2)
c = (c1 + c2) * c3
c.change_key('c', 'color')
assert c1 == cycler('c', 'rgb')
assert c == (cycler('color', c1) + c2) * c3
# Perfectly fine, it is a no-op
c.change_key('color', 'color')
assert c == (cycler('color', c1) + c2) * c3
# Can't change a key to one that is already in there
pytest.raises(ValueError, Cycler.change_key, c, 'color', 'lw')
# Can't change a key you don't have
pytest.raises(KeyError, Cycler.change_key, c, 'c', 'foobar')
def _eq_test_helper(a, b, res):
if res:
assert a == b
else:
assert a != b
def test_eq():
a = cycler(c='rgb')
b = cycler(c='rgb')
_eq_test_helper(a, b, True)
_eq_test_helper(a, b[::-1], False)
c = cycler(lw=range(3))
_eq_test_helper(a+c, c+a, True)
_eq_test_helper(a+c, c+b, True)
_eq_test_helper(a*c, c*a, False)
_eq_test_helper(a, c, False)
d = cycler(c='ymk')
_eq_test_helper(b, d, False)
e = cycler(c='orange')
_eq_test_helper(b, e, False)
def test_cycler_exceptions():
pytest.raises(TypeError, cycler)
pytest.raises(TypeError, cycler, 'c', 'rgb', lw=range(3))
pytest.raises(TypeError, cycler, 'c')
pytest.raises(TypeError, cycler, 'c', 'rgb', 'lw', range(3))
def test_strange_init():
c = cycler('r', 'rgb')
c2 = cycler('lw', range(3))
cy = Cycler(list(c), list(c2), zip)
assert cy == c + c2
def test_concat():
a = cycler('a', range(3))
b = cycler('a', 'abc')
for con, chn in zip(a.concat(b), chain(a, b)):
assert con == chn
for con, chn in zip(concat(a, b), chain(a, b)):
assert con == chn
def test_concat_fail():
a = cycler('a', range(3))
b = cycler('b', range(3))
pytest.raises(ValueError, concat, a, b)
pytest.raises(ValueError, a.concat, b)
def _by_key_helper(cy):
res = cy.by_key()
target = defaultdict(list)
for sty in cy:
for k, v in sty.items():
target[k].append(v)
assert res == target
def test_by_key_add():
input_dict = dict(c=list('rgb'), lw=[1, 2, 3])
cy = cycler(c=input_dict['c']) + cycler(lw=input_dict['lw'])
res = cy.by_key()
assert res == input_dict
_by_key_helper(cy)
def test_by_key_mul():
input_dict = dict(c=list('rg'), lw=[1, 2, 3])
cy = cycler(c=input_dict['c']) * cycler(lw=input_dict['lw'])
res = cy.by_key()
assert input_dict['lw'] * len(input_dict['c']) == res['lw']
_by_key_helper(cy)
def test_contains():
a = cycler('a', range(3))
b = cycler('b', range(3))
assert 'a' in a
assert 'b' in b
assert 'a' not in b
assert 'b' not in a
ab = a + b
assert 'a' in ab
assert 'b' in ab
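# Illustrative sketch (not part of the original test suite): a compact summary
# of the composition semantics exercised above -- '+' zips equal-length cyclers
# together, '*' takes their outer product, and by_key() flattens a composed
# cycler back into plain lists.
def _composition_sketch():
    from cycler import cycler
    color = cycler('color', ['r', 'g', 'b'])
    width = cycler('lw', [1, 2, 3])
    added = color + width       # 3 styles: ('r', 1), ('g', 2), ('b', 3)
    multiplied = color * width  # 9 styles: every color paired with every width
    assert len(added) == 3 and len(multiplied) == 9
    assert added.by_key() == {'color': ['r', 'g', 'b'], 'lw': [1, 2, 3]}
    return added, multiplied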
| {
"repo_name": "matplotlib/cycler",
"path": "test_cycler.py",
"copies": "1",
"size": "10487",
"license": "bsd-3-clause",
"hash": 749834052460513300,
"line_mean": 26.6701846966,
"line_max": 76,
"alpha_frac": 0.5166396491,
"autogenerated": false,
"ratio": 2.731700963792654,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8748340612892654,
"avg_score": 0,
"num_lines": 379
} |
from __future__ import (absolute_import, division, print_function)
from collections import defaultdict
import numpy as np
from qtpy.QtWidgets import QMainWindow, QTableWidget, QRadioButton, QTableWidgetItem
from addie.utilities import load_ui
from addie.utilities.list_runs_parser import ListRunsParser
#from addie.ui_solve_import_conflicts import Ui_MainWindow as UiMainWindow
class ConflictsSolverHandler:
def __init__(self, parent=None, json_conflicts={}):
o_solver = ConflictsSolverWindow(parent=parent, json_conflicts=json_conflicts)
if parent.conflicts_solver_ui_position:
o_solver.move(parent.conflicts_solver_ui_position)
o_solver.show()
class ConflictsSolverWindow(QMainWindow):
    list_table = []  # QTableWidget displayed in each of the tabs
table_width_per_character = 20
table_header_per_character = 15
list_keys = ["Run Number", 'chemical_formula', 'geometry', 'mass_density', 'sample_env_device']
columns_label = ["Run Number", "Chemical Formula", "Geometry", "Mass Density", "Sample Env. Device"]
list_of_keys_with_conflicts = []
def __init__(self, parent=None, json_conflicts={}):
self.parent = parent
self.json_conflicts = json_conflicts
QMainWindow.__init__(self, parent=parent)
self.ui = load_ui('solve_import_conflicts.ui', baseinstance=self)
#self.ui = UiMainWindow()
#self.ui.setupUi(self)
self.init_widgets()
def init_widgets(self):
json_conflicts = self.json_conflicts
for _key in json_conflicts.keys():
if json_conflicts[_key]['any_conflict']:
self.list_of_keys_with_conflicts.append(_key)
self._add_tab(json=json_conflicts[_key]['conflict_dict'])
def _calculate_columns_width(self, json=None):
"""will loop through all the conflict keys to figure out which one, for each column label, the string
is the longest"""
list_key = self.list_keys
columns_width = defaultdict(list)
for _key in list_key:
for _conflict_index in json.keys():
columns_width[_key].append(self.table_width_per_character * len(json[_conflict_index][_key]))
final_columns_width = []
for _key in list_key:
            _max_width = np.max([np.array(columns_width[_key]).max(), len(_key) * self.table_header_per_character])
final_columns_width.append(_max_width)
return final_columns_width
def _add_tab(self, json=None):
"""will look at the json and will display the values in conflicts in a new tab to allow the user
to fix the conflicts"""
number_of_tabs = self.ui.tabWidget.count()
_table = QTableWidget()
# initialize each table
columns_width = self._calculate_columns_width(json=json)
for _col in np.arange(len(json[0])):
_table.insertColumn(_col)
_table.setColumnWidth(_col, columns_width[_col])
for _row in np.arange(len(json)):
_table.insertRow(_row)
self.list_table.append(_table)
_table.setHorizontalHeaderLabels(self.columns_label)
for _row in np.arange(len(json)):
# run number
_col = 0
list_runs = json[_row]["Run Number"]
o_parser = ListRunsParser()
checkbox = QRadioButton(o_parser.new_runs(list_runs=list_runs))
if _row == 0:
checkbox.setChecked(True)
# QtCore.QObject.connect(checkbox, QtCore.SIGNAL("clicked(bool)"),
# lambda bool, row=_row, table_id=_table:
# self._changed_conflict_checkbox(bool, row, table_id))
_table.setCellWidget(_row, _col, checkbox)
_col += 1
# chemical formula
item = QTableWidgetItem(json[_row]["chemical_formula"])
_table.setItem(_row, _col, item)
_col += 1
# geometry
item = QTableWidgetItem(json[_row]["geometry"])
_table.setItem(_row, _col, item)
_col += 1
# mass_density
item = QTableWidgetItem(json[_row]["mass_density"])
_table.setItem(_row, _col, item)
_col += 1
# sample_env_device
item = QTableWidgetItem(json[_row]["sample_env_device"])
_table.setItem(_row, _col, item)
self.ui.tabWidget.insertTab(number_of_tabs, _table, "Conflict #{}".format(number_of_tabs))
# def _changed_conflict_checkbox(self, state, row, table_id):
# print("state is {} in row {} from table_id {}".format(state, row, table_id))
def save_resolved_conflict(self, tab_index=0, key=None):
"""Using the radio button checked, will save the chemical_formula, geometry... into the final json"""
def _get_checked_row(table_ui=None):
"""returns the first row where the radio button (column 0) is checked"""
if table_ui is None:
return -1
nbr_row = table_ui.rowCount()
for _row in np.arange(nbr_row):
is_radio_button_checked = table_ui.cellWidget(_row, 0).isChecked()
if is_radio_button_checked:
return _row
return -1
table_ui = self.list_table[tab_index]
json_conflicts = self.json_conflicts
this_json = json_conflicts[key]
        # row checked (which row to use to fix the conflict)
_row = _get_checked_row(table_ui=table_ui)
this_json['any_conflict'] = False
# chemical_formula, geometry, etc.
chemical_formula = str(table_ui.item(_row, 1).text())
geometry = str(table_ui.item(_row, 2).text())
mass_density = str(table_ui.item(_row, 3).text())
sample_env_device = str(table_ui.item(_row, 4).text())
this_json['resolved_conflict'] = {'chemical_formula': chemical_formula,
'geometry': geometry,
'mass_density': mass_density,
'sample_env_device': sample_env_device}
self.json_conflicts = json_conflicts
def accept(self):
for _conflict_index, _key in enumerate(self.list_of_keys_with_conflicts):
self.save_resolved_conflict(tab_index=_conflict_index, key=_key)
self.parent.from_oncat_to_master_table(json=self.json_conflicts,
with_conflict=False)
self.close()
def reject(self):
self.parent.from_oncat_to_master_table(json=self.json_conflicts,
ignore_conflicts=True)
self.close()
def closeEvent(self, c):
pass
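# Illustrative sketch only: the json_conflicts structure assumed by the classes
# above, inferred from how init_widgets() and save_resolved_conflict() read it
# rather than from upstream documentation. Each top-level key maps to an
# 'any_conflict' flag plus a 'conflict_dict' whose integer keys each hold one
# candidate set of metadata for the conflicting runs.
def _example_json_conflicts():
    return {
        'some_key': {
            'any_conflict': True,
            'conflict_dict': {
                0: {'Run Number': '1,2,3',
                    'chemical_formula': 'Si O2',
                    'geometry': 'cylinder',
                    'mass_density': '2.65',
                    'sample_env_device': 'furnace'},
                1: {'Run Number': '4-6',
                    'chemical_formula': 'Si O2',
                    'geometry': 'sphere',
                    'mass_density': '2.2',
                    'sample_env_device': 'furnace'},
            },
        },
    }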
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/conflicts_solver.py",
"copies": "1",
"size": "6804",
"license": "mit",
"hash": -8960811740590854000,
"line_mean": 36.591160221,
"line_max": 114,
"alpha_frac": 0.5871546149,
"autogenerated": false,
"ratio": 3.9375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024654614899999,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import pandas as pd
from toolz import partition_all
from ..base import tokenize
from .accessor import Accessor
from .utils import (has_known_categories, clear_known_categories, is_scalar,
is_categorical_dtype)
def _categorize_block(df, categories, index):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
if is_categorical_dtype(df[col]):
df[col] = df[col].cat.set_categories(vals)
else:
df[col] = pd.Categorical(df[col], categories=vals, ordered=False)
if index is not None:
if is_categorical_dtype(df.index):
ind = df.index.set_categories(index)
else:
ind = pd.Categorical(df.index, categories=index, ordered=False)
ind.name = df.index.name
df.index = ind
return df
def _get_categories(df, columns, index):
res = {}
for col in columns:
x = df[col]
if is_categorical_dtype(x):
res[col] = pd.Series(x.cat.categories)
else:
res[col] = x.dropna().drop_duplicates()
if index:
if is_categorical_dtype(df.index):
return res, df.index.categories
return res, df.index.dropna().drop_duplicates()
return res, None
def _get_categories_agg(parts):
res = defaultdict(list)
res_ind = []
for p in parts:
for k, v in p[0].items():
res[k].append(v)
res_ind.append(p[1])
res = {k: pd.concat(v, ignore_index=True).drop_duplicates()
for k, v in res.items()}
if res_ind[0] is None:
return res, None
return res, res_ind[0].append(res_ind[1:]).drop_duplicates()
def categorize(df, columns=None, index=None, split_every=None, **kwargs):
"""Convert columns of the DataFrame to category dtype.
Parameters
----------
columns : list, optional
A list of column names to convert to categoricals. By default any
column with an object dtype is converted to a categorical, and any
unknown categoricals are made known.
index : bool, optional
Whether to categorize the index. By default, object indices are
converted to categorical, and unknown categorical indices are made
known. Set True to always categorize the index, False to never.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is 16.
kwargs
Keyword arguments are passed on to compute.
"""
meta = df._meta
if columns is None:
columns = list(meta.select_dtypes(['object', 'category']).columns)
elif is_scalar(columns):
columns = [columns]
# Filter out known categorical columns
columns = [c for c in columns if not (is_categorical_dtype(meta[c]) and
has_known_categories(meta[c]))]
if index is not False:
if is_categorical_dtype(meta.index):
index = not has_known_categories(meta.index)
elif index is None:
index = meta.index.dtype == object
# Nothing to do
if not len(columns) and index is False:
return df
if split_every is None:
split_every = 16
elif split_every is False:
split_every = df.npartitions
elif not isinstance(split_every, int) or split_every < 2:
raise ValueError("split_every must be an integer >= 2")
token = tokenize(df, columns, index, split_every)
a = 'get-categories-chunk-' + token
dsk = {(a, i): (_get_categories, key, columns, index)
for (i, key) in enumerate(df._keys())}
prefix = 'get-categories-agg-' + token
k = df.npartitions
b = a
depth = 0
while k > split_every:
b = prefix + str(depth)
for part_i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(b, part_i)] = (_get_categories_agg, [(a, i) for i in inds])
k = part_i + 1
a = b
depth += 1
dsk[(prefix, 0)] = (_get_categories_agg, [(a, i) for i in range(k)])
dsk.update(df.dask)
# Compute the categories
categories, index = df._get(dsk, (prefix, 0), **kwargs)
# Categorize each partition
return df.map_partitions(_categorize_block, categories, index)
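# Illustrative sketch (not part of the original module): driving the categorize
# helper above directly, assuming the dask version this module ships with. It
# is normally reached through DataFrame.categorize(); either way the unknown
# object/categorical columns are computed once and every partition is rewritten
# with the full category set.
def _example_categorize():
    import pandas as pd
    import dask.dataframe as dd
    pdf = pd.DataFrame({'fruit': ['apple', 'banana', 'apple', 'cherry'],
                        'count': [1, 2, 3, 4]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    categorized = categorize(ddf, columns=['fruit'])
    # categories are now part of the metadata, so no further compute is needed
    return categorized.fruit.cat.categories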
class CategoricalAccessor(Accessor):
"""
Accessor object for categorical properties of the Series values.
Examples
--------
>>> s.cat.categories # doctest: +SKIP
Notes
-----
Attributes that depend only on metadata are eager
* categories
* ordered
Attributes depending on the entire dataset are lazy
* codes
* ...
So `df.a.cat.categories` <=> `df.a._meta.cat.categories`
So `df.a.cat.codes` <=> `df.a.map_partitions(lambda x: x.cat.codes)`
"""
_accessor = pd.Series.cat
_accessor_name = 'cat'
def _validate(self, series):
if not is_categorical_dtype(series.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
@property
def known(self):
"""Whether the categories are fully known"""
return has_known_categories(self._series)
def as_known(self, **kwargs):
"""Ensure the categories in this series are known.
If the categories are known, this is a no-op. If unknown, the
categories are computed, and a new series with known categories is
returned.
Parameters
----------
kwargs
Keywords to pass on to the call to `compute`.
"""
if self.known:
return self
categories = self._property_map('categories').unique().compute(**kwargs)
return self.set_categories(categories.values)
def as_unknown(self):
"""Ensure the categories in this series are unknown"""
if not self.known:
return self._series
out = self._series.copy()
out._meta = clear_known_categories(out._meta)
return out
@property
def ordered(self):
return self._delegate_property(self._series._meta, 'cat', 'ordered')
@property
def categories(self):
"""The categories of this categorical.
If categories are unknown, an error is raised"""
if not self.known:
msg = ("`df.column.cat.categories` with unknown categories is not "
"supported. Please use `column.cat.as_known()` or "
"`df.categorize()` beforehand to ensure known categories")
raise NotImplementedError(msg)
return self._delegate_property(self._series._meta, 'cat', 'categories')
@property
def codes(self):
"""The codes of this categorical.
If categories are unknown, an error is raised"""
if not self.known:
msg = ("`df.column.cat.codes` with unknown categories is not "
"supported. Please use `column.cat.as_known()` or "
"`df.categorize()` beforehand to ensure known categories")
raise NotImplementedError(msg)
return self._property_map('codes')
def remove_unused_categories(self):
"""
Removes categories which are not used
Notes
-----
This method requires a full scan of the data to compute the
unique values, which can be expensive.
"""
# get the set of used categories
present = self._series.dropna().unique()
present = pd.Index(present.compute())
if isinstance(self._series._meta, pd.CategoricalIndex):
meta_cat = self._series._meta
else:
meta_cat = self._series._meta.cat
# Reorder to keep cat:code relationship, filtering unused (-1)
ordered, mask = present.reindex(meta_cat.categories)
new_categories = ordered[mask != -1]
meta = meta_cat.set_categories(new_categories, ordered=meta_cat.ordered)
return self._series.map_partitions(self._delegate_method, 'cat',
'set_categories', (),
{'new_categories': new_categories},
meta=meta,
token='cat-set_categories')
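# Illustrative sketch (not part of the original module): the known/as_known
# round trip described in the accessor docstrings above. `series` is assumed to
# be a dask Series of 'category' dtype, for example one produced by astype.
def _example_cat_accessor(series):
    if not series.cat.known:
        # computes the categories once and attaches them to the metadata
        series = series.cat.as_known()
    # metadata-only attributes such as categories are now available eagerly
    return series.cat.categories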
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/categorical.py",
"copies": "3",
"size": "8568",
"license": "bsd-3-clause",
"hash": -4189232013516108300,
"line_mean": 32.8656126482,
"line_max": 80,
"alpha_frac": 0.5965219421,
"autogenerated": false,
"ratio": 4.1612433220009715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6257765264100972,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import re
import traceback
import sys
import zlib
import cbor
import happybase
import happybase.hbase.ttypes as ttypes
from dossier.models.etl.interface import ETL, html_to_fc, mk_content_id
from dossier.fc import StringCounter
class Ads(ETL):
def __init__(self, host, port, table_prefix=''):
self.host = host
self.port = port
self.table_prefix = table_prefix
self.last_key = None
def get_conn(self):
return happybase.Connection(host=self.host, port=self.port,
table_prefix=self.table_prefix)
def cids_and_fcs(self, mapper, start, end, limit=5):
return mapper(row_to_content_obj,
self.get_artifact_rows(limit=limit,
start_key=start, stop_key=end))
def get_artifact_rows(self, limit=5, start_key=None, stop_key=None):
while True:
try:
scanner = self.get_scanner(limit=limit,
start_key=start_key,
stop_key=stop_key)
for key, data in scanner:
if key == self.last_key:
# Don't re-process troublesome keys.
continue
self.last_key = key
yield key, unpack_artifact_row(data)
break
except ttypes.IOError:
# Die HBase, die!
continue
def get_scanner(self, limit=5, start_key=None, stop_key=None):
if self.last_key is not None:
start_key = self.last_key
t = self.get_conn().table('artifact')
return t.scan(row_start=start_key, row_stop=stop_key,
limit=limit, batch_size=20)
def row_to_content_obj(key_row):
'''Returns ``FeatureCollection`` given an HBase artifact row.
Note that the FC returned has a Unicode feature ``artifact_id``
set to the row's key.
'''
key, row = key_row
cid = mk_content_id(key.encode('utf-8'))
response = row.get('response', {})
other_bows = defaultdict(StringCounter)
for attr, val in row.get('indices', []):
other_bows[attr][val] += 1
try:
artifact_id = key
if isinstance(artifact_id, str):
artifact_id = unicode(artifact_id, 'utf-8')
fc = html_to_fc(
response.get('body', ''),
url=row.get('url'), timestamp=row.get('timestamp'),
other_features=dict(other_bows, **{'artifact_id': artifact_id}))
except:
fc = None
print('Could not create FC for %s:' % cid, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return cid, fc
def unpack_artifact_row(row):
data = {
'url': row['f:url'],
'timestamp': int(row['f:timestamp']),
'request': {
'method': row['f:request.method'],
'client': cbor.loads(zlib.decompress(row['f:request.client'])),
'headers': cbor.loads(zlib.decompress(row['f:request.headers'])),
'body': cbor.loads(zlib.decompress(row['f:request.body'])),
},
'response': {
'status': row['f:response.status'],
'server': {
'hostname': row['f:response.server.hostname'],
'address': row['f:response.server.address'],
},
'headers': cbor.loads(zlib.decompress(row['f:response.headers'])),
'body': cbor.loads(zlib.decompress(row['f:response.body'])),
},
'indices': [],
}
for kk, vv in row.items():
mm = re.match(r"^f:index\.(?P<key>.*)\.[0-9]+$", kk)
if mm is not None:
data['indices'].append((mm.group('key'), vv))
return data
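# Illustrative sketch only: a synthetic HBase row laid out the way
# unpack_artifact_row() expects it. The column names are inferred from the code
# above, not from the real 'artifact' table schema, and the values are made up.
def _example_unpack():
    def packed(obj):
        # reuses the module-level cbor/zlib imports
        return zlib.compress(cbor.dumps(obj))
    row = {
        'f:url': 'http://example.com/ad/1',
        'f:timestamp': '1400000000',
        'f:request.method': 'GET',
        'f:request.client': packed({}),
        'f:request.headers': packed({}),
        'f:request.body': packed(''),
        'f:response.status': '200',
        'f:response.server.hostname': 'example.com',
        'f:response.server.address': '93.184.216.34',
        'f:response.headers': packed({}),
        'f:response.body': packed('<html></html>'),
        'f:index.phone.0': '555-0100',
    }
    return unpack_artifact_row(row)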
| {
"repo_name": "dossier/dossier.models",
"path": "dossier/models/etl/ads.py",
"copies": "1",
"size": "3895",
"license": "mit",
"hash": 232037583797294750,
"line_mean": 34.0900900901,
"line_max": 78,
"alpha_frac": 0.5424903723,
"autogenerated": false,
"ratio": 3.8074291300097753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4849919502309775,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from glue.core import Data
from glue.config import colormaps
from glue.viewers.matplotlib.state import (MatplotlibDataViewerState,
MatplotlibLayerState,
DeferredDrawCallbackProperty as DDCProperty,
DeferredDrawSelectionCallbackProperty as DDSCProperty)
from glue.core.state_objects import StateAttributeLimitsHelper
from glue.utils import defer_draw, view_shape
from glue.external.echo import delay_callback
from glue.core.data_combo_helper import ManualDataComboHelper, ComponentIDComboHelper
__all__ = ['ImageViewerState', 'ImageLayerState', 'ImageSubsetLayerState', 'AggregateSlice']
class AggregateSlice(object):
def __init__(self, slice=None, center=None, function=None):
self.slice = slice
self.center = center
self.function = function
class ImageViewerState(MatplotlibDataViewerState):
"""
A state class that includes all the attributes for an image viewer.
"""
x_att = DDCProperty(docstring='The component ID giving the pixel component '
'shown on the x axis')
y_att = DDCProperty(docstring='The component ID giving the pixel component '
'shown on the y axis')
x_att_world = DDSCProperty(docstring='The component ID giving the world component '
'shown on the x axis', default_index=-1)
y_att_world = DDSCProperty(docstring='The component ID giving the world component '
'shown on the y axis', default_index=-2)
aspect = DDCProperty('equal', docstring='Whether to enforce square pixels (``equal``) '
'or fill the axes (``auto``)')
reference_data = DDSCProperty(docstring='The dataset that is used to define the '
'available pixel/world components, and '
'which defines the coordinate frame in '
'which the images are shown')
slices = DDCProperty(docstring='The current slice along all dimensions')
color_mode = DDCProperty('Colormaps', docstring='Whether each layer can have '
'its own colormap (``Colormaps``) or '
'whether each layer is assigned '
'a single color (``One color per layer``)')
dpi = DDCProperty(72, docstring='The resolution (in dots per inch) of density maps, if present')
def __init__(self, **kwargs):
super(ImageViewerState, self).__init__()
self.limits_cache = {}
self.x_lim_helper = StateAttributeLimitsHelper(self, attribute='x_att',
lower='x_min', upper='x_max',
limits_cache=self.limits_cache)
self.y_lim_helper = StateAttributeLimitsHelper(self, attribute='y_att',
lower='y_min', upper='y_max',
limits_cache=self.limits_cache)
self.ref_data_helper = ManualDataComboHelper(self, 'reference_data')
self.xw_att_helper = ComponentIDComboHelper(self, 'x_att_world',
numeric=False, categorical=False,
world_coord=True)
self.yw_att_helper = ComponentIDComboHelper(self, 'y_att_world',
numeric=False, categorical=False,
world_coord=True)
self.add_callback('reference_data', self._reference_data_changed, priority=1000)
self.add_callback('layers', self._layers_changed, priority=1000)
self.add_callback('x_att', self._on_xatt_change, priority=500)
self.add_callback('y_att', self._on_yatt_change, priority=500)
self.add_callback('x_att_world', self._update_att, priority=500)
self.add_callback('y_att_world', self._update_att, priority=500)
self.add_callback('x_att_world', self._on_xatt_world_change, priority=1000)
self.add_callback('y_att_world', self._on_yatt_world_change, priority=1000)
self.update_from_dict(kwargs)
def reset_limits(self):
if self.reference_data is None or self.x_att is None or self.y_att is None:
return
nx = self.reference_data.shape[self.x_att.axis]
ny = self.reference_data.shape[self.y_att.axis]
with delay_callback(self, 'x_min', 'x_max', 'y_min', 'y_max'):
self.x_min = -0.5
self.x_max = nx - 0.5
self.y_min = -0.5
self.y_max = ny - 0.5
def _reference_data_changed(self, *args):
with delay_callback(self, 'x_att_world', 'y_att_world', 'slices'):
self._update_combo_att()
self._set_default_slices()
def _layers_changed(self, *args):
# The layers callback gets executed if anything in the layers changes,
# but we only care about whether the actual set of 'layer' attributes
# for all layers change.
layers_data = self.layers_data
layers_data_cache = getattr(self, '_layers_data_cache', [])
if layers_data == layers_data_cache:
return
self._update_combo_ref_data()
self._set_reference_data()
self._update_syncing()
self._layers_data_cache = layers_data
def _update_syncing(self):
# If there are multiple layers for a given dataset, we disable the
# syncing by default.
layer_state_by_data = defaultdict(list)
for layer_state in self.layers:
if isinstance(layer_state.layer, Data):
layer_state_by_data[layer_state.layer].append(layer_state)
for data, layer_states in layer_state_by_data.items():
if len(layer_states) > 1:
for layer_state in layer_states:
# Scatter layers don't have global_sync so we need to be
# careful here and make sure we return a default value
if getattr(layer_state, 'global_sync', False):
layer_state.global_sync = False
def _update_combo_ref_data(self):
self.ref_data_helper.set_multiple_data(self.layers_data)
def _update_combo_att(self):
with delay_callback(self, 'x_att_world', 'y_att_world'):
if self.reference_data is None:
self.xw_att_helper.set_multiple_data([])
self.yw_att_helper.set_multiple_data([])
else:
self.xw_att_helper.set_multiple_data([self.reference_data])
self.yw_att_helper.set_multiple_data([self.reference_data])
def _update_priority(self, name):
if name == 'layers':
return 3
elif name == 'reference_data':
return 2
elif name.endswith(('_min', '_max')):
return 0
else:
return 1
@defer_draw
def _update_att(self, *args):
# Need to delay the callbacks here to make sure that we get a chance to
# update both x_att and y_att otherwise could end up triggering image
# slicing with two pixel components that are the same.
with delay_callback(self, 'x_att', 'y_att'):
if self.x_att_world is not None:
index = self.reference_data.world_component_ids.index(self.x_att_world)
self.x_att = self.reference_data.pixel_component_ids[index]
if self.y_att_world is not None:
index = self.reference_data.world_component_ids.index(self.y_att_world)
self.y_att = self.reference_data.pixel_component_ids[index]
@defer_draw
def _on_xatt_change(self, *args):
if self.x_att is not None:
self.x_att_world = self.reference_data.world_component_ids[self.x_att.axis]
@defer_draw
def _on_yatt_change(self, *args):
if self.y_att is not None:
self.y_att_world = self.reference_data.world_component_ids[self.y_att.axis]
@defer_draw
def _on_xatt_world_change(self, *args):
if self.x_att_world is not None and self.x_att_world == self.y_att_world:
world_ids = self.reference_data.world_component_ids
if self.x_att_world == world_ids[-1]:
self.y_att_world = world_ids[-2]
else:
self.y_att_world = world_ids[-1]
@defer_draw
def _on_yatt_world_change(self, *args):
if self.y_att_world is not None and self.y_att_world == self.x_att_world:
world_ids = self.reference_data.world_component_ids
if self.y_att_world == world_ids[-1]:
self.x_att_world = world_ids[-2]
else:
self.x_att_world = world_ids[-1]
def _set_reference_data(self):
if self.reference_data is None:
for layer in self.layers:
if isinstance(layer.layer, Data):
self.reference_data = layer.layer
return
def _set_default_slices(self):
# Need to make sure this gets called immediately when reference_data is changed
if self.reference_data is None:
self.slices = ()
else:
self.slices = (0,) * self.reference_data.ndim
@property
def numpy_slice_aggregation_transpose(self):
"""
Returns slicing information usable by Numpy.
        This returns three objects: a list that can be used to slice Numpy
        arrays, a list of aggregation functions (or `None`) used to collapse any
        remaining extra dimensions, and a boolean indicating whether to
        transpose the resulting 2D array.
"""
if self.reference_data is None:
return None
slices = []
agg_func = []
for i in range(self.reference_data.ndim):
if i == self.x_att.axis or i == self.y_att.axis:
slices.append(slice(None))
agg_func.append(None)
else:
if isinstance(self.slices[i], AggregateSlice):
slices.append(self.slices[i].slice)
agg_func.append(self.slices[i].function)
else:
slices.append(self.slices[i])
transpose = self.y_att.axis > self.x_att.axis
return slices, agg_func, transpose
@property
def wcsaxes_slice(self):
"""
Returns slicing information usable by WCSAxes.
        This returns an iterable of slices, with ``'x'`` and ``'y'`` standing in
        for the dimensions along which we are not slicing.
"""
if self.reference_data is None:
return None
slices = []
for i in range(self.reference_data.ndim):
if i == self.x_att.axis:
slices.append('x')
elif i == self.y_att.axis:
slices.append('y')
else:
if isinstance(self.slices[i], AggregateSlice):
slices.append(self.slices[i].center)
else:
slices.append(self.slices[i])
return slices[::-1]
def flip_x(self):
"""
Flip the x_min/x_max limits.
"""
self.x_lim_helper.flip_limits()
def flip_y(self):
"""
Flip the y_min/y_max limits.
"""
self.y_lim_helper.flip_limits()
class BaseImageLayerState(MatplotlibLayerState):
def get_sliced_data_shape(self, view=None):
if (self.viewer_state.reference_data is None or
self.viewer_state.x_att is None or
self.viewer_state.y_att is None):
return None
x_axis = self.viewer_state.x_att.axis
y_axis = self.viewer_state.y_att.axis
shape = self.viewer_state.reference_data.shape
shape_slice = shape[y_axis], shape[x_axis]
if view is None:
return shape_slice
else:
return view_shape(shape_slice, view)
def get_sliced_data(self, view=None):
slices, agg_func, transpose = self.viewer_state.numpy_slice_aggregation_transpose
full_view = slices
if view is not None and len(view) == 2:
x_axis = self.viewer_state.x_att.axis
y_axis = self.viewer_state.y_att.axis
full_view[x_axis] = view[1]
full_view[y_axis] = view[0]
view_applied = True
else:
view_applied = False
image = self._get_image(view=full_view)
# Apply aggregation functions if needed
if image.ndim != len(agg_func):
raise ValueError("Sliced image dimensions ({0}) does not match "
"aggregation function list ({1})"
.format(image.ndim, len(agg_func)))
for axis in range(image.ndim - 1, -1, -1):
func = agg_func[axis]
if func is not None:
image = func(image, axis=axis)
if image.ndim != 2:
raise ValueError("Image after aggregation should have two dimensions")
if transpose:
image = image.transpose()
if view_applied or view is None:
return image
else:
return image[view]
def _get_image(self, view=None):
raise NotImplementedError()
class ImageLayerState(BaseImageLayerState):
"""
A state class that includes all the attributes for data layers in an image plot.
"""
attribute = DDSCProperty(docstring='The attribute shown in the layer')
v_min = DDCProperty(docstring='The lower level shown')
    v_max = DDCProperty(docstring='The upper level shown')
percentile = DDSCProperty(docstring='The percentile value used to '
'automatically calculate levels')
contrast = DDCProperty(1, docstring='The contrast of the layer')
bias = DDCProperty(0.5, docstring='A constant value that is added to the '
'layer before rendering')
cmap = DDCProperty(docstring='The colormap used to render the layer')
stretch = DDSCProperty(docstring='The stretch used to render the layer, '
'which should be one of ``linear``, '
'``sqrt``, ``log``, or ``arcsinh``')
global_sync = DDCProperty(True, docstring='Whether the color and transparency '
'should be synced with the global '
'color and transparency for the data')
def __init__(self, layer=None, viewer_state=None, **kwargs):
super(ImageLayerState, self).__init__(layer=layer, viewer_state=viewer_state)
self.attribute_lim_helper = StateAttributeLimitsHelper(self, attribute='attribute',
percentile='percentile',
lower='v_min', upper='v_max')
self.attribute_att_helper = ComponentIDComboHelper(self, 'attribute',
numeric=True, categorical=False)
percentile_display = {100: 'Min/Max',
99.5: '99.5%',
99: '99%',
95: '95%',
90: '90%',
'Custom': 'Custom'}
ImageLayerState.percentile.set_choices(self, [100, 99.5, 99, 95, 90, 'Custom'])
ImageLayerState.percentile.set_display_func(self, percentile_display.get)
stretch_display = {'linear': 'Linear',
'sqrt': 'Square Root',
'arcsinh': 'Arcsinh',
'log': 'Logarithmic'}
ImageLayerState.stretch.set_choices(self, ['linear', 'sqrt', 'arcsinh', 'log'])
ImageLayerState.stretch.set_display_func(self, stretch_display.get)
self.add_callback('global_sync', self._update_syncing)
self.add_callback('layer', self._update_attribute)
self._update_syncing()
if layer is not None:
self._update_attribute()
self.update_from_dict(kwargs)
if self.cmap is None:
self.cmap = colormaps.members[0][1]
def _update_attribute(self, *args):
if self.layer is not None:
self.attribute_att_helper.set_multiple_data([self.layer])
self.attribute = self.layer.visible_components[0]
def _update_priority(self, name):
if name == 'layer':
return 3
elif name == 'attribute':
return 2
elif name == 'global_sync':
return 1.5
elif name.endswith(('_min', '_max')):
return 0
else:
return 1
def _update_syncing(self, *args):
if self.global_sync:
self._sync_color.enable_syncing()
self._sync_alpha.enable_syncing()
else:
self._sync_color.disable_syncing()
self._sync_alpha.disable_syncing()
def _get_image(self, view=None):
return self.layer[self.attribute, view]
def flip_limits(self):
"""
Flip the image levels.
"""
self.attribute_lim_helper.flip_limits()
def reset_contrast_bias(self):
with delay_callback(self, 'contrast', 'bias'):
self.contrast = 1
self.bias = 0.5
class ImageSubsetLayerState(BaseImageLayerState):
"""
A state class that includes all the attributes for subset layers in an image plot.
"""
def _get_image(self, view=None):
return self.layer.to_mask(view=view)
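# Illustrative sketch only (not glue API): how the (slices, agg_func, transpose)
# triple produced by ImageViewerState.numpy_slice_aggregation_transpose would be
# applied to a plain Numpy cube, mirroring BaseImageLayerState.get_sliced_data.
def _apply_slice_info(cube, slices, agg_func, transpose):
    image = cube[tuple(slices)]
    # collapse any remaining extra axes with their aggregation functions,
    # working from the last axis backwards so the axis indices stay valid
    for axis in range(image.ndim - 1, -1, -1):
        func = agg_func[axis]
        if func is not None:
            image = func(image, axis=axis)
    if transpose:
        image = image.transpose()
    return image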
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/image/state.py",
"copies": "1",
"size": "18096",
"license": "bsd-3-clause",
"hash": 8030047999076490000,
"line_mean": 37.9161290323,
"line_max": 100,
"alpha_frac": 0.5559239611,
"autogenerated": false,
"ratio": 4.196660482374768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001263199669289008,
"num_lines": 465
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import numpy as np
from glue.core import Subset
from glue.external.echo import (delay_callback, CallbackProperty,
HasCallbackProperties, CallbackList)
from glue.core.state import saver, loader
__all__ = ['State', 'StateAttributeCacheHelper',
'StateAttributeLimitsHelper', 'StateAttributeSingleValueHelper']
@saver(CallbackList)
def _save_callback_list(items, context):
return {'values': [context.id(item) for item in items]}
@loader(CallbackList)
def _load_callback_list(rec, context):
return [context.object(obj) for obj in rec['values']]
class State(HasCallbackProperties):
"""
A class to represent the state of a UI element. Initially this doesn't add
anything compared to HasCallbackProperties, but functionality will be added
over time.
"""
def __init__(self, **kwargs):
super(State, self).__init__()
self.update_from_dict(kwargs)
def update_from_state(self, state):
"""
Update this state using the values from another state.
Parameters
----------
state : `~glue.core.state_objects.State`
The state to use the values from
"""
self.update_from_dict(state.as_dict())
def update_from_dict(self, properties):
"""
Update this state using the values from a dictionary of attributes.
Parameters
----------
properties : dict
The dictionary containing attribute/value pairs.
"""
if len(properties) == 0:
return
# Group properties into priority so as to be able to update them in
# chunks and not fire off callback events between every single one.
groups = defaultdict(list)
for name in properties:
if self.is_callback_property(name):
groups[self._update_priority(name)].append(name)
for priority in sorted(groups, reverse=True):
with delay_callback(self, *groups[priority]):
for name in groups[priority]:
setattr(self, name, properties[name])
def as_dict(self):
"""
Return the current state as a dictionary of attribute/value pairs.
"""
properties = {}
for name in dir(self):
if not name.startswith('_') and self.is_callback_property(name):
properties[name] = getattr(self, name)
return properties
def __gluestate__(self, context):
return {'values': dict((key, context.id(value)) for key, value in self.as_dict().items())}
def _update_priority(self, name):
return 0
@classmethod
def __setgluestate__(cls, rec, context):
properties = dict((key, context.object(value)) for key, value in rec['values'].items())
result = cls(**properties)
return result
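# Illustrative sketch (not part of the original module): the State round trip
# described above. Callback properties are declared on a subclass, bulk-updated
# through update_from_dict (grouped by _update_priority so callbacks only fire
# once per group), and read back with as_dict().
def _state_sketch():
    class ViewLimits(State):
        x_min = CallbackProperty(0)
        x_max = CallbackProperty(1)
    limits = ViewLimits()
    limits.update_from_dict({'x_min': -5, 'x_max': 5})
    return limits.as_dict()  # {'x_min': -5, 'x_max': 5}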
class StateAttributeCacheHelper(object):
"""
Generic class to help with caching values on a per-attribute basis
Parameters
----------
state : :class:`glue.core.state_objects.State`
The state object with the callback properties to cache
attribute : str
The attribute name - this will be populated once a dataset is assigned
to the helper
cache : dict, optional
A dictionary that can be used to hold the cache. This option can be used
if a common cache should be shared between different helpers.
kwargs
Additional keyword arguments are taken to be values that should be
used/cached. The key should be the name to be understood by sub-classes
of this base class, and the value should be the name of the attribute
in the state.
"""
def __init__(self, state, attribute, cache=None, **kwargs):
self._state = state
self._attribute = attribute
self._values = dict((key, kwargs[key]) for key in self.values_names if key in kwargs)
self._modifiers = dict((key, kwargs[key]) for key in self.modifiers_names if key in kwargs)
self._attribute_lookup = {'attribute': self._attribute}
self._attribute_lookup.update(self._values)
self._attribute_lookup.update(self._modifiers)
self._attribute_lookup_inv = {v: k for k, v in self._attribute_lookup.items()}
self._state.add_callback(self._attribute, self._update_attribute)
self._state.add_global_callback(self._update_values)
# NOTE: don't use self._cache = cache or {} here since if the initial
# cache is empty it will evaluate as False!
if cache is None:
self._cache = {}
else:
self._cache = cache
@property
def data_values(self):
# For subsets in 'data' mode, we want to compute the limits based on
# the full dataset, not just the subset.
if isinstance(self.data, Subset):
return self.data.data[self.component_id]
else:
return self.data[self.component_id]
@property
def data_component(self):
# For subsets in 'data' mode, we want to compute the limits based on
# the full dataset, not just the subset.
if isinstance(self.data, Subset):
return self.data.data.get_component(self.component_id)
else:
return self.data.get_component(self.component_id)
def invalidate_cache(self):
self._cache.clear()
@property
def data(self):
if self.attribute is None:
return None
else:
return self.attribute.parent
@property
def component_id(self):
if self.attribute is None:
return None
else:
return self.attribute
def set_cache(self, cache):
self._cache = cache
self._update_attribute()
def _update_attribute(self, *args):
if self.component_id in self._cache:
# The component ID already exists in the cache, so just revert to
# that version of the values/settings.
self.set(cache=False, **self._cache[self.component_id])
else:
# We need to compute the values for the first time
self.update_values(attribute=self.component_id, use_default_modifiers=True)
def _update_values(self, **properties):
if hasattr(self, '_in_set'):
if self._in_set:
return
if self.attribute is None:
return
properties = dict((self._attribute_lookup_inv[key], value)
for key, value in properties.items() if key in self._attribute_lookup_inv and self._attribute_lookup_inv[key] != 'attribute')
if len(properties) > 0:
self.update_values(**properties)
def _modifiers_as_dict(self):
return dict((prop, getattr(self, prop)) for prop in self.modifiers_names if prop in self._modifiers)
def _values_as_dict(self):
return dict((prop, getattr(self, prop)) for prop in self.values_names if prop in self._values)
def _update_cache(self):
if self.component_id is not None:
self._cache[self.component_id] = {}
self._cache[self.component_id].update(self._modifiers_as_dict())
self._cache[self.component_id].update(self._values_as_dict())
def __getattr__(self, attribute):
if attribute in self._attribute_lookup:
return getattr(self._state, self._attribute_lookup[attribute])
else:
raise AttributeError(attribute)
def __setattr__(self, attribute, value):
if attribute.startswith('_') or attribute not in self._attribute_lookup:
return object.__setattr__(self, attribute, value)
else:
return setattr(self._state, self._attribute_lookup[attribute], value)
def set(self, cache=True, **kwargs):
self._in_set = True
extra_kwargs = set(kwargs.keys()) - set(self.values_names) - set(self.modifiers_names)
if len(extra_kwargs) > 0:
raise ValueError("Invalid properties: {0}".format(extra_kwargs))
with delay_callback(self._state, *self._attribute_lookup.values()):
for prop, value in kwargs.items():
setattr(self, prop, value)
if cache:
self._update_cache()
self._in_set = False
class StateAttributeLimitsHelper(StateAttributeCacheHelper):
"""
This class is a helper for attribute-dependent min/max level values. It
is equivalent to AttributeLimitsHelper but operates on State objects and
is GUI-independent.
Parameters
----------
attribute : str
The attribute name - this will be populated once a dataset is assigned
to the helper.
percentile_subset : int
How many points to use at most for the percentile calculation (using all
values is highly inefficient and not needed)
lower, upper : str
The fields for the lower/upper levels
    percentile : str
        The field for the percentile mode, which holds either a numeric
        percentile level (for example 100 for Min/Max) or ``'Custom'``
    log : str
        The field for the log flag; when the limits are in log mode, only
        positive values are used when finding the limits
Notes
-----
    Once the helper is instantiated, the attribute whose limits are tracked is
    whatever the state property named by ``attribute`` points to; assigning a
    new component ID to that property on the state object causes the limits to
    be recomputed (or restored from the per-attribute cache).
"""
values_names = ('lower', 'upper')
modifiers_names = ('log', 'percentile')
def __init__(self, state, attribute, percentile_subset=10000, cache=None, **kwargs):
super(StateAttributeLimitsHelper, self).__init__(state, attribute, cache=cache, **kwargs)
self.percentile_subset = percentile_subset
if self.attribute is not None:
if (self.lower is not None and self.upper is not None and getattr(self, 'percentile', None) is None):
# If the lower and upper limits are already set, we need to make
# sure we don't override them, so we set the percentile mode to
# custom if it isn't already set.
self.set(percentile='Custom')
else:
# Otherwise, we force the recalculation or the fetching from
# cache of the limits based on the current attribute
self._update_attribute()
def update_values(self, force=False, use_default_modifiers=False, **properties):
if not force and not any(prop in properties for prop in ('attribute', 'percentile', 'log')):
self.set(percentile='Custom')
return
if use_default_modifiers:
percentile = 100
log = False
else:
percentile = self.percentile or 100
log = self.log or False
if not force and (percentile == 'Custom' or not hasattr(self, 'data') or self.data is None):
self.set(percentile=percentile, log=log)
else:
exclude = (100 - percentile) / 2.
data_values = self.data_values
if data_values.size > self.percentile_subset:
data_values = np.random.choice(data_values.ravel(), self.percentile_subset)
if log:
data_values = data_values[data_values > 0]
if len(data_values) == 0:
self.set(lower=0.1, upper=1, percentile=percentile, log=log)
return
try:
lower = np.nanpercentile(data_values, exclude)
upper = np.nanpercentile(data_values, 100 - exclude)
except AttributeError: # Numpy < 1.9
data_values = data_values[~np.isnan(data_values)]
lower = np.percentile(data_values, exclude)
upper = np.percentile(data_values, 100 - exclude)
if self.data_component.categorical:
lower = np.floor(lower - 0.5) + 0.5
upper = np.ceil(upper + 0.5) - 0.5
self.set(lower=lower, upper=upper, percentile=percentile, log=log)
def flip_limits(self):
self.set(lower=self.upper, upper=self.lower)
class StateAttributeSingleValueHelper(StateAttributeCacheHelper):
values_names = ('value',)
modifiers_names = ()
def __init__(self, state, attribute, function, mode='values', **kwargs):
self._function = function
super(StateAttributeSingleValueHelper, self).__init__(state, attribute, **kwargs)
if self.attribute is not None:
self._update_attribute()
if mode in ('values', 'component'):
self.mode = mode
else:
raise ValueError('mode should be one of "values" or "component"')
def update_values(self, use_default_modifiers=False, **properties):
if not any(prop in properties for prop in ('attribute',)) or self.data is None:
self.set()
else:
if self.mode == 'values':
arg = self.data_values
else:
arg = self.data_component
self.set(value=self._function(arg))
class StateAttributeHistogramHelper(StateAttributeCacheHelper):
values_names = ('lower', 'upper', 'n_bin')
modifiers_names = ()
def __init__(self, *args, **kwargs):
self._max_n_bin = kwargs.pop('max_n_bin', 30)
self._default_n_bin = kwargs.pop('default_n_bin', 15)
common_n_bin_att = kwargs.pop('common_n_bin', None)
super(StateAttributeHistogramHelper, self).__init__(*args, **kwargs)
if common_n_bin_att is not None:
if getattr(self._state, common_n_bin_att):
self._common_n_bin = self._default_n_bin
else:
self._common_n_bin = None
self._state.add_callback(common_n_bin_att, self._update_common_n_bin)
else:
self._common_n_bin = None
def _apply_common_n_bin(self):
for att in self._cache:
cmp = self.data.get_component(att)
if not cmp.categorical:
self._cache[att]['n_bin'] = self._common_n_bin
def _update_common_n_bin(self, common_n_bin):
if common_n_bin:
if self.data_component.categorical:
self._common_n_bin = self._default_n_bin
else:
self._common_n_bin = self.n_bin
self._apply_common_n_bin()
else:
self._common_n_bin = None
def update_values(self, force=False, use_default_modifiers=False, **properties):
if not force and not any(prop in properties for prop in ('attribute', 'n_bin')) or self.data is None:
self.set()
return
comp = self.data_component
if 'n_bin' in properties:
self.set()
if self._common_n_bin is not None and not comp.categorical:
self._common_n_bin = properties['n_bin']
self._apply_common_n_bin()
if 'attribute' in properties or force:
if comp.categorical:
n_bin = max(1, min(comp.categories.size, self._max_n_bin))
lower = -0.5
upper = lower + comp.categories.size
else:
if self._common_n_bin is None:
n_bin = self._default_n_bin
else:
n_bin = self._common_n_bin
values = self.data_values
lower = np.nanmin(values)
upper = np.nanmax(values)
self.set(lower=lower, upper=upper, n_bin=n_bin)
if __name__ == "__main__":
from glue.core import Data
    class TestState(State):
        layer = CallbackProperty()
        comp = CallbackProperty()
        lower = CallbackProperty()
        higher = CallbackProperty()
        log = CallbackProperty()
        scale = CallbackProperty()
    state = TestState()
    state.layer = Data(x=np.arange(10),
                       y=np.arange(10) / 3.,
                       z=np.arange(10) - 5)
    # The keyword values are the names of the state properties that hold each
    # setting, not the settings themselves
    helper = StateAttributeLimitsHelper(state, attribute='comp',
                                        lower='lower', upper='higher',
                                        percentile='scale', log='log')
    # Assigning a component ID to the tracked attribute property triggers the
    # limits calculation
    state.comp = state.layer.id['x']
    print(state.lower, state.higher)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/state_objects.py",
"copies": "1",
"size": "16494",
"license": "bsd-3-clause",
"hash": 4388020468937126400,
"line_mean": 33.8710359408,
"line_max": 151,
"alpha_frac": 0.5957924094,
"autogenerated": false,
"ratio": 4.275272161741835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006386879409072515,
"num_lines": 473
} |
from __future__ import absolute_import, division, print_function
from collections import deque, defaultdict
from datetime import timedelta
import functools
import logging
import six
import sys
import threading
from time import time
import weakref
import toolz
from tornado import gen
from tornado.locks import Condition
from tornado.ioloop import IOLoop
from tornado.queues import Queue
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from collections.abc import Iterable
from .compatibility import get_thread_identity
from .orderedweakset import OrderedWeakrefSet
no_default = '--no-default--'
_global_sinks = set()
_html_update_streams = set()
thread_state = threading.local()
logger = logging.getLogger(__name__)
_io_loops = []
def get_io_loop(asynchronous=None):
if asynchronous:
return IOLoop.current()
if not _io_loops:
loop = IOLoop()
thread = threading.Thread(target=loop.start)
thread.daemon = True
thread.start()
_io_loops.append(loop)
return _io_loops[-1]
def identity(x):
return x
class RefCounter:
""" A counter to track references to data
This class is used to track how many nodes in the DAG are referencing
a particular element in the pipeline. When the count reaches zero,
then parties interested in knowing if data is done being processed are
notified
Parameters
----------
initial: int, optional
The initial value of the reference counter
cb: callable
The function to use a callback when the reference count reaches zero
loop: tornado.ioloop.IOLoop
The loop on which to create a callback when the reference count
reaches zero
"""
def __init__(self, initial=0, cb=None, loop=None):
self.loop = loop if loop else get_io_loop()
self.count = initial
self.cb = cb
def retain(self, n=1):
"""Retain the reference
Parameters
----------
n: The number of times to retain the reference
"""
self.count += n
def release(self, n=1):
"""Release the reference
        If the reference count drops to zero or below, the callback, if
        provided, will be added to the provided loop or the default loop
Parameters
----------
n: The number of references to release
"""
self.count -= n
if self.count <= 0 and self.cb:
self.loop.add_callback(self.cb)
def __str__(self):
return '<RefCounter count={}>'.format(self.count)
__repr__ = __str__
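# Illustrative sketch only (not part of the streamz API): the retain/release
# bookkeeping RefCounter provides. Each consumer that will touch a piece of data
# retains the counter; once every consumer has released it, the callback is
# scheduled on the loop so that interested parties know the data is done.
def _refcounter_sketch():
    done = []
    loop = IOLoop()
    counter = RefCounter(cb=lambda: done.append(True), loop=loop)
    counter.retain(2)   # two consumers will process the data
    counter.release()   # first consumer finished
    counter.release()   # second consumer finished -> callback queued on loop
    loop.run_sync(lambda: None)  # spin the loop once so the callback runs
    loop.close()
    return done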
class Stream(object):
""" A Stream is an infinite sequence of data.
Streams subscribe to each other passing and transforming data between them.
A Stream object listens for updates from upstream, reacts to these updates,
and then emits more data to flow downstream to all Stream objects that
subscribe to it. Downstream Stream objects may connect at any point of a
Stream graph to get a full view of the data coming off of that point to do
with as they will.
Parameters
----------
stream_name: str or None
This is the name of the stream.
asynchronous: boolean or None
Whether or not this stream will be used in asynchronous functions or
normal Python functions. Leave as None if you don't know.
True will cause operations like emit to return awaitable Futures
False will use an Event loop in another thread (starts it if necessary)
ensure_io_loop: boolean
Ensure that some IOLoop will be created. If asynchronous is None or
False then this will be in a separate thread, otherwise it will be
IOLoop.current
Examples
--------
>>> def inc(x):
... return x + 1
>>> source = Stream() # Create a stream object
>>> s = source.map(inc).map(str) # Subscribe to make new streams
>>> s.sink(print) # take an action whenever an element reaches the end
>>> L = list()
>>> s.sink(L.append) # or take multiple actions (streams can branch)
>>> for i in range(5):
... source.emit(i) # push data in at the source
'1'
'2'
'3'
'4'
'5'
>>> L # and the actions happen at the sinks
['1', '2', '3', '4', '5']
"""
_graphviz_shape = 'ellipse'
_graphviz_style = 'rounded,filled'
_graphviz_fillcolor = 'white'
_graphviz_orientation = 0
str_list = ['func', 'predicate', 'n', 'interval']
def __init__(self, upstream=None, upstreams=None, stream_name=None,
loop=None, asynchronous=None, ensure_io_loop=False):
self.downstreams = OrderedWeakrefSet()
if upstreams is not None:
self.upstreams = list(upstreams)
else:
self.upstreams = [upstream]
self._set_asynchronous(asynchronous)
self._set_loop(loop)
if ensure_io_loop and not self.loop:
self._set_asynchronous(False)
if self.loop is None and self.asynchronous is not None:
self._set_loop(get_io_loop(self.asynchronous))
for upstream in self.upstreams:
if upstream:
upstream.downstreams.add(self)
self.name = stream_name
def _set_loop(self, loop):
self.loop = None
if loop is not None:
self._inform_loop(loop)
else:
for upstream in self.upstreams:
if upstream and upstream.loop:
self.loop = upstream.loop
break
def _inform_loop(self, loop):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.loop is not None:
if self.loop is not loop:
raise ValueError("Two different event loops active")
else:
self.loop = loop
for upstream in self.upstreams:
if upstream:
upstream._inform_loop(loop)
for downstream in self.downstreams:
if downstream:
downstream._inform_loop(loop)
def _set_asynchronous(self, asynchronous):
self.asynchronous = None
if asynchronous is not None:
self._inform_asynchronous(asynchronous)
else:
for upstream in self.upstreams:
if upstream and upstream.asynchronous:
self.asynchronous = upstream.asynchronous
break
def _inform_asynchronous(self, asynchronous):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.asynchronous is not None:
if self.asynchronous is not asynchronous:
raise ValueError("Stream has both asynchronous and synchronous elements")
else:
self.asynchronous = asynchronous
for upstream in self.upstreams:
if upstream:
upstream._inform_asynchronous(asynchronous)
for downstream in self.downstreams:
if downstream:
downstream._inform_asynchronous(asynchronous)
def _add_upstream(self, upstream):
"""Add upstream to current upstreams, this method is overridden for
classes which handle stream specific buffers/caches"""
if self.upstreams == [None]:
self.upstreams[0] = upstream
else:
self.upstreams.append(upstream)
def _add_downstream(self, downstream):
"""Add downstream to current downstreams"""
self.downstreams.add(downstream)
def _remove_downstream(self, downstream):
"""Remove downstream from current downstreams"""
self.downstreams.remove(downstream)
def _remove_upstream(self, upstream):
"""Remove upstream from current upstreams, this method is overridden for
classes which handle stream specific buffers/caches"""
if len(self.upstreams) == 1:
            self.upstreams[0] = None
else:
self.upstreams.remove(upstream)
@classmethod
def register_api(cls, modifier=identity, attribute_name=None):
""" Add callable to Stream API
This allows you to register a new method onto this class. You can use
it as a decorator.::
>>> @Stream.register_api()
... class foo(Stream):
... ...
>>> Stream().foo(...) # this works now
It attaches the callable as a normal attribute to the class object. In
doing so it respects inheritance (all subclasses of Stream will also
get the foo attribute).
By default callables are assumed to be instance methods. If you like
you can include modifiers to apply before attaching to the class as in
the following case where we construct a ``staticmethod``.
>>> @Stream.register_api(staticmethod)
... class foo(Stream):
... ...
>>> Stream.foo(...) # Foo operates as a static method
You can also provide an optional ``attribute_name`` argument to control
the name of the attribute your callable will be attached as.
>>> @Stream.register_api(attribute_name="bar")
... class foo(Stream):
... ...
        >>> Stream().bar(...)  # foo was actually attached as bar
"""
def _(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
name = attribute_name if attribute_name else func.__name__
setattr(cls, name, modifier(wrapped))
return func
return _
@classmethod
def register_plugin_entry_point(cls, entry_point, modifier=identity):
if hasattr(cls, entry_point.name):
raise ValueError(
f"Can't add {entry_point.name} from {entry_point.module_name} "
f"to {cls.__name__}: duplicate method name."
)
def stub(*args, **kwargs):
""" Entrypoints-based streamz plugin. Will be loaded on first call. """
node = entry_point.load()
if not issubclass(node, Stream):
raise TypeError(
f"Error loading {entry_point.name} "
f"from module {entry_point.module_name}: "
f"{node.__class__.__name__} must be a subclass of Stream"
)
if getattr(cls, entry_point.name).__name__ == "stub":
cls.register_api(
modifier=modifier, attribute_name=entry_point.name
)(node)
return node(*args, **kwargs)
cls.register_api(modifier=modifier, attribute_name=entry_point.name)(stub)
def start(self):
""" Start any upstream sources """
for upstream in self.upstreams:
upstream.start()
def __str__(self):
s_list = []
if self.name:
s_list.append('{}; {}'.format(self.name, self.__class__.__name__))
else:
s_list.append(self.__class__.__name__)
for m in self.str_list:
s = ''
at = getattr(self, m, None)
if at:
if not callable(at):
s = str(at)
elif hasattr(at, '__name__'):
s = getattr(self, m).__name__
elif hasattr(at.__class__, '__name__'):
s = getattr(self, m).__class__.__name__
else:
s = None
if s:
s_list.append('{}={}'.format(m, s))
if len(s_list) <= 2:
s_list = [term.split('=')[-1] for term in s_list]
text = "<"
text += s_list[0]
if len(s_list) > 1:
text += ': '
text += ', '.join(s_list[1:])
text += '>'
return text
__repr__ = __str__
def _ipython_display_(self, **kwargs): # pragma: no cover
try:
from ipywidgets import Output
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
if hasattr(self, '_repr_html_'):
return self._repr_html_()
else:
return self.__repr__()
output = Output(_view_count=0)
output_ref = weakref.ref(output)
def update_cell(val):
output = output_ref()
if output is None:
return
with output:
content, *_ = InteractiveShell.instance().display_formatter.format(val)
output.outputs = ({'output_type': 'display_data',
'data': content,
'metadata': {}},)
s = self.map(update_cell)
_html_update_streams.add(s)
self.output_ref = output_ref
s_ref = weakref.ref(s)
def remove_stream(change):
output = output_ref()
if output is None:
return
if output._view_count == 0:
ss = s_ref()
ss.destroy()
_html_update_streams.remove(ss) # trigger gc
output.observe(remove_stream, '_view_count')
return output._ipython_display_(**kwargs)
def _emit(self, x, metadata=None):
"""
Push data into the stream at this point
Parameters
----------
x: any
an element of data
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
"""
if metadata:
self._retain_refs(metadata, len(self.downstreams))
else:
metadata = []
result = []
for downstream in list(self.downstreams):
r = downstream.update(x, who=self, metadata=metadata)
if type(r) is list:
result.extend(r)
else:
result.append(r)
self._release_refs(metadata)
return [element for element in result if element is not None]
def emit(self, x, asynchronous=False, metadata=None):
""" Push data into the stream at this point
This is typically done only at source Streams but can theoretically be
done at any point
Parameters
----------
x: any
an element of data
asynchronous:
emit asynchronously
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
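        Examples
        --------
        A minimal sketch of pushing one element through a small pipeline:
        >>> source = Stream()
        >>> source.map(lambda x: 2 * x).sink(print)
        >>> source.emit(3)
        6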
"""
ts_async = getattr(thread_state, 'asynchronous', False)
if self.loop is None or asynchronous or self.asynchronous or ts_async:
if not ts_async:
thread_state.asynchronous = True
try:
result = self._emit(x, metadata=metadata)
if self.loop:
return gen.convert_yielded(result)
finally:
thread_state.asynchronous = ts_async
else:
@gen.coroutine
def _():
thread_state.asynchronous = True
try:
result = yield self._emit(x, metadata=metadata)
finally:
del thread_state.asynchronous
raise gen.Return(result)
sync(self.loop, _)
def update(self, x, who=None, metadata=None):
return self._emit(x, metadata=metadata)
def gather(self):
""" This is a no-op for core streamz
This allows gather to be used in both dask and core streams
"""
return self
def connect(self, downstream):
""" Connect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to connect to
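        Examples
        --------
        A small sketch wiring two independently created nodes together:
        >>> source = Stream()
        >>> other = Stream()
        >>> other.sink(print)
        >>> source.connect(other)
        >>> source.emit(1)
        1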
"""
self._add_downstream(downstream)
downstream._add_upstream(self)
def disconnect(self, downstream):
""" Disconnect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to disconnect from
"""
self._remove_downstream(downstream)
downstream._remove_upstream(self)
@property
def upstream(self):
if len(self.upstreams) != 1:
raise ValueError("Stream has multiple upstreams")
else:
return self.upstreams[0]
def destroy(self, streams=None):
"""
Disconnect this stream from any upstream sources
"""
if streams is None:
streams = self.upstreams
for upstream in list(streams):
upstream.downstreams.remove(self)
self.upstreams.remove(upstream)
def scatter(self, **kwargs):
from .dask import scatter
return scatter(self, **kwargs)
def remove(self, predicate):
""" Only pass through elements for which the predicate returns False """
return self.filter(lambda x: not predicate(x))
@property
def scan(self):
return self.accumulate
@property
def concat(self):
return self.flatten
def sink_to_list(self):
""" Append all elements of a stream to a list as they come in
Examples
--------
>>> source = Stream()
>>> L = source.map(lambda x: 10 * x).sink_to_list()
>>> for i in range(5):
... source.emit(i)
>>> L
[0, 10, 20, 30, 40]
"""
L = []
self.sink(L.append)
return L
def frequencies(self, **kwargs):
""" Count occurrences of elements """
def update_frequencies(last, x):
return toolz.assoc(last, x, last.get(x, 0) + 1)
return self.scan(update_frequencies, start={}, **kwargs)
def visualize(self, filename='mystream.png', **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` and ``networkx`` to be installed.
Parameters
----------
filename : str, optional
The name of the file to write to disk.
kwargs:
Graph attributes to pass to graphviz like ``rankdir="LR"``
"""
from .graph import visualize
return visualize(self, filename, **kwargs)
def to_dataframe(self, example):
""" Convert a stream of Pandas dataframes to a DataFrame
Examples
--------
>>> source = Stream()
        >>> sdf = source.to_dataframe(example=pd.DataFrame({'x': [], 'y': []}))  # doctest: +SKIP
>>> L = sdf.groupby(sdf.x).y.mean().stream.sink_to_list()
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
"""
from .dataframe import DataFrame
return DataFrame(stream=self, example=example)
def to_batch(self, **kwargs):
""" Convert a stream of lists to a Batch
All elements of the stream are assumed to be lists or tuples
Examples
--------
>>> source = Stream()
>>> batches = source.to_batch()
>>> L = batches.pluck('value').map(inc).sum().stream.sink_to_list()
>>> source.emit([{'name': 'Alice', 'value': 1},
... {'name': 'Bob', 'value': 2},
... {'name': 'Charlie', 'value': 3}])
>>> source.emit([{'name': 'Alice', 'value': 4},
... {'name': 'Bob', 'value': 5},
... {'name': 'Charlie', 'value': 6}])
"""
from .batch import Batch
return Batch(stream=self, **kwargs)
def _retain_refs(self, metadata, n=1):
""" Retain all references in the provided metadata `n` number of times
Parameters
----------
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
n: The number of times to retain the provided references
"""
for m in metadata:
if 'ref' in m:
m['ref'].retain(n)
def _release_refs(self, metadata, n=1):
""" Release all references in the provided metadata `n` number of times
Parameters
----------
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
        n: The number of times to release the provided references
"""
for m in metadata:
if 'ref' in m:
m['ref'].release(n)
@Stream.register_api()
class sink(Stream):
""" Apply a function on every element
Examples
--------
>>> source = Stream()
>>> L = list()
>>> source.sink(L.append)
>>> source.sink(print)
>>> source.sink(print)
>>> source.emit(123)
123
123
>>> L
[123]
See Also
--------
map
Stream.sink_to_list
"""
_graphviz_shape = 'trapezium'
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# take the stream specific kwargs out
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
_global_sinks.add(self)
def update(self, x, who=None, metadata=None):
result = self.func(x, *self.args, **self.kwargs)
if gen.isawaitable(result):
return result
else:
return []
@Stream.register_api()
class map(Stream):
""" Apply a function to every element in the stream
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.map(lambda x: 2*x).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
try:
result = self.func(x, *self.args, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result, metadata=metadata)
@Stream.register_api()
class starmap(Stream):
""" Apply a function to every element in the stream, splayed out
See ``itertools.starmap``
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.starmap(lambda a, b: a + b).sink(print)
>>> for i in range(5):
... source.emit((i, i))
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
y = x + self.args
try:
result = self.func(*y, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result, metadata=metadata)
def _truthy(x):
return not not x
@Stream.register_api()
class filter(Stream):
""" Only pass through elements that satisfy the predicate
Parameters
----------
predicate : function
The predicate. Should return True or False, where
True means that the predicate is satisfied.
*args :
The arguments to pass to the predicate.
**kwargs:
Keyword arguments to pass to predicate
Examples
--------
>>> source = Stream()
>>> source.filter(lambda x: x % 2 == 0).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
"""
def __init__(self, upstream, predicate, *args, **kwargs):
if predicate is None:
predicate = _truthy
self.predicate = predicate
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
if self.predicate(x, *self.args, **self.kwargs):
return self._emit(x, metadata=metadata)
@Stream.register_api()
class accumulate(Stream):
""" Accumulate results with previous state
This performs running or cumulative reductions, applying the function
to the previous total and the new element. The function should take
two arguments, the previous accumulated state and the next element and
it should return a new accumulated state,
- ``state = func(previous_state, new_value)`` (returns_state=False)
- ``state, result = func(previous_state, new_value)`` (returns_state=True)
where the new_state is passed to the next invocation. The state or result
is emitted downstream for the two cases.
Parameters
----------
func: callable
start: object
Initial value, passed as the value of ``previous_state`` on the first
invocation. Defaults to the first submitted element
returns_state: boolean
If true then func should return both the state and the value to emit
If false then both values are the same, and func returns one value
**kwargs:
Keyword arguments to pass to func
Examples
--------
A running total, producing triangular numbers
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + x).sink(print)
>>> for i in range(5):
... source.emit(i)
0
1
3
6
10
A count of number of events (including the current one)
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + 1, start=0).sink(print)
>>> for _ in range(5):
... source.emit(0)
1
2
3
4
5
Like the builtin "enumerate".
>>> source = Stream()
>>> source.accumulate(lambda acc, x: ((acc[0] + 1, x), (acc[0], x)),
... start=(0, 0), returns_state=True
... ).sink(print)
>>> for i in range(3):
... source.emit(0)
(0, 0)
(1, 0)
(2, 0)
"""
_graphviz_shape = 'box'
def __init__(self, upstream, func, start=no_default, returns_state=False,
**kwargs):
self.func = func
self.kwargs = kwargs
self.state = start
self.returns_state = returns_state
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.with_state = kwargs.pop('with_state', False)
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
if self.state is no_default:
self.state = x
if self.with_state:
return self._emit((self.state, x), metadata=metadata)
else:
return self._emit(x, metadata=metadata)
else:
try:
result = self.func(self.state, x, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
if self.returns_state:
state, result = result
else:
state = result
self.state = state
if self.with_state:
return self._emit((self.state, result), metadata=metadata)
else:
return self._emit(result, metadata=metadata)
@Stream.register_api()
class slice(Stream):
"""
Get only some events in a stream by position. Works like list[] syntax.
Parameters
----------
start : int
        First event to use. If None, start from the beginning
end : int
Last event to use (non-inclusive). If None, continue without stopping.
Does not support negative indexing.
step : int
Pass on every Nth event. If None, pass every one.
Examples
--------
>>> source = Stream()
>>> source.slice(2, 6, 2).sink(print)
>>> for i in range(5):
    ...     source.emit(i)
2
4
"""
def __init__(self, upstream, start=None, end=None, step=None, **kwargs):
self.state = 0
        self.start = start or 0
self.end = end
self.step = step or 1
if any((_ or 0) < 0 for _ in [start, end, step]):
raise ValueError("Negative indices not supported by slice")
stream_name = kwargs.pop('stream_name', None)
Stream.__init__(self, upstream, stream_name=stream_name)
self._check_end()
def update(self, x, who=None, metadata=None):
        if self.state >= self.start and self.state % self.step == 0:
self.emit(x, metadata=metadata)
self.state += 1
self._check_end()
def _check_end(self):
if self.end and self.state >= self.end:
# we're done
for upstream in self.upstreams:
upstream._remove_downstream(self)
@Stream.register_api()
class partition(Stream):
""" Partition stream into tuples of equal size
Parameters
----------
n: int
Maximum partition size
timeout: int or float, optional
Number of seconds after which a partition will be emitted,
even if its size is less than ``n``. If ``None`` (default),
a partition will be emitted only when its size reaches ``n``.
key: hashable or callable, optional
Emit items with the same key together as a separate partition.
If ``key`` is callable, partition will be identified by ``key(x)``,
otherwise by ``x[key]``. Defaults to ``None``.
Examples
--------
>>> source = Stream()
>>> source.partition(3).sink(print)
>>> for i in range(10):
... source.emit(i)
(0, 1, 2)
(3, 4, 5)
(6, 7, 8)
>>> source = Stream()
>>> source.partition(2, key=lambda x: x % 2).sink(print)
>>> for i in range(4):
... source.emit(i)
(0, 2)
(1, 3)
>>> from time import sleep
>>> source = Stream()
>>> source.partition(5, timeout=1).sink(print)
>>> for i in range(3):
... source.emit(i)
>>> sleep(1)
(0, 1, 2)
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, timeout=None, key=None, **kwargs):
self.n = n
self._timeout = timeout
self._key = key
self._buffer = defaultdict(lambda: [])
self._metadata_buffer = defaultdict(lambda: [])
self._callbacks = {}
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
def _get_key(self, x):
if self._key is None:
return None
if callable(self._key):
return self._key(x)
return x[self._key]
@gen.coroutine
def _flush(self, key):
result, self._buffer[key] = self._buffer[key], []
metadata_result, self._metadata_buffer[key] = self._metadata_buffer[key], []
yield self._emit(tuple(result), list(metadata_result))
self._release_refs(metadata_result)
@gen.coroutine
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
key = self._get_key(x)
buffer = self._buffer[key]
metadata_buffer = self._metadata_buffer[key]
buffer.append(x)
if isinstance(metadata, list):
metadata_buffer.extend(metadata)
else:
metadata_buffer.append(metadata)
if len(buffer) == self.n:
if self._timeout is not None and self.n > 1:
self._callbacks[key].cancel()
yield self._flush(key)
return
if len(buffer) == 1 and self._timeout is not None:
self._callbacks[key] = self.loop.call_later(
self._timeout, self._flush, key
)
@Stream.register_api()
class sliding_window(Stream):
""" Produce overlapping tuples of size n
Parameters
----------
return_partial : bool
If True, yield tuples as soon as any events come in, each tuple being
smaller or equal to the window size. If False, only start yielding
tuples once a full window has accrued.
Examples
--------
>>> source = Stream()
>>> source.sliding_window(3, return_partial=False).sink(print)
>>> for i in range(8):
... source.emit(i)
(0, 1, 2)
(1, 2, 3)
(2, 3, 4)
(3, 4, 5)
(4, 5, 6)
(5, 6, 7)
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, return_partial=True, **kwargs):
self.n = n
self._buffer = deque(maxlen=n)
self.metadata_buffer = deque(maxlen=n)
self.partial = return_partial
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
self._buffer.append(x)
if not isinstance(metadata, list):
metadata = [metadata]
self.metadata_buffer.append(metadata)
if self.partial or len(self._buffer) == self.n:
flat_metadata = [m for ml in self.metadata_buffer for m in ml]
ret = self._emit(tuple(self._buffer), flat_metadata)
if len(self.metadata_buffer) == self.n:
completed = self.metadata_buffer.popleft()
self._release_refs(completed)
return ret
else:
return []
def convert_interval(interval):
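    """Convert a time interval to seconds.
    Strings (e.g. ``"500ms"``) are parsed with ``pandas.Timedelta``; numeric
    values are returned unchanged.
    """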
if isinstance(interval, str):
import pandas as pd
interval = pd.Timedelta(interval).total_seconds()
return interval
@Stream.register_api()
class timed_window(Stream):
""" Emit a tuple of collected results every interval
Every ``interval`` seconds this emits a tuple of all of the results
seen so far. This can help to batch data coming off of a high-volume
stream.
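    Examples
    --------
    A rough sketch; output timing depends on the running event loop:
    >>> source = Stream()  # doctest: +SKIP
    >>> source.timed_window(0.1).sink(print)  # doctest: +SKIP
    >>> for i in range(3):
    ...     source.emit(i)  # doctest: +SKIP
    (0, 1, 2)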
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self._buffer = []
self.metadata_buffer = []
self.last = gen.moment
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
self._buffer.append(x)
self._retain_refs(metadata)
self.metadata_buffer.append(metadata)
return self.last
@gen.coroutine
def cb(self):
while True:
L, self._buffer = self._buffer, []
metadata, self.metadata_buffer = self.metadata_buffer, []
m = [m for ml in metadata for m in ml]
self.last = self._emit(L, m)
self._release_refs(m)
yield self.last
yield gen.sleep(self.interval)
@Stream.register_api()
class delay(Stream):
""" Add a time delay to results """
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.queue = Queue()
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
@gen.coroutine
def cb(self):
while True:
last = time()
x, metadata = yield self.queue.get()
yield self._emit(x, metadata=metadata)
self._release_refs(metadata)
duration = self.interval - (time() - last)
if duration > 0:
yield gen.sleep(duration)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
return self.queue.put((x, metadata))
@Stream.register_api()
class rate_limit(Stream):
""" Limit the flow of data
    This stops two elements from streaming through in an interval shorter
than the provided value.
Parameters
----------
interval: float
Time in seconds
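    Examples
    --------
    A rough sketch; downstream elements are spaced at least ``interval``
    seconds apart:
    >>> source = Stream()  # doctest: +SKIP
    >>> source.rate_limit(0.5).sink(print)  # doctest: +SKIP
    >>> for i in range(3):
    ...     source.emit(i)  # doctest: +SKIP
    0
    1
    2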
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.next = 0
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
@gen.coroutine
def update(self, x, who=None, metadata=None):
now = time()
old_next = self.next
self.next = max(now, self.next) + self.interval
if now < old_next:
yield gen.sleep(old_next - now)
yield self._emit(x, metadata=metadata)
@Stream.register_api()
class buffer(Stream):
""" Allow results to pile up at this point in the stream
This allows results to buffer in place at various points in the stream.
This can help to smooth flow through the system when backpressure is
applied.
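    Examples
    --------
    A rough sketch; up to ``n`` elements can queue here while a slow
    downstream consumer catches up:
    >>> source = Stream()  # doctest: +SKIP
    >>> source.buffer(8).rate_limit(0.5).sink(print)  # doctest: +SKIP
    >>> for i in range(3):
    ...     source.emit(i)  # doctest: +SKIP
    0
    1
    2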
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, **kwargs):
self.queue = Queue(maxsize=n)
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
return self.queue.put((x, metadata))
@gen.coroutine
def cb(self):
while True:
x, metadata = yield self.queue.get()
yield self._emit(x, metadata=metadata)
self._release_refs(metadata)
@Stream.register_api()
class zip(Stream):
""" Combine streams together into a stream of tuples
    We emit a new tuple once all upstream streams have produced a new element.
See also
--------
combine_latest
zip_latest
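    Examples
    --------
    A small sketch pairing elements positionally from two streams:
    >>> a = Stream()
    >>> b = Stream()
    >>> a.zip(b).sink(print)
    >>> a.emit(1)
    >>> b.emit('x')
    (1, 'x')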
"""
_graphviz_orientation = 270
_graphviz_shape = 'triangle'
def __init__(self, *upstreams, **kwargs):
self.maxsize = kwargs.pop('maxsize', 10)
self.condition = Condition()
self.literals = [(i, val) for i, val in enumerate(upstreams)
if not isinstance(val, Stream)]
self.buffers = {upstream: deque()
for upstream in upstreams
if isinstance(upstream, Stream)}
upstreams2 = [upstream for upstream in upstreams if isinstance(upstream, Stream)]
Stream.__init__(self, upstreams=upstreams2, **kwargs)
def _add_upstream(self, upstream):
# Override method to handle setup of buffer for new stream
self.buffers[upstream] = deque()
super(zip, self)._add_upstream(upstream)
def _remove_upstream(self, upstream):
# Override method to handle removal of buffer for stream
self.buffers.pop(upstream)
super(zip, self)._remove_upstream(upstream)
def pack_literals(self, tup):
""" Fill buffers for literals whenever we empty them """
inp = list(tup)[::-1]
out = []
for i, val in self.literals:
while len(out) < i:
out.append(inp.pop())
out.append(val)
while inp:
out.append(inp.pop())
return tuple(out)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
L = self.buffers[who] # get buffer for stream
L.append((x, metadata))
if len(L) == 1 and all(self.buffers.values()):
vals = [self.buffers[up][0] for up in self.upstreams]
tup, md = __builtins__['zip'](*vals)
for buf in self.buffers.values():
buf.popleft()
self.condition.notify_all()
if self.literals:
tup = self.pack_literals(tup)
md = [m for ml in md for m in ml]
ret = self._emit(tup, md)
self._release_refs(md)
return ret
elif len(L) > self.maxsize:
return self.condition.wait()
@Stream.register_api()
class combine_latest(Stream):
""" Combine multiple streams together to a stream of tuples
This will emit a new tuple of all of the most recent elements seen from
any stream.
Parameters
----------
emit_on : stream or list of streams or None
only emit upon update of the streams listed.
If None, emit on update from any stream
See Also
--------
zip
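    Examples
    --------
    A small sketch; a tuple of the latest values is emitted on every update
    once each stream has produced at least one element:
    >>> a = Stream()
    >>> b = Stream()
    >>> a.combine_latest(b).sink(print)
    >>> a.emit(1)
    >>> b.emit('x')
    (1, 'x')
    >>> a.emit(2)
    (2, 'x')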
"""
_graphviz_orientation = 270
_graphviz_shape = 'triangle'
def __init__(self, *upstreams, **kwargs):
emit_on = kwargs.pop('emit_on', None)
self._initial_emit_on = emit_on
self.last = [None for _ in upstreams]
self.metadata = [None for _ in upstreams]
self.missing = set(upstreams)
if emit_on is not None:
if not isinstance(emit_on, Iterable):
emit_on = (emit_on, )
emit_on = tuple(
upstreams[x] if isinstance(x, int) else x for x in emit_on)
self.emit_on = emit_on
else:
self.emit_on = upstreams
Stream.__init__(self, upstreams=upstreams, **kwargs)
def _add_upstream(self, upstream):
# Override method to handle setup of last and missing for new stream
self.last.append(None)
self.metadata.append(None)
self.missing.update([upstream])
super(combine_latest, self)._add_upstream(upstream)
if self._initial_emit_on is None:
self.emit_on = self.upstreams
def _remove_upstream(self, upstream):
# Override method to handle removal of last and missing for stream
if self.emit_on == upstream:
raise RuntimeError("Can't remove the ``emit_on`` stream since that"
"would cause no data to be emitted. "
"Consider adding an ``emit_on`` first by "
"running ``node.emit_on=(upstream,)`` to add "
"a new ``emit_on`` or running "
"``node.emit_on=tuple(node.upstreams)`` to "
"emit on all incoming data")
self.last.pop(self.upstreams.index(upstream))
self.metadata.pop(self.upstreams.index(upstream))
self.missing.remove(upstream)
super(combine_latest, self)._remove_upstream(upstream)
if self._initial_emit_on is None:
self.emit_on = self.upstreams
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
idx = self.upstreams.index(who)
if self.metadata[idx]:
self._release_refs(self.metadata[idx])
self.metadata[idx] = metadata
if self.missing and who in self.missing:
self.missing.remove(who)
self.last[idx] = x
if not self.missing and who in self.emit_on:
tup = tuple(self.last)
md = [m for ml in self.metadata for m in ml]
return self._emit(tup, md)
@Stream.register_api()
class flatten(Stream):
""" Flatten streams of lists or iterables into a stream of elements
Examples
--------
>>> source = Stream()
>>> source.flatten().sink(print)
>>> for x in [[1, 2, 3], [4, 5], [6, 7, 7]]:
... source.emit(x)
1
2
3
4
5
6
7
See Also
--------
partition
"""
def update(self, x, who=None, metadata=None):
L = []
for i, item in enumerate(x):
if i == len(x) - 1:
y = self._emit(item, metadata=metadata)
else:
y = self._emit(item)
if type(y) is list:
L.extend(y)
else:
L.append(y)
return L
@Stream.register_api()
class unique(Stream):
""" Avoid sending through repeated elements
This deduplicates a stream so that only new elements pass through.
You can control how much of a history is stored with the ``maxsize=``
parameter. For example setting ``maxsize=1`` avoids sending through
elements when one is repeated right after the other.
Parameters
----------
maxsize: int or None, optional
number of stored unique values to check against
key : function, optional
Function which returns a representation of the incoming data.
For example ``key=lambda x: x['a']`` could be used to allow only
pieces of data with unique ``'a'`` values to pass through.
hashable : bool, optional
If True then data is assumed to be hashable, else it is not. This is
used for determining how to cache the history, if hashable then
either dicts or LRU caches are used, otherwise a deque is used.
Defaults to True.
Examples
--------
>>> source = Stream()
>>> source.unique(maxsize=1).sink(print)
>>> for x in [1, 1, 2, 2, 2, 1, 3]:
... source.emit(x)
1
2
1
3
"""
def __init__(self, upstream, maxsize=None, key=identity, hashable=True,
**kwargs):
self.key = key
self.maxsize = maxsize
if hashable:
self.seen = dict()
if self.maxsize:
from zict import LRU
self.seen = LRU(self.maxsize, self.seen)
else:
self.seen = []
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
y = self.key(x)
emit = True
if isinstance(self.seen, list):
if y in self.seen:
self.seen.remove(y)
emit = False
self.seen.insert(0, y)
if self.maxsize:
del self.seen[self.maxsize:]
if emit:
return self._emit(x, metadata=metadata)
else:
if self.seen.get(y, '~~not_seen~~') == '~~not_seen~~':
self.seen[y] = 1
return self._emit(x, metadata=metadata)
@Stream.register_api()
class union(Stream):
""" Combine multiple streams into one
    Every element from any of the upstream streams will immediately flow
into the output stream. They will not be combined with elements from
other streams.
See also
--------
Stream.zip
Stream.combine_latest
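    Examples
    --------
    A small sketch merging two sources into one flow:
    >>> a = Stream()
    >>> b = Stream()
    >>> a.union(b).sink(print)
    >>> a.emit(1)
    1
    >>> b.emit(2)
    2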
"""
def __init__(self, *upstreams, **kwargs):
super(union, self).__init__(upstreams=upstreams, **kwargs)
def update(self, x, who=None, metadata=None):
return self._emit(x, metadata=metadata)
@Stream.register_api()
class pluck(Stream):
""" Select elements from elements in the stream.
Parameters
----------
pluck : object, list
The element(s) to pick from the incoming element in the stream
If an instance of list, will pick multiple elements.
Examples
--------
>>> source = Stream()
>>> source.pluck([0, 3]).sink(print)
>>> for x in [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 10, 11]]:
... source.emit(x)
(1, 4)
(4, 7)
(8, 11)
>>> source = Stream()
>>> source.pluck('name').sink(print)
>>> for x in [{'name': 'Alice', 'x': 123}, {'name': 'Bob', 'x': 456}]:
... source.emit(x)
    Alice
    Bob
"""
def __init__(self, upstream, pick, **kwargs):
self.pick = pick
super(pluck, self).__init__(upstream, **kwargs)
def update(self, x, who=None, metadata=None):
if isinstance(self.pick, list):
return self._emit(tuple([x[ind] for ind in self.pick]),
metadata=metadata)
else:
return self._emit(x[self.pick], metadata=metadata)
@Stream.register_api()
class collect(Stream):
"""
Hold elements in a cache and emit them as a collection when flushed.
Examples
--------
>>> source1 = Stream()
>>> source2 = Stream()
>>> collector = collect(source1)
>>> collector.sink(print)
>>> source2.sink(collector.flush)
>>> source1.emit(1)
>>> source1.emit(2)
>>> source2.emit('anything') # flushes collector
...
    (1, 2)
"""
def __init__(self, upstream, cache=None, metadata_cache=None, **kwargs):
if cache is None:
cache = deque()
self.cache = cache
if metadata_cache is None:
metadata_cache = deque()
self.metadata_cache = metadata_cache
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
self.cache.append(x)
if metadata:
if isinstance(metadata, list):
self.metadata_cache.extend(metadata)
else:
self.metadata_cache.append(metadata)
def flush(self, _=None):
out = tuple(self.cache)
metadata = list(self.metadata_cache)
self._emit(out, metadata)
self._release_refs(metadata)
self.cache.clear()
self.metadata_cache.clear()
@Stream.register_api()
class zip_latest(Stream):
"""Combine multiple streams together to a stream of tuples
    The stream which this is called from is lossless. All elements from
    the lossless stream are emitted regardless of when they came in.
    This will emit a new tuple consisting of an element from the lossless
    stream paired with the latest elements from the other streams.
    Elements are only emitted when an element on the lossless stream is
    received, similar to ``combine_latest`` with the ``emit_on`` flag.
See Also
--------
Stream.combine_latest
Stream.zip
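    Examples
    --------
    A small sketch; the first stream is lossless, the second supplies the
    most recent value seen:
    >>> a = Stream()
    >>> b = Stream()
    >>> a.zip_latest(b).sink(print)
    >>> b.emit('x')
    >>> a.emit(1)
    (1, 'x')
    >>> a.emit(2)
    (2, 'x')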
"""
def __init__(self, lossless, *upstreams, **kwargs):
upstreams = (lossless,) + upstreams
self.last = [None for _ in upstreams]
self.metadata = [None for _ in upstreams]
self.missing = set(upstreams)
self.lossless = lossless
self.lossless_buffer = deque()
Stream.__init__(self, upstreams=upstreams, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
idx = self.upstreams.index(who)
if who is self.lossless:
self.lossless_buffer.append((x, metadata))
elif self.metadata[idx]:
self._release_refs(self.metadata[idx])
self.metadata[idx] = metadata
self.last[idx] = x
if self.missing and who in self.missing:
self.missing.remove(who)
if not self.missing:
L = []
while self.lossless_buffer:
self.last[0], self.metadata[0] = self.lossless_buffer.popleft()
md = [m for ml in self.metadata for m in ml]
L.append(self._emit(tuple(self.last), md))
self._release_refs(self.metadata[0])
return L
@Stream.register_api()
class latest(Stream):
""" Drop held-up data and emit the latest result
This allows you to skip intermediate elements in the stream if there is
some back pressure causing a slowdown. Use this when you only care about
the latest elements, and are willing to lose older data.
This passes through values without modification otherwise.
Examples
--------
>>> source.map(f).latest().map(g) # doctest: +SKIP
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, **kwargs):
self.condition = Condition()
self.next = []
self.next_metadata = None
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
if self.next_metadata:
self._release_refs(self.next_metadata)
self._retain_refs(metadata)
self.next = [x]
self.next_metadata = metadata
self.loop.add_callback(self.condition.notify)
@gen.coroutine
def cb(self):
while True:
yield self.condition.wait()
[x] = self.next
yield self._emit(x, self.next_metadata)
@Stream.register_api()
class to_kafka(Stream):
""" Writes data in the stream to Kafka
This stream accepts a string or bytes object. Call ``flush`` to ensure all
messages are pushed. Responses from Kafka are pushed downstream.
Parameters
----------
topic : string
        The topic to write to
producer_config : dict
Settings to set up the stream, see
https://docs.confluent.io/current/clients/confluent-kafka-python/#configuration
https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
Examples:
bootstrap.servers: Connection string (host:port) to Kafka
Examples
--------
>>> from streamz import Stream
>>> ARGS = {'bootstrap.servers': 'localhost:9092'}
>>> source = Stream()
>>> kafka = source.map(lambda x: str(x)).to_kafka('test', ARGS)
>>> for i in range(10):
... source.emit(i)
>>> kafka.flush()
"""
def __init__(self, upstream, topic, producer_config, **kwargs):
import confluent_kafka as ck
self.topic = topic
self.producer = ck.Producer(producer_config)
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.stopped = False
self.polltime = 0.2
self.loop.add_callback(self.poll)
self.futures = []
@gen.coroutine
def poll(self):
while not self.stopped:
# executes callbacks for any delivered data, in this thread
# if no messages were sent, nothing happens
self.producer.poll(0)
yield gen.sleep(self.polltime)
def update(self, x, who=None, metadata=None):
future = gen.Future()
self.futures.append(future)
@gen.coroutine
def _():
while True:
try:
# this runs asynchronously, in C-K's thread
self.producer.produce(self.topic, x, callback=self.cb)
return
except BufferError:
yield gen.sleep(self.polltime)
except Exception as e:
future.set_exception(e)
return
self.loop.add_callback(_)
return future
@gen.coroutine
def cb(self, err, msg):
future = self.futures.pop(0)
if msg is not None and msg.value() is not None:
future.set_result(None)
yield self._emit(msg.value())
else:
future.set_exception(err or msg.error())
def flush(self, timeout=-1):
self.producer.flush(timeout)
def sync(loop, func, *args, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
    # This was taken from distributed/utils.py
    # Tornado's PollIOLoop doesn't raise when a closed loop is used, so check it ourselves
if PollIOLoop and ((isinstance(loop, PollIOLoop) and getattr(loop, '_closing', False))
or (hasattr(loop, 'asyncio_loop') and loop.asyncio_loop._closed)):
raise RuntimeError("IOLoop is closed")
timeout = kwargs.pop('callback_timeout', None)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if timeout is not None:
future = gen.with_timeout(timedelta(seconds=timeout), future)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if timeout is not None:
if not e.wait(timeout):
raise gen.TimeoutError("timed out after %s s." % (timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
| {
"repo_name": "mrocklin/streams",
"path": "streamz/core.py",
"copies": "1",
"size": "56658",
"license": "bsd-3-clause",
"hash": -9161140651931675000,
"line_mean": 29.9268558952,
"line_max": 90,
"alpha_frac": 0.5664336899,
"autogenerated": false,
"ratio": 4.15594513313284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.522237882303284,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import deque
from itertools import count
import distributed
class DaskClient(object):
""" A simple wrapper around ``distributed.Client`` to conform to our API"""
def __init__(self, client):
self._client = client
self._as_completed = distributed.as_completed([], with_results=True)
def submit(self, func, *args):
future = self._client.submit(func, *args)
self._as_completed.add(future)
return future
def has_results(self):
return not self._as_completed.queue.empty()
def next_batch(self, block=False):
return self._as_completed.next_batch(block=block)
class SerialClient(object):
""" A simple client to run in serial.
    This queues submitted work and evaluates each task serially when results
    are requested via ``next_batch(block=True)``.
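    Examples
    --------
    A minimal sketch of the queue-then-evaluate behaviour:
    >>> client = SerialClient()
    >>> future = client.submit(pow, 2, 3)
    >>> client.has_results()
    False
    >>> client.next_batch(block=True)
    ((0, 8),)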
"""
def __init__(self):
self._queue = deque()
# For now, we use unique integers to mock future objects. We don't
# do anything fancy with them. We only use them as keys in a dict.
self._counter = count()
def submit(self, func, *args):
future = next(self._counter)
self._queue.append((future, func, args))
return future
def has_results(self):
return False
def next_batch(self, block=False):
if not block:
return ()
future, func, args = self._queue.popleft()
result = func(*args)
return ((future, result),)
| {
"repo_name": "eriknw/dask-patternsearch",
"path": "dask_patternsearch/clients.py",
"copies": "1",
"size": "1545",
"license": "bsd-3-clause",
"hash": 8462417046918362000,
"line_mean": 27.6111111111,
"line_max": 79,
"alpha_frac": 0.627184466,
"autogenerated": false,
"ratio": 4.098143236074271,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.522532770207427,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import deque
from time import time
import toolz
from tornado import gen
from tornado.locks import Condition
from tornado.ioloop import IOLoop
from tornado.queues import Queue
from collections import Iterable
no_default = '--no-default--'
def identity(x):
return x
class Stream(object):
""" A Stream is an infinite sequence of data
Streams subscribe to each other passing and transforming data between them.
A Stream object listens for updates from upstream, reacts to these updates,
and then emits more data to flow downstream to all Stream objects that
subscribe to it. Downstream Stream objects may connect at any point of a
Stream graph to get a full view of the data coming off of that point to do
with as they will.
Examples
--------
>>> def inc(x):
... return x + 1
>>> source = Stream() # Create a stream object
>>> s = source.map(inc).map(str) # Subscribe to make new streams
>>> s.sink(print) # take an action whenever an element reaches the end
>>> L = list()
>>> s.sink(L.append) # or take multiple actions (streams can branch)
>>> for i in range(5):
... source.emit(i) # push data in at the source
    1
    2
    3
    4
    5
>>> L # and the actions happen at the sinks
['1', '2', '3', '4', '5']
"""
str_list = ['func', 'predicate', 'n', 'interval']
def __init__(self, child=None, children=None, stream_name=None, **kwargs):
self.parents = []
if children is not None:
self.children = children
else:
self.children = [child]
if kwargs.get('loop'):
self._loop = kwargs.get('loop')
for child in self.children:
if child:
child.parents.append(self)
self.name = stream_name
def __str__(self):
s_list = []
if self.name:
s_list.append('{}; {}'.format(self.name, self.__class__.__name__))
else:
s_list.append(self.__class__.__name__)
for m in self.str_list:
s = ''
at = getattr(self, m, None)
if at:
if not callable(at):
s = str(at)
elif hasattr(at, '__name__'):
s = getattr(self, m).__name__
elif hasattr(at.__class__, '__name__'):
s = getattr(self, m).__class__.__name__
else:
s = None
if s:
s_list.append('{}={}'.format(m, s))
s = "; ".join(s_list)
s = "<" + s + ">"
return s
def emit(self, x):
""" Push data into the stream at this point
        This is typically done only at source Streams but can theoretically be
done at any point
"""
result = []
for parent in self.parents:
r = parent.update(x, who=self)
if type(r) is list:
result.extend(r)
else:
result.append(r)
return [element for element in result if element is not None]
def update(self, x, who=None):
self.emit(x)
def connect(self, parent):
''' Connect this stream to a downstream element.
Parameters
----------
parent: Stream
the parent stream (downstream element) to connect to
'''
# Note : parents go downstream and children go upstream.
self.parents.append(parent)
if parent.children == [None]:
parent.children = [self]
else:
parent.children.append(self)
@property
def child(self):
if len(self.children) != 1:
raise ValueError("Stream has multiple children")
else:
return self.children[0]
@property
def loop(self):
try:
return self._loop
except AttributeError:
pass
for child in self.children:
if child:
                loop = child.loop
if loop:
self._loop = loop
return loop
self._loop = IOLoop.current()
return self._loop
def scatter(self):
from .dask import scatter
return scatter(self)
def map(self, func, *args, **kwargs):
""" Apply a function to every element in the stream """
return map(func, self, args=args, **kwargs)
def filter(self, predicate):
""" Only pass through elements that satisfy the predicate """
return filter(predicate, self)
def remove(self, predicate):
""" Only pass through elements for which the predicate returns False
"""
return filter(lambda x: not predicate(x), self)
def accumulate(self, func, start=no_default, returns_state=False):
""" Accumulate results with previous state
        This performs running or cumulative reductions, applying the function
to the previous total and the new element. The function should take
two arguments, the previous accumulated state and the next element and
it should return a new accumulated state.
Examples
--------
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + x).sink(print)
...
>>> for i in range(5):
... source.emit(i)
        0
        1
3
6
10
"""
return scan(func, self, start=start, returns_state=returns_state)
scan = accumulate
def partition(self, n):
""" Partition stream into tuples of equal size
Examples
--------
>>> source = Stream()
>>> source.partition(3).sink(print)
>>> for i in range(10):
... source.emit(i)
(0, 1, 2)
(3, 4, 5)
(6, 7, 8)
"""
return partition(n, self)
def sliding_window(self, n):
""" Produce overlapping tuples of size n
Examples
--------
>>> source = Stream()
>>> source.sliding_window(3).sink(print)
>>> for i in range(8):
... source.emit(i)
(0, 1, 2)
(1, 2, 3)
(2, 3, 4)
(3, 4, 5)
(4, 5, 6)
(5, 6, 7)
"""
return sliding_window(n, self)
def rate_limit(self, interval):
""" Limit the flow of data
        This stops two elements from streaming through in an interval shorter
than the provided value.
Parameters
----------
interval: float
Time in seconds
"""
return rate_limit(interval, self)
def buffer(self, n, loop=None):
""" Allow results to pile up at this point in the stream
This allows results to buffer in place at various points in the stream.
This can help to smooth flow through the system when backpressure is
applied.
"""
return buffer(n, self, loop=loop)
def timed_window(self, interval, loop=None):
""" Emit a tuple of collected results every interval
Every ``interval`` seconds this emits a tuple of all of the results
seen so far. This can help to batch data coming off of a high-volume
stream.
"""
return timed_window(interval, self, loop=loop)
def delay(self, interval, loop=None):
""" Add a time delay to results """
        return delay(interval, self, loop=loop)
def combine_latest(self, *others, **kwargs):
""" Combine multiple streams together to a stream of tuples
This will emit a new tuple of all of the most recent elements seen from
any stream.
Parameters
        ----------
emit_on : stream or list of streams or None
only emit upon update of the streams listed.
If None, emit on update from any stream
"""
return combine_latest(self, *others, **kwargs)
def concat(self):
""" Flatten streams of lists or iterables into a stream of elements
Examples
--------
>>> source = Stream()
>>> source.concat().sink(print)
>>> for x in [[1, 2, 3], [4, 5], [6, 7, 7]]:
... source.emit(x)
1
2
3
4
5
6
7
"""
return concat(self)
flatten = concat
def union(self, *others):
""" Combine multiple streams into one
        Every element from any of the child streams will immediately flow
into the output stream. They will not be combined with elements from
other streams.
See also
--------
Stream.zip
Stream.combine_latest
"""
return union(children=(self,) + others)
def unique(self, history=None, key=identity):
""" Avoid sending through repeated elements
This deduplicates a stream so that only new elements pass through.
You can control how much of a history is stored with the ``history=``
parameter. For example setting ``history=1`` avoids sending through
elements when one is repeated right after the other.
Examples
--------
>>> source = Stream()
>>> source.unique(history=1).sink(print)
>>> for x in [1, 1, 2, 2, 2, 1, 3]:
... source.emit(x)
1
2
1
3
"""
return unique(self, history=history, key=key)
def collect(self, cache=None):
"""
Hold elements in a cache and emit them as a collection when flushed.
Examples
--------
>>> source1 = Stream()
>>> source2 = Stream()
>>> collector = collect(source1)
>>> collector.sink(print)
>>> source2.sink(collector.flush)
>>> source1.emit(1)
>>> source1.emit(2)
>>> source2.emit('anything') # flushes collector
...
        (1, 2)
"""
return collect(self, cache=cache)
def zip(self, *other):
""" Combine two streams together into a stream of tuples """
return zip(self, *other)
def zip_latest(self, *others):
"""Combine multiple streams together to a stream of tuples
        The stream which this is called from is lossless. All elements from
        the lossless stream are emitted regardless of when they came in.
        This will emit a new tuple consisting of an element from the lossless
        stream paired with the latest elements from the other streams.
        Elements are only emitted when an element on the lossless stream is
        received, similar to ``combine_latest`` with the ``emit_on`` flag.
See Also
--------
Stream.combine_latest
Stream.zip
"""
return zip_latest(self, *others)
def sink(self, func):
""" Apply a function on every element
Examples
--------
>>> source = Stream()
>>> L = list()
>>> source.sink(L.append)
>>> source.sink(print)
>>> source.sink(print)
>>> source.emit(123)
123
123
>>> L
[123]
See Also
--------
Stream.sink_to_list
"""
return Sink(func, self)
def sink_to_list(self):
""" Append all elements of a stream to a list as they come in
Examples
--------
>>> source = Stream()
>>> L = source.map(lambda x: 10 * x).sink_to_list()
>>> for i in range(5):
... source.emit(i)
>>> L
[0, 10, 20, 30, 40]
"""
L = []
Sink(L.append, self)
return L
def frequencies(self):
""" Count occurrences of elements """
def update_frequencies(last, x):
return toolz.assoc(last, x, last.get(x, 0) + 1)
return self.scan(update_frequencies, start={})
def visualize(self, filename='mystream.png', **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
node: Stream instance
A node in the task graph
filename : str, optional
The name of the file to write to disk.
kwargs:
Graph attributes to pass to graphviz like ``rankdir="LR"``
"""
from .graph import visualize
return visualize(self, filename, **kwargs)
class Sink(Stream):
def __init__(self, func, child):
self.func = func
Stream.__init__(self, child)
def update(self, x, who=None):
result = self.func(x)
if type(result) is gen.Future:
return result
else:
return []
class map(Stream):
def __init__(self, func, child, args=(), **kwargs):
self.func = func
self.kwargs = kwargs
self.args = args
Stream.__init__(self, child)
def update(self, x, who=None):
result = self.func(x, *self.args, **self.kwargs)
return self.emit(result)
class filter(Stream):
def __init__(self, predicate, child):
self.predicate = predicate
Stream.__init__(self, child)
def update(self, x, who=None):
if self.predicate(x):
return self.emit(x)
class scan(Stream):
def __init__(self, func, child, start=no_default, returns_state=False):
self.func = func
self.state = start
self.returns_state = returns_state
Stream.__init__(self, child)
def update(self, x, who=None):
if self.state is no_default:
self.state = x
return self.emit(x)
else:
result = self.func(self.state, x)
if self.returns_state:
state, result = result
else:
state = result
self.state = state
return self.emit(result)
class partition(Stream):
def __init__(self, n, child):
self.n = n
self.buffer = []
Stream.__init__(self, child)
def update(self, x, who=None):
self.buffer.append(x)
if len(self.buffer) == self.n:
result, self.buffer = self.buffer, []
return self.emit(tuple(result))
else:
return []
class sliding_window(Stream):
def __init__(self, n, child):
self.n = n
self.buffer = deque(maxlen=n)
Stream.__init__(self, child)
def update(self, x, who=None):
self.buffer.append(x)
if len(self.buffer) == self.n:
return self.emit(tuple(self.buffer))
else:
return []
class timed_window(Stream):
def __init__(self, interval, child, loop=None):
self.interval = interval
self.buffer = []
self.last = gen.moment
Stream.__init__(self, child, loop=loop)
self.loop.add_callback(self.cb)
def update(self, x, who=None):
self.buffer.append(x)
return self.last
@gen.coroutine
def cb(self):
while True:
L, self.buffer = self.buffer, []
self.last = self.emit(L)
yield self.last
yield gen.sleep(self.interval)
class delay(Stream):
def __init__(self, interval, child, loop=None):
self.interval = interval
self.queue = Queue()
Stream.__init__(self, child, loop=loop)
self.loop.add_callback(self.cb)
@gen.coroutine
def cb(self):
while True:
last = time()
x = yield self.queue.get()
yield self.emit(x)
duration = self.interval - (time() - last)
if duration > 0:
yield gen.sleep(duration)
def update(self, x, who=None):
return self.queue.put(x)
class rate_limit(Stream):
def __init__(self, interval, child):
self.interval = interval
self.next = 0
Stream.__init__(self, child)
@gen.coroutine
def update(self, x, who=None):
now = time()
old_next = self.next
self.next = max(now, self.next) + self.interval
if now < old_next:
yield gen.sleep(old_next - now)
yield self.emit(x)
class buffer(Stream):
def __init__(self, n, child, loop=None):
self.queue = Queue(maxsize=n)
Stream.__init__(self, child, loop=loop)
self.loop.add_callback(self.cb)
def update(self, x, who=None):
return self.queue.put(x)
@gen.coroutine
def cb(self):
while True:
x = yield self.queue.get()
yield self.emit(x)
class zip(Stream):
def __init__(self, *children, **kwargs):
self.maxsize = kwargs.pop('maxsize', 10)
self.buffers = [deque() for _ in children]
self.condition = Condition()
Stream.__init__(self, children=children)
def update(self, x, who=None):
L = self.buffers[self.children.index(who)]
L.append(x)
if len(L) == 1 and all(self.buffers):
tup = tuple(buf.popleft() for buf in self.buffers)
self.condition.notify_all()
return self.emit(tup)
elif len(L) > self.maxsize:
return self.condition.wait()
class combine_latest(Stream):
def __init__(self, *children, **kwargs):
emit_on = kwargs.pop('emit_on', None)
self.last = [None for _ in children]
self.missing = set(children)
if emit_on is not None:
if not isinstance(emit_on, Iterable):
emit_on = (emit_on, )
emit_on = tuple(
children[x] if isinstance(x, int) else x for x in emit_on)
self.emit_on = emit_on
else:
self.emit_on = children
Stream.__init__(self, children=children)
def update(self, x, who=None):
if self.missing and who in self.missing:
self.missing.remove(who)
self.last[self.children.index(who)] = x
if not self.missing and who in self.emit_on:
tup = tuple(self.last)
return self.emit(tup)
class concat(Stream):
def update(self, x, who=None):
L = []
for item in x:
y = self.emit(item)
if type(y) is list:
L.extend(y)
else:
L.append(y)
return L
class unique(Stream):
def __init__(self, child, history=None, key=identity):
self.seen = dict()
self.key = key
if history:
from zict import LRU
self.seen = LRU(history, self.seen)
Stream.__init__(self, child)
def update(self, x, who=None):
y = self.key(x)
if y not in self.seen:
self.seen[y] = 1
return self.emit(x)
class union(Stream):
def update(self, x, who=None):
return self.emit(x)
class collect(Stream):
def __init__(self, child, cache=None):
if cache is None:
cache = deque()
self.cache = cache
Stream.__init__(self, child)
def update(self, x, who=None):
self.cache.append(x)
def flush(self, _=None):
out = tuple(self.cache)
self.emit(out)
self.cache.clear()
class zip_latest(Stream):
def __init__(self, lossless, *children):
children = (lossless,) + children
self.last = [None for _ in children]
self.missing = set(children)
self.lossless = lossless
self.lossless_buffer = deque()
Stream.__init__(self, children=children)
def update(self, x, who=None):
idx = self.children.index(who)
if who is self.lossless:
self.lossless_buffer.append(x)
self.last[idx] = x
if self.missing and who in self.missing:
self.missing.remove(who)
if not self.missing:
L = []
while self.lossless_buffer:
self.last[0] = self.lossless_buffer.popleft()
L.append(self.emit(tuple(self.last)))
return L
| {
"repo_name": "jrmlhermitte/streamz",
"path": "streamz/core.py",
"copies": "1",
"size": "20078",
"license": "bsd-3-clause",
"hash": -2776795346957427700,
"line_mean": 26.9637883008,
"line_max": 79,
"alpha_frac": 0.5397948003,
"autogenerated": false,
"ratio": 4.1194091095609355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 718
} |
from __future__ import absolute_import, division, print_function
from collections import deque
from dask.core import istask, subs
def head(task):
"""Return the top level node of a task"""
if istask(task):
return task[0]
elif isinstance(task, list):
return list
else:
return task
def args(task):
"""Get the arguments for the current task"""
if istask(task):
return task[1:]
elif isinstance(task, list):
return task
else:
return ()
class Traverser(object):
"""Traverser interface for tasks.
Class for storing the state while performing a preorder-traversal of a
task.
Parameters
----------
term : task
The task to be traversed
Attributes
----------
term
The current element in the traversal
current
The head of the current element in the traversal. This is simply `head`
applied to the attribute `term`.
"""
def __init__(self, term, stack=None):
self.term = term
if not stack:
self._stack = deque([END])
else:
self._stack = stack
def __iter__(self):
while self.current is not END:
yield self.current
self.next()
def copy(self):
"""Copy the traverser in its current state.
This allows the traversal to be pushed onto a stack, for easy
backtracking."""
return Traverser(self.term, deque(self._stack))
def next(self):
"""Proceed to the next term in the preorder traversal."""
subterms = args(self.term)
if not subterms:
# No subterms, pop off stack
self.term = self._stack.pop()
else:
self.term = subterms[0]
self._stack.extend(reversed(subterms[1:]))
@property
def current(self):
return head(self.term)
def skip(self):
"""Skip over all subterms of the current level in the traversal"""
self.term = self._stack.pop()
class Token(object):
"""A token object.
Used to express certain objects in the traversal of a task or pattern."""
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
# A variable to represent *all* variables in a discrimination net
VAR = Token('?')
# Represents the end of the traversal of an expression. We can't use `None`,
# 'False', etc... here, as anything may be an argument to a function.
END = Token('end')
class Node(tuple):
"""A Discrimination Net node."""
__slots__ = ()
def __new__(cls, edges=None, patterns=None):
edges = edges if edges else {}
patterns = patterns if patterns else []
return tuple.__new__(cls, (edges, patterns))
@property
def edges(self):
"""A dictionary, where the keys are edges, and the values are nodes"""
return self[0]
@property
def patterns(self):
"""A list of all patterns that currently match at this node"""
return self[1]
class RewriteRule(object):
"""A rewrite rule.
Expresses `lhs` -> `rhs`, for variables `vars`.
Parameters
----------
lhs : task
The left-hand-side of the rewrite rule.
rhs : task or function
The right-hand-side of the rewrite rule. If it's a task, variables in
`rhs` will be replaced by terms in the subject that match the variables
in `lhs`. If it's a function, the function will be called with a dict
of such matches.
vars: tuple, optional
Tuple of variables found in the lhs. Variables can be represented as
any hashable object; a good convention is to use strings. If there are
no variables, this can be omitted.
Examples
--------
Here's a `RewriteRule` to replace all nested calls to `list`, so that
`(list, (list, 'x'))` is replaced with `(list, 'x')`, where `'x'` is a
variable.
>>> lhs = (list, (list, 'x'))
>>> rhs = (list, 'x')
>>> variables = ('x',)
>>> rule = RewriteRule(lhs, rhs, variables)
Here's a more complicated rule that uses a callable right-hand-side. A
callable `rhs` takes in a dictionary mapping variables to their matching
values. This rule replaces all occurrences of `(list, 'x')` with `'x'` if
`'x'` is a list itself.
>>> lhs = (list, 'x')
>>> def repl_list(sd):
... x = sd['x']
... if isinstance(x, list):
... return x
... else:
... return (list, x)
>>> rule = RewriteRule(lhs, repl_list, variables)
"""
def __init__(self, lhs, rhs, vars=()):
if not isinstance(vars, tuple):
raise TypeError("vars must be a tuple of variables")
self.lhs = lhs
if callable(rhs):
self.subs = rhs
else:
self.subs = self._apply
self.rhs = rhs
self._varlist = [t for t in Traverser(lhs) if t in vars]
# Reduce vars down to just variables found in lhs
self.vars = tuple(sorted(set(self._varlist)))
def _apply(self, sub_dict):
term = self.rhs
for key, val in sub_dict.items():
term = subs(term, key, val)
return term
def __str__(self):
return "RewriteRule({0}, {1}, {2})".format(self.lhs, self.rhs,
self.vars)
def __repr__(self):
return str(self)
class RuleSet(object):
"""A set of rewrite rules.
Forms a structure for fast rewriting over a set of rewrite rules. This
allows for syntactic matching of terms to patterns for many patterns at
the same time.
Examples
--------
>>> def f(*args): pass
>>> def g(*args): pass
>>> def h(*args): pass
>>> from operator import add
>>> rs = RuleSet( # Make RuleSet with two Rules
... RewriteRule((add, 'x', 0), 'x', ('x',)),
... RewriteRule((f, (g, 'x'), 'y'),
... (h, 'x', 'y'),
... ('x', 'y')))
>>> rs.rewrite((add, 2, 0)) # Apply ruleset to single task
2
>>> rs.rewrite((f, (g, 'a', 3))) # doctest: +SKIP
(h, 'a', 3)
>>> dsk = {'a': (add, 2, 0), # Apply ruleset to full dask graph
... 'b': (f, (g, 'a', 3))}
>>> from toolz import valmap
>>> valmap(rs.rewrite, dsk) # doctest: +SKIP
{'a': 2,
'b': (h, 'a', 3)}
Attributes
----------
rules : list
A list of `RewriteRule`s included in the `RuleSet`.
"""
def __init__(self, *rules):
"""Create a `RuleSet` for a number of rules
Parameters
----------
rules
One or more instances of RewriteRule
"""
self._net = Node()
self.rules = []
for p in rules:
self.add(p)
def add(self, rule):
"""Add a rule to the RuleSet.
Parameters
----------
rule : RewriteRule
"""
if not isinstance(rule, RewriteRule):
raise TypeError("rule must be instance of RewriteRule")
vars = rule.vars
curr_node = self._net
ind = len(self.rules)
# List of variables, in order they appear in the POT of the term
for t in Traverser(rule.lhs):
prev_node = curr_node
if t in vars:
t = VAR
if t in curr_node.edges:
curr_node = curr_node.edges[t]
else:
curr_node.edges[t] = Node()
curr_node = curr_node.edges[t]
# We've reached a leaf node. Add the term index to this leaf.
prev_node.edges[t].patterns.append(ind)
self.rules.append(rule)
def iter_matches(self, term):
"""A generator that lazily finds matchings for term from the RuleSet.
Parameters
----------
term : task
Yields
------
Tuples of `(rule, subs)`, where `rule` is the rewrite rule being
matched, and `subs` is a dictionary mapping the variables in the lhs
of the rule to their matching values in the term."""
S = Traverser(term)
for m, syms in _match(S, self._net):
for i in m:
rule = self.rules[i]
subs = _process_match(rule, syms)
if subs is not None:
yield rule, subs
def _rewrite(self, term):
"""Apply the rewrite rules in RuleSet to top level of term"""
for rule, sd in self.iter_matches(term):
            # A for loop is the fastest way, in all cases, to pull the first
            # element from the match iterator. Since we only want that first
            # element, we break immediately after applying it.
term = rule.subs(sd)
break
return term
def rewrite(self, task, strategy="bottom_up"):
"""Apply the `RuleSet` to `task`.
This applies the most specific matching rule in the RuleSet to the
task, using the provided strategy.
Parameters
----------
term: a task
The task to be rewritten
strategy: str, optional
The rewriting strategy to use. Options are "bottom_up" (default),
or "top_level".
Examples
--------
Suppose there was a function `add` that returned the sum of 2 numbers,
and another function `double` that returned twice its input:
>>> add = lambda x, y: x + y
>>> double = lambda x: 2*x
Now suppose `double` was *significantly* faster than `add`, so
you'd like to replace all expressions `(add, x, x)` with `(double,
x)`, where `x` is a variable. This can be expressed as a rewrite rule:
>>> rule = RewriteRule((add, 'x', 'x'), (double, 'x'), ('x',))
>>> rs = RuleSet(rule)
This can then be applied to terms to perform the rewriting:
>>> term = (add, (add, 2, 2), (add, 2, 2))
>>> rs.rewrite(term) # doctest: +SKIP
(double, (double, 2))
If we only wanted to apply this to the top level of the term, the
`strategy` kwarg can be set to "top_level".
>>> rs.rewrite(term) # doctest: +SKIP
(double, (add, 2, 2))
"""
return strategies[strategy](self, task)
def _top_level(net, term):
return net._rewrite(term)
def _bottom_up(net, term):
if istask(term):
term = (head(term),) + tuple(_bottom_up(net, t) for t in args(term))
elif isinstance(term, list):
term = [_bottom_up(net, t) for t in args(term)]
return net._rewrite(term)
strategies = {'top_level': _top_level,
'bottom_up': _bottom_up}
def _match(S, N):
"""Structural matching of term S to discrimination net node N."""
stack = deque()
restore_state_flag = False
# matches are stored in a tuple, because all mutations result in a copy,
# preventing operations from changing matches stored on the stack.
matches = ()
while True:
if S.current is END:
yield N.patterns, matches
try:
# This try-except block is to catch hashing errors from un-hashable
# types. This allows for variables to be matched with un-hashable
# objects.
n = N.edges.get(S.current, None)
if n and not restore_state_flag:
stack.append((S.copy(), N, matches))
N = n
S.next()
continue
except TypeError:
pass
n = N.edges.get(VAR, None)
if n:
restore_state_flag = False
matches = matches + (S.term,)
S.skip()
N = n
continue
try:
# Backtrack here
(S, N, matches) = stack.pop()
restore_state_flag = True
except:
return
def _process_match(rule, syms):
"""Process a match to determine if it is correct, and to find the correct
substitution that will convert the term into the pattern.
Parameters
----------
rule : RewriteRule
syms : iterable
Iterable of subterms that match a corresponding variable.
Returns
-------
A dictionary of {vars : subterms} describing the substitution to make the
pattern equivalent with the term. Returns `None` if the match is
invalid."""
subs = {}
varlist = rule._varlist
if not len(varlist) == len(syms):
raise RuntimeError("length of varlist doesn't match length of syms.")
for v, s in zip(varlist, syms):
if v in subs and subs[v] != s:
return None
else:
subs[v] = s
return subs
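# Illustrative usage sketch. ``_rewrite_example`` is a hypothetical helper, not
# part of the public API; it exercises ``RewriteRule`` and ``RuleSet`` exactly
# as the docstrings above describe, assuming the behaviour documented there.
def _rewrite_example():
    """Mirror the ``RuleSet.rewrite`` docstring: replace ``(add, x, x)`` with
    ``(double, x)`` and compare the two rewriting strategies."""
    from operator import add
    double = lambda x: 2 * x
    rs = RuleSet(RewriteRule((add, 'x', 'x'), (double, 'x'), ('x',)))
    term = (add, (add, 2, 2), (add, 2, 2))
    # "bottom_up" (the default) rewrites every matching sub-term ...
    assert rs.rewrite(term) == (double, (double, 2))
    # ... while "top_level" only rewrites the outermost call.
    assert rs.rewrite(term, strategy='top_level') == (double, (add, 2, 2))
    return rs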
| {
"repo_name": "cpcloud/dask",
"path": "dask/rewrite.py",
"copies": "5",
"size": "12715",
"license": "bsd-3-clause",
"hash": -6800593835000132000,
"line_mean": 28.0296803653,
"line_max": 79,
"alpha_frac": 0.5508454581,
"autogenerated": false,
"ratio": 4.0532355753905005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014269406392694063,
"num_lines": 438
} |
from __future__ import absolute_import, division, print_function
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
import itertools
import math
from operator import getitem
import os
import types
import uuid
from warnings import warn
from distutils.version import LooseVersion
from ..utils import ignoring
from toolz import (merge, take, reduce, valmap, map, partition_all, filter,
remove, compose, curry, first, second, accumulate)
from toolz.compatibility import iteritems, zip
import toolz
_implement_accumulate = LooseVersion(toolz.__version__) > '0.7.4'
try:
import cytoolz
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
if LooseVersion(cytoolz.__version__) > '0.7.3':
from cytoolz import accumulate
_implement_accumulate = True
except:
from toolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from ..base import Base, normalize_token, tokenize
from ..compatibility import apply, unicode, urlopen
from ..context import _globals
from ..core import list2, quote, istask, get_dependencies, reverse_dict
from ..multiprocessing import get as mpget
from ..optimize import fuse, cull, inline
from ..utils import (infer_compression, open, system_encoding,
takes_multiple_arguments, funcname, digit, insert)
no_default = '__no__default__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Examples
--------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also
--------
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk, dependencies=None):
""" Inline lists that are only used once
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards
"""
if dependencies is None:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items()
if istask(v) and v and v[0] is list and len(dependents[k]) == 1]
return inline(dsk, keys, inline_constants=False)
def optimize(dsk, keys, **kwargs):
""" Optimize a dask from a dask.bag """
dsk2, dependencies = cull(dsk, keys)
dsk3, dependencies = fuse(dsk2, keys, dependencies)
dsk4 = inline_singleton_lists(dsk3, dependencies)
dsk5 = lazify(dsk4)
return dsk5
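# Hedged sketch of the optimization pipeline above. ``inc`` is a stand-in
# function and ``_optimize_demo`` is a hypothetical helper; only the
# ``lazify_task`` step is asserted, since the exact keys emitted by ``fuse``
# are an implementation detail.
def _optimize_demo():
    inc = lambda x: x + 1
    task = (sum, (list, (map, inc, [1, 2, 3])))
    # lazify_task drops the unnecessary ``list`` call around the lazy map
    assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))
    # The full pipeline (cull -> fuse -> inline_singleton_lists -> lazify)
    # applies the same clean-up across a whole graph:
    return optimize({'out': task}, ['out'])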
def to_textfiles(b, path, name_function=str, compression='infer',
encoding=system_encoding, compute=True):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
    The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
**Bag Contents**: The bag calling ``to_textfiles`` _must_ be a bag of
text strings. For example, a bag of dictionaries could be written to
    JSON text files by mapping ``json.dumps`` on to the bag first, and
then calling ``to_textfiles``:
>>> b_dict.map(json.dumps).to_textfiles("/path/to/data/*.json") # doctest: +SKIP
"""
if isinstance(path, (str, unicode)):
if '*' in path:
paths = [path.replace('*', name_function(i))
for i in range(b.npartitions)]
else:
paths = [os.path.join(path, '%s.part' % name_function(i))
for i in range(b.npartitions)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == b.npartitions
paths = path
else:
        raise ValueError("""Path should be either
1. A list of paths -- ['foo.json', 'bar.json', ...]
2. A directory -- 'foo/'
3. A path with a * in it -- 'foo.*.json'""")
def get_compression(path, compression=compression):
if compression == 'infer':
compression = infer_compression(path)
return compression
name = 'to-textfiles-' + uuid.uuid4().hex
dsk = dict(((name, i), (write, (b.name, i), path, get_compression(path),
encoding))
for i, path in enumerate(paths))
result = Bag(merge(b.dask, dsk), name, b.npartitions)
if compute:
result.compute()
else:
return result
def finalize(results):
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
def finalize_item(results):
return results[0]
def unpack_kwargs(kwargs):
""" Extracts dask values from kwargs
Currently only dask.bag.Item and python literal values are supported.
Returns a merged dask graph and a list of [key, val] pairs suitable for
eventually constructing a dict.
"""
dsk = {}
kw_pairs = []
for key, val in iteritems(kwargs):
if isinstance(val, Item):
dsk.update(val.dask)
val = val.key
# TODO elif isinstance(val, Value):
elif isinstance(val, Base):
raise NotImplementedError(
'%s not supported as kwarg value to Bag.map_partitions'
% type(val).__name__)
kw_pairs.append([key, val])
return dsk, kw_pairs
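# Hedged sketch of ``unpack_kwargs``. ``Item`` is defined just below; the
# graph, key names and values here are made up for illustration only.
def _unpack_kwargs_demo():
    den = Item({'den-key': 100}, 'den-key')
    dsk, kw_pairs = unpack_kwargs({'den': den, 'offset': 3})
    # The Item's graph is merged in and its key stands in for its value
    assert dsk == {'den-key': 100}
    assert sorted(kw_pairs) == [['den', 'den-key'], ['offset', 3]]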
class Item(Base):
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize_item)
@staticmethod
def from_imperative(value):
warn("Deprecation warning: moved to from_delayed")
return from_delayed(value)
@staticmethod
def from_delayed(value):
""" Create bag item from a dask.delayed value
Parameters
----------
value: a Value
A single dask.delayed.Value object, such as come from dask.do
Returns
-------
Item
Examples
--------
>>> b = db.Item.from_delayed(x) # doctest: +SKIP
"""
from dask.delayed import Value
assert isinstance(value, Value)
return Item(value.dask, value.key)
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
self.name = key
def _keys(self):
return [self.key]
def apply(self, func):
name = 'apply-{0}-{1}'.format(funcname(func), tokenize(self, func))
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = Base.compute
def to_imperative(self):
warn("Deprecation warning: moved to to_delayed")
return self.to_delayed()
def to_delayed(self):
""" Convert bag item to dask Value
Returns a single value.
"""
from dask.delayed import Value
return Value(self.key, [self.dask])
class Bag(Base):
""" Parallel collection of Python objects
Examples
--------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.str = StringAccessor(self)
def __str__(self):
name = self.name if len(self.name) < 10 else self.name[:7] + '...'
return 'dask.bag<%s, npartitions=%d>' % (name, self.npartitions)
__repr__ = __str__
def map(self, func, **kwargs):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
Keyword arguments are passed through to ``func``. These can be either
``dask.bag.Item``, or normal python objects.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(num, den=1):
... return num / den
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = 'map-{0}-{1}'.format(funcname(func),
tokenize(self, func, kwargs))
if takes_multiple_arguments(func):
func = partial(apply, func)
dsk = self.dask.copy()
if kwargs:
kw_dsk, kw_pairs = unpack_kwargs(kwargs)
dsk.update(kw_dsk)
func = (apply, partial, [func], (dict, kw_pairs))
dsk.update(((name, i), (reify, (map, func, (self.name, i))))
for i in range(self.npartitions))
return type(self)(dsk, name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = 'filter-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = 'remove-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func, **kwargs):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
Keyword arguments are passed through to ``func``. These can be either
``dask.bag.Item``, or normal python objects.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(nums, den=1):
... return [num / den for num in nums]
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map_partitions(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map_partitions(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = 'map-partitions-{0}-{1}'.format(funcname(func),
tokenize(self, func, kwargs))
dsk = self.dask.copy()
if kwargs:
kw_dsk, kw_pairs = unpack_kwargs(kwargs)
dsk.update(kw_dsk)
dsk.update(((name, i),
(apply, func, [(self.name, i)], (dict, kw_pairs))
if kwargs else (func, (self.name, i)))
for i in range(self.npartitions))
return type(self)(dsk, name, self.npartitions)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = 'pluck-' + tokenize(self, key, default)
key = quote(key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def unzip(self, n):
"""Transform a bag of tuples to ``n`` bags of their elements.
Examples
--------
>>> b = from_sequence([(i, i + 1, i + 2) for i in range(10)])
>>> first, second, third = b.unzip(3)
>>> isinstance(first, Bag)
True
>>> first.compute()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Note that this is equivalent to:
>>> first, second, third = (b.pluck(i) for i in range(3))
"""
return tuple(self.pluck(i) for i in range(n))
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=str, compression='infer',
encoding=system_encoding, compute=True):
return to_textfiles(self, path, name_function, compression, encoding, compute)
def fold(self, binop, combine=None, initial=no_default, split_every=None):
""" Parallelizable reduction
Fold is like the builtin function ``reduce`` except that it works in
parallel. Fold takes two binary operator functions, one to reduce each
partition of our dataset and another to combine results between
partitions
1. ``binop``: Binary operator to reduce within each partition
2. ``combine``: Binary operator to combine results from binop
Sequentially this would look like the following:
>>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP
>>> final = reduce(combine, intermediates) # doctest: +SKIP
If only one function is given then it is used for both functions
``binop`` and ``combine`` as in the following example to compute the
sum:
>>> def add(x, y):
... return x + y
>>> b = from_sequence(range(5))
>>> b.fold(add).compute() # doctest: +SKIP
10
In full form we provide both binary operators as well as their default
arguments
>>> b.fold(binop=add, combine=add, initial=0).compute() # doctest: +SKIP
10
More complex binary operators are also doable
>>> def add_to_set(acc, x):
... ''' Add new element x to set acc '''
... return acc | set([x])
>>> b.fold(add_to_set, set.union, initial=set()).compute() # doctest: +SKIP
        {0, 1, 2, 3, 4}
See Also
--------
Bag.foldby
"""
combine = combine or binop
initial = quote(initial)
if initial is not no_default:
return self.reduction(curry(_reduce, binop, initial=initial),
curry(_reduce, combine),
split_every=split_every)
else:
from toolz.curried import reduce
return self.reduction(reduce(binop), reduce(combine),
split_every=split_every)
def frequencies(self, split_every=None):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
        {'Alice': 2, 'Bob': 1}
"""
return self.reduction(frequencies, merge_frequencies,
out_type=Bag, split_every=split_every,
name='frequencies').map_partitions(dictitems)
def topk(self, k, key=None, split_every=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, k, key=key)
else:
func = partial(topk, k)
return self.reduction(func, compose(func, toolz.concat), out_type=Bag,
split_every=split_every, name='topk')
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
return self.reduction(set, curry(apply, set.union), out_type=Bag,
name='distinct')
def reduction(self, perpartition, aggregate, split_every=None,
out_type=Item, name=None):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
split_every: int (optional)
            Group partitions into groups of this size while performing the
            reduction; defaults to 8.  (A small sketch of this tree reduction
            follows the class definition.)
out_type: {Bag, Item}
The out type of the result, Item if a single element, Bag if a list
of elements. Defaults to Item.
Examples
--------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
if split_every is None:
split_every = 8
if split_every is False:
split_every = self.npartitions
token = tokenize(self, perpartition, aggregate, split_every)
a = '%s-part-%s' % (name or funcname(perpartition), token)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
k = self.npartitions
b = a
fmt = '%s-aggregate-%s' % (name or funcname(aggregate), token)
depth = 0
while k > 1:
c = fmt + str(depth)
dsk2 = dict(((c, i), (aggregate, [(b, j) for j in inds]))
for i, inds in enumerate(partition_all(split_every,
range(k))))
dsk.update(dsk2)
k = len(dsk2)
b = c
depth += 1
if out_type is Item:
dsk[b] = dsk.pop((b, 0))
return Item(merge(self.dask, dsk), b)
else:
return Bag(merge(self.dask, dsk), b, 1)
@wraps(sum)
def sum(self, split_every=None):
return self.reduction(sum, sum, split_every=split_every)
@wraps(max)
def max(self, split_every=None):
return self.reduction(max, max, split_every=split_every)
@wraps(min)
def min(self, split_every=None):
return self.reduction(min, min, split_every=split_every)
@wraps(any)
def any(self, split_every=None):
return self.reduction(any, any, split_every=split_every)
@wraps(all)
def all(self, split_every=None):
return self.reduction(all, all, split_every=split_every)
def count(self, split_every=None):
""" Count the number of elements """
return self.reduction(count, sum, split_every=split_every)
def mean(self):
""" Arithmetic mean """
def mean_chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def mean_aggregate(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(mean_chunk, mean_aggregate, split_every=False)
def var(self, ddof=0):
""" Variance """
def var_chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def var_aggregate(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(var_chunk, var_aggregate, split_every=False)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = 'join-' + tokenize(self, other, on_self, on_other)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = 'product-' + tokenize(self, other)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return type(self)(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
        >>> b.groupby(key).map(lambda (k, v): (k, reduction(v)))  # doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
**Key Function**
The key function determines how to group the elements in your bag.
In the common case where your bag holds dictionaries then the key
function often gets out one of those elements.
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
**Binops**
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
        ``binop`` and ``combine``. The two inputs and the output of each binary
        operator must have the same type.
Binop takes a running total and a new element and produces a new total:
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them:
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
See Also
--------
toolz.reduceby
pyspark.combineByKey
"""
token = tokenize(self, key, binop, initial, combine, combine_initial)
a = 'foldby-a-' + token
b = 'foldby-b-' + token
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
def combine2(acc, x):
return combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems, (
reduceby, 0, combine2, (
toolz.concat, (
map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems, (
merge_with, (partial, reduce, combine),
list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, compute=True):
""" Take the first k elements
Evaluates by default, use ``compute=False`` to avoid computation.
Only takes from the first partition
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
name = 'take-' + tokenize(self, k)
dsk = {(name, 0): (list, (take, k, (self.name, 0)))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = 'concat-' + tokenize(self)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, method=None, npartitions=None, blocksize=2**20,
max_branch=None):
""" Group collection by key function
This requires a full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
Parameters
----------
grouper: function
Function on which to group elements
method: str
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
npartitions: int
If using the disk-based shuffle, the number of output partitions
blocksize: int
If using the disk-based shuffle, the size of shuffle blocks
max_branch: int
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
Examples
--------
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> dict(b.groupby(iseven)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if method is None:
get = _globals.get('get')
if (isinstance(get, types.MethodType) and
'distributed' in get.__func__.__module__):
method = 'tasks'
else:
method = 'disk'
if method == 'disk':
return groupby_disk(self, grouper, npartitions=npartitions,
blocksize=blocksize)
elif method == 'tasks':
return groupby_tasks(self, grouper, max_branch=max_branch)
else:
raise NotImplementedError(
"Shuffle method must be 'disk' or 'tasks'")
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuple or dict records.
Provide ``columns=`` keyword arg to specify column names.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if columns is None:
head = self.take(1)[0]
if isinstance(head, dict):
columns = sorted(head)
elif isinstance(head, (tuple, list)):
columns = list(range(len(head)))
name = 'to_dataframe-' + tokenize(self, columns)
DataFrame = partial(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, columns, divisions)
def to_imperative(self):
warn("Deprecation warning: moved to to_delayed")
return self.to_delayed()
def to_delayed(self):
""" Convert bag to dask Values
Returns list of values, one value per partition.
"""
from dask.delayed import Value
return [Value(k, [self.dask]) for k in self._keys()]
def repartition(self, npartitions):
""" Coalesce bag into fewer partitions
Examples
--------
>>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP
"""
if npartitions > self.npartitions:
raise NotImplementedError(
"Repartition only supports going to fewer partitions\n"
" old: %d new: %d" % (self.npartitions, npartitions))
size = self.npartitions / npartitions
L = [int(i * size) for i in range(npartitions + 1)]
name = 'repartition-%d-%s' % (npartitions, self.name)
dsk = dict(((name, i), (list,
(toolz.concat,
[(self.name, j) for j in range(L[i], L[i + 1])]
)))
for i in range(npartitions))
return Bag(merge(self.dask, dsk), name, npartitions)
def accumulate(self, binop, initial=no_default):
"""Repeatedly apply binary function to a sequence, accumulating results.
Examples
--------
>>> from operator import add
>>> b = from_sequence([1, 2, 3, 4, 5], npartitions=2)
>>> b.accumulate(add).compute() # doctest: +SKIP
[1, 3, 6, 10, 15]
Accumulate also takes an optional argument that will be used as the
first value.
>>> b.accumulate(add, -1) # doctest: +SKIP
        [-1, 0, 2, 5, 9, 14]
"""
if not _implement_accumulate:
raise NotImplementedError("accumulate requires `toolz` > 0.7.4"
" or `cytoolz` > 0.7.3.")
token = tokenize(self, binop, initial)
binop_name = funcname(binop)
a = '%s-part-%s' % (binop_name, token)
b = '%s-first-%s' % (binop_name, token)
c = '%s-second-%s' % (binop_name, token)
dsk = {(a, 0): (accumulate_part, binop, (self.name, 0), initial, True),
(b, 0): (first, (a, 0)),
(c, 0): (second, (a, 0))}
for i in range(1, self.npartitions):
dsk[(a, i)] = (accumulate_part, binop, (self.name, i),
(c, i - 1))
dsk[(b, i)] = (first, (a, i))
dsk[(c, i)] = (second, (a, i))
return Bag(merge(self.dask, dsk), b, self.npartitions)
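# Hedged sketch of the tree reduction performed by ``Bag.reduction`` above:
# partial results are combined in rounds of at most ``split_every`` inputs
# until a single value remains. ``_tree_reduce_demo`` is illustrative only and
# not part of the dask API.
def _tree_reduce_demo(parts, aggregate, split_every=8):
    while len(parts) > 1:
        parts = [aggregate(group)
                 for group in partition_all(split_every, parts)]
    return parts[0]
# e.g. _tree_reduce_demo([1, 2, 3, 4, 5], sum, split_every=2)
# reduces [1, 2, 3, 4, 5] -> [3, 7, 5] -> [10, 5] -> [15] -> 15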
def accumulate_part(binop, seq, initial, is_first=False):
if initial == no_default:
res = list(accumulate(binop, seq))
else:
res = list(accumulate(binop, seq, initial=initial))
if is_first:
return res, res[-1] if res else [], initial
return res[1:], res[-1]
normalize_token.register(Item, lambda a: a.key)
normalize_token.register(Bag, lambda a: a.name)
def partition(grouper, sequence, npartitions, p, nelements=2**20):
""" Partition a bag along a grouper, store partitions on disk """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2)
return p
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
def from_filenames(filenames, chunkbytes=None, compression='infer',
encoding=system_encoding, linesep=os.linesep):
""" Deprecated. See read_text """
warn("db.from_filenames is deprecated in favor of db.read_text")
from .text import read_text
return read_text(filenames, blocksize=chunkbytes, compression=compression,
encoding=encoding, linedelimiter=linesep)
def write(data, filename, compression, encoding):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
f = open(filename, mode='wb', compression=compression)
# Check presence of endlines
data = iter(data)
try:
firstline = next(data)
except StopIteration:
f.close()
return
if not (firstline.endswith(os.linesep) or firstline.endswith('\n')):
sep = os.linesep if firstline.endswith(os.linesep) else '\n'
firstline = firstline + sep
data = (line + sep for line in data)
f.write(firstline.encode(encoding))
try:
lastline = ''
for line in data:
f.write(lastline.encode(encoding))
lastline = line
f.write(lastline.rstrip(os.linesep).encode(encoding))
finally:
f.close()
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See Also
--------
read_text: Create bag from textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = 'from_sequence-' + tokenize(seq, partition_size)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
def from_castra(x, columns=None, index=False):
"""Load a dask Bag from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
index: bool, optional
If True, the index is included as the first element in each tuple.
Default is False.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
elif not x._readonly:
x = Castra(x.path, readonly=True)
if columns is None:
columns = x.columns
name = 'from-castra-' + tokenize(os.path.getmtime(x.path), x.path,
columns, index)
dsk = dict(((name, i), (load_castra_partition, x, part, columns, index))
for i, part in enumerate(x.partitions))
return Bag(dsk, name, len(x.partitions))
def load_castra_partition(castra, part, columns, index):
import blosc
# Due to serialization issues, blosc needs to be manually initialized in
# each process.
blosc.init()
df = castra.load_partition(part, columns)
if isinstance(columns, list):
items = df.itertuples(index)
else:
items = df.iteritems() if index else iter(df)
items = list(items)
if items and isinstance(items[0], tuple) and type(items[0]) is not tuple:
names = items[0]._fields
items = [dict(zip(names, item)) for item in items]
return items
def from_url(urls):
"""Create a dask.bag from a url
>>> a = from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>>> a.take(8) # doctest: +SKIP
('Dask\\n',
'====\\n',
'\\n',
'|Build Status| |Coverage| |Doc Status| |Gitter|\\n',
'\\n',
'Dask provides multi-core execution on larger-than-memory datasets using blocked\\n',
'algorithms and task scheduling. It maps high-level NumPy and list operations\\n',
'on large datasets on to graphs of many operations on small in-memory datasets.\\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = 'from_url-' + uuid.uuid4().hex
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = 'concat-' + tokenize(*bags)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key)
for bag in bags for key in sorted(bag._keys()))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
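# Hedged sketch: ``robust_wraps`` above copies only ``__doc__``; the wrapped
# callable keeps its own name and signature. ``_robust_wraps_demo`` is a
# hypothetical helper for illustration.
def _robust_wraps_demo():
    def documented():
        """original docstring"""
    @robust_wraps(documented)
    def wrapped():
        pass
    assert wrapped.__doc__ == 'original docstring'
    assert wrapped.__name__ == 'wrapped'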
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
def from_imperative(values):
warn("Deprecation warning: moved to from_delayed")
return from_delayed(values)
def from_delayed(values):
""" Create bag from many dask.delayed objects
Parameters
----------
values: list of Values
An iterable of dask.delayed.Value objects, such as come from dask.do
These comprise the individual partitions of the resulting bag
Returns
-------
Bag
Examples
--------
>>> b = from_delayed([x, y, z]) # doctest: +SKIP
"""
from dask.delayed import Value
if isinstance(values, Value):
values = [values]
dsk = merge(v.dask for v in values)
name = 'bag-from-delayed-' + tokenize(*values)
names = [(name, i) for i in range(len(values))]
values = [v.key for v in values]
dsk2 = dict(zip(names, values))
return Bag(merge(dsk, dsk2), name, len(values))
def merge_frequencies(seqs):
first, rest = seqs[0], seqs[1:]
if not rest:
return first
out = defaultdict(int)
out.update(first)
for d in rest:
for k, v in iteritems(d):
out[k] += v
return out
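# Hedged sketch: ``merge_frequencies`` sums per-partition counters, which is
# how ``Bag.frequencies`` combines its partial results. The counter values
# below are made up for illustration.
def _merge_frequencies_demo():
    parts = [{'Alice': 2, 'Bob': 1}, {'Alice': 1}]
    assert dict(merge_frequencies(parts)) == {'Alice': 3, 'Bob': 1}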
def bag_range(n, npartitions):
""" Numbers from zero to n
Examples
--------
>>> import dask.bag as db
>>> b = db.range(5, npartitions=2)
>>> list(b)
[0, 1, 2, 3, 4]
"""
size = n // npartitions
name = 'range-%d-npartitions-%d' % (n, npartitions)
ijs = list(enumerate(take(npartitions, range(0, n, size))))
dsk = dict(((name, i), (reify, (range, j, min(j + size, n))))
for i, j in ijs)
if n % npartitions != 0:
i, j = ijs[-1]
dsk[(name, i)] = (reify, (range, j, n))
return Bag(dsk, name, npartitions)
def bag_zip(*bags):
""" Partition-wise bag zip
All passed bags must have the same number of partitions.
NOTE: corresponding partitions should have the same length; if they do not,
the "extra" elements from the longer partition(s) will be dropped. If you
have this case chances are that what you really need is a data alignment
mechanism like pandas's, and not a missing value filler like zip_longest.
Examples
--------
Correct usage:
>>> import dask.bag as db
>>> evens = db.from_sequence(range(0, 10, 2), partition_size=4)
>>> odds = db.from_sequence(range(1, 10, 2), partition_size=4)
>>> pairs = db.zip(evens, odds)
>>> list(pairs)
[(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
Incorrect usage:
>>> numbers = db.range(20) # doctest: +SKIP
>>> fizz = numbers.filter(lambda n: n % 3 == 0) # doctest: +SKIP
>>> buzz = numbers.filter(lambda n: n % 5 == 0) # doctest: +SKIP
>>> fizzbuzz = db.zip(fizz, buzz) # doctest: +SKIP
    >>> list(fizzbuzz) # doctest: +SKIP
[(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)]
When what you really wanted was more along the lines of:
    >>> list(fizzbuzz) # doctest: +SKIP
    [(0, 0), (3, None), (None, 5), (6, None), (None, 10), (9, None),
(12, None), (15, 15), (18, None), (None, 20), (None, 25), (None, 30)]
"""
npartitions = bags[0].npartitions
assert all(bag.npartitions == npartitions for bag in bags)
# TODO: do more checks
name = 'zip-' + tokenize(*bags)
dsk = dict(
((name, i), (reify, (zip,) + tuple((bag.name, i) for bag in bags)))
for i in range(npartitions))
bags_dsk = merge(*(bag.dask for bag in bags))
return Bag(merge(bags_dsk, dsk), name, npartitions)
def _reduce(binop, sequence, initial=no_default):
if initial is not no_default:
return reduce(binop, sequence, initial)
else:
return reduce(binop, sequence)
def make_group(k, stage):
def h(x):
return x[0] // k ** stage % k
return h
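# Hedged illustration of ``make_group``: with k == 4, stages 0 and 1 pick out
# the first and second base-4 digits of the hashed key, which is how the
# staged shuffle in ``groupby_tasks`` below routes (hash, element) pairs.
# ``_make_group_demo`` and its values are illustrative only.
def _make_group_demo():
    h0, h1 = make_group(4, 0), make_group(4, 1)
    # 27 == 1 * 4**2 + 2 * 4**1 + 3 * 4**0; elements are (hash, payload) pairs
    assert (h0((27, 'payload')), h1((27, 'payload'))) == (3, 2)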
def groupby_tasks(b, grouper, hash=hash, max_branch=32):
max_branch = max_branch or 32
n = b.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages))
for i in range(n)]
sinputs = set(inputs)
b2 = b.map(lambda x: (hash(grouper(x)), x))
token = tokenize(b, grouper, hash, max_branch)
start = dict((('shuffle-join-' + token, 0, inp), (b2.name, i))
for i, inp in enumerate(inputs))
for stage in range(1, stages + 1):
group = dict((('shuffle-group-' + token, stage, inp),
(groupby,
(make_group, k, stage - 1),
('shuffle-join-' + token, stage - 1, inp)))
for inp in inputs)
split = dict((('shuffle-split-' + token, stage, i, inp),
(dict.get, ('shuffle-group-' + token, stage, inp), i, {}))
for i in range(k)
for inp in inputs)
join = dict((('shuffle-join-' + token, stage, inp),
(list, (toolz.concat,
[('shuffle-split-' + token, stage, inp[stage-1],
insert(inp, stage - 1, j)) for j in range(k)
if insert(inp, stage - 1, j) in sinputs])))
for inp in inputs)
groups.append(group)
splits.append(split)
joins.append(join)
end = dict((('shuffle-' + token, i), (list, (pluck, 1, j)))
for i, j in enumerate(join))
dsk = merge(b2.dask, start, end, *(groups + splits + joins))
return type(b)(dsk, 'shuffle-' + token, n)
def groupby_disk(b, grouper, npartitions=None, blocksize=2**20):
if npartitions is None:
npartitions = b.npartitions
token = tokenize(b, grouper, npartitions, blocksize)
import partd
p = ('partd-' + token,)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, partd.File()))}
except AttributeError:
dsk1 = {p: (partd.Python, partd.File())}
# Partition data on disk
name = 'groupby-part-{0}-{1}'.format(funcname(grouper), token)
dsk2 = dict(((name, i), (partition, grouper, (b.name, i),
npartitions, p, blocksize))
for i in range(b.npartitions))
# Barrier
barrier_token = 'groupby-barrier-' + token
def barrier(args):
return 0
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'groupby-collect-' + token
dsk4 = dict(((name, i),
(collect, grouper, i, p, barrier_token))
for i in range(npartitions))
return type(b)(merge(b.dask, dsk1, dsk2, dsk3, dsk4), name,
npartitions)
| {
"repo_name": "mikegraham/dask",
"path": "dask/bag/core.py",
"copies": "1",
"size": "51601",
"license": "bsd-3-clause",
"hash": 7992446081672394000,
"line_mean": 32.0352112676,
"line_max": 102,
"alpha_frac": 0.5517722525,
"autogenerated": false,
"ratio": 3.752527088938986,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4804299341438986,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
import itertools
import math
import os
import types
import uuid
from random import Random
from warnings import warn
from distutils.version import LooseVersion
from ..utils import ignoring, eq_strict
from toolz import (merge, take, reduce, valmap, map, partition_all, filter,
remove, compose, curry, first, second, accumulate)
from toolz.compatibility import iteritems, zip
import toolz
_implement_accumulate = LooseVersion(toolz.__version__) > '0.7.4'
try:
import cytoolz
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
if LooseVersion(cytoolz.__version__) > '0.7.3':
from cytoolz import accumulate # noqa: F811
_implement_accumulate = True
except:
from toolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from ..base import Base, normalize_token, tokenize
from ..compatibility import apply, urlopen
from ..context import _globals
from ..core import list2, quote, istask, get_dependencies, reverse_dict
from ..multiprocessing import get as mpget
from ..optimize import fuse, cull, inline
from ..utils import (open, system_encoding, takes_multiple_arguments, funcname,
digit, insert, different_seeds)
from ..bytes.core import write_bytes
no_default = '__no__default__'
no_result = '__no__result__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Examples
--------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also
--------
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk, dependencies=None):
""" Inline lists that are only used once
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards
"""
if dependencies is None:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items()
if istask(v) and v and v[0] is list and len(dependents[k]) == 1]
dsk = inline(dsk, keys, inline_constants=False)
for k in keys:
del dsk[k]
return dsk
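# Hedged sketch of ``inline_singleton_lists`` above, mirroring its docstring
# example; ``f`` is a stand-in function and the key names are made up.
def _inline_singleton_lists_demo():
    f = lambda x, y: (x, y)
    d = {'b': (list, 'a'),
         'c': (f, 'b', 1)}
    # 'b' is a ``list`` task with a single dependent, so it is folded into 'c'
    # and its key is dropped from the graph.
    assert inline_singleton_lists(d) == {'c': (f, (list, 'a'), 1)}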
def optimize(dsk, keys, **kwargs):
""" Optimize a dask from a dask.bag """
dsk2, dependencies = cull(dsk, keys)
dsk3, dependencies = fuse(dsk2, keys, dependencies)
dsk4 = inline_singleton_lists(dsk3, dependencies)
dsk5 = lazify(dsk4)
return dsk5
def to_textfiles(b, path, name_function=None, compression='infer',
encoding=system_encoding, compute=True):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
    The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
**Bag Contents**: The bag calling ``to_textfiles`` must be a bag of
text strings. For example, a bag of dictionaries could be written to
JSON text files by mapping ``json.dumps`` on to the bag first, and
then calling ``to_textfiles`` :
>>> b_dict.map(json.dumps).to_textfiles("/path/to/data/*.json") # doctest: +SKIP
"""
out = write_bytes(b.to_delayed(), path, name_function, compression,
encoding=encoding)
if compute:
from dask import compute
compute(*out)
else:
return out
def finalize(results):
if not results:
return results
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
def finalize_item(results):
return results[0]
def unpack_kwargs(kwargs):
""" Extracts dask values from kwargs
Currently only dask.bag.Item and python literal values are supported.
Returns a merged dask graph and a list of [key, val] pairs suitable for
eventually constructing a dict.
"""
dsk = {}
kw_pairs = []
for key, val in iteritems(kwargs):
if isinstance(val, Item):
dsk.update(val.dask)
val = val.key
# TODO elif isinstance(val, Delayed):
elif isinstance(val, Base):
raise NotImplementedError(
'%s not supported as kwarg value to Bag.map_partitions'
% type(val).__name__)
kw_pairs.append([key, val])
return dsk, kw_pairs
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
class Item(Base):
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize_item)
@staticmethod
def from_delayed(value):
""" Create bag item from a dask.delayed value
See ``dask.bag.from_delayed`` for details
"""
from dask.delayed import Delayed
assert isinstance(value, Delayed)
return Item(value.dask, value.key)
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
self.name = key
@property
def _args(self):
return (self.dask, self.key)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self.key = state
def _keys(self):
return [self.key]
def apply(self, func):
name = 'apply-{0}-{1}'.format(funcname(func), tokenize(self, func))
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = Base.compute
def to_delayed(self):
""" Convert bag item to dask Delayed
Returns a single value.
"""
from dask.delayed import Delayed
return Delayed(self.key, [self.dask])
class Bag(Base):
""" Parallel collection of Python objects
Examples
--------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
def __str__(self):
name = self.name if len(self.name) < 10 else self.name[:7] + '...'
return 'dask.bag<%s, npartitions=%d>' % (name, self.npartitions)
__repr__ = __str__
str = property(fget=StringAccessor)
def map(self, func, **kwargs):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
Keyword arguments are passed through to ``func``. These can be either
``dask.bag.Item``, or normal python objects.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(num, den=1):
... return num / den
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = 'map-{0}-{1}'.format(funcname(func),
tokenize(self, func, kwargs))
if takes_multiple_arguments(func):
func = partial(apply, func)
dsk = self.dask.copy()
if kwargs:
kw_dsk, kw_pairs = unpack_kwargs(kwargs)
dsk.update(kw_dsk)
func = (apply, partial, [func], (dict, kw_pairs))
dsk.update(((name, i), (reify, (map, func, (self.name, i))))
for i in range(self.npartitions))
return type(self)(dsk, name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self.name, self.npartitions = state
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = 'filter-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def _get_rs_predicate(self, seed, prob):
""" Return sampling filter predicate for the given seed.
"""
random_state = Random(seed)
return lambda _: random_state.random() < prob
def random_sample(self, prob, random_state=None):
""" Return elements from bag with probability of ``prob``.
``prob`` must be a number in the interval `[0, 1]`. All elements are
considered independently without replacement.
Providing an integer seed for ``random_state`` will result in
deterministic sampling. Given the same seed it will return the same
sample every time.
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.random_sample(0.5, 42))
[1, 3]
>>> list(b.random_sample(0.5, 42))
[1, 3]
"""
if not 0 <= prob <= 1:
raise ValueError('prob must be a number in the interval [0, 1]')
import numpy as np
if random_state is None:
random_state = np.random.randint(np.iinfo(np.int32).max)
name = 'random-sample-{0}'.format(tokenize(self, prob, random_state))
# we need to generate a different random seed for each partition or
# otherwise we would be selecting the exact same positions at each
# partition
seeds = different_seeds(self.npartitions, random_state)
dsk = dict(((name, i), (reify, (filter,
self._get_rs_predicate(seed, prob), (self.name, i))))
for i, seed in zip(range(self.npartitions), seeds))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = 'remove-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func, **kwargs):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
Keyword arguments are passed through to ``func``. These can be either
``dask.bag.Item``, or normal python objects.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(nums, den=1):
... return [num / den for num in nums]
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map_partitions(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map_partitions(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = 'map-partitions-{0}-{1}'.format(funcname(func),
tokenize(self, func, kwargs))
dsk = self.dask.copy()
if kwargs:
kw_dsk, kw_pairs = unpack_kwargs(kwargs)
dsk.update(kw_dsk)
dsk.update(((name, i),
(apply, func, [(self.name, i)], (dict, kw_pairs))
if kwargs else (func, (self.name, i)))
for i in range(self.npartitions))
return type(self)(dsk, name, self.npartitions)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = 'pluck-' + tokenize(self, key, default)
key = quote(key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def unzip(self, n):
"""Transform a bag of tuples to ``n`` bags of their elements.
Examples
--------
>>> b = from_sequence([(i, i + 1, i + 2) for i in range(10)])
>>> first, second, third = b.unzip(3)
>>> isinstance(first, Bag)
True
>>> first.compute()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Note that this is equivalent to:
>>> first, second, third = (b.pluck(i) for i in range(3))
"""
return tuple(self.pluck(i) for i in range(n))
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=None, compression='infer',
encoding=system_encoding, compute=True):
return to_textfiles(self, path, name_function, compression, encoding, compute)
def fold(self, binop, combine=None, initial=no_default, split_every=None):
""" Parallelizable reduction
Fold is like the builtin function ``reduce`` except that it works in
parallel. Fold takes two binary operator functions, one to reduce each
partition of our dataset and another to combine results between
partitions
1. ``binop``: Binary operator to reduce within each partition
2. ``combine``: Binary operator to combine results from binop
Sequentially this would look like the following:
>>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP
>>> final = reduce(combine, intermediates) # doctest: +SKIP
If only one function is given then it is used for both functions
``binop`` and ``combine`` as in the following example to compute the
sum:
>>> def add(x, y):
... return x + y
>>> b = from_sequence(range(5))
>>> b.fold(add).compute() # doctest: +SKIP
10
In full form we provide both binary operators as well as their default
arguments
>>> b.fold(binop=add, combine=add, initial=0).compute() # doctest: +SKIP
10
More complex binary operators are also doable
>>> def add_to_set(acc, x):
... ''' Add new element x to set acc '''
... return acc | set([x])
>>> b.fold(add_to_set, set.union, initial=set()).compute() # doctest: +SKIP
{1, 2, 3, 4, 5}
See Also
--------
Bag.foldby
"""
combine = combine or binop
initial = quote(initial)
if initial is not no_default:
return self.reduction(curry(_reduce, binop, initial=initial),
curry(_reduce, combine),
split_every=split_every)
else:
from toolz.curried import reduce
return self.reduction(reduce(binop), reduce(combine),
split_every=split_every)
def frequencies(self, split_every=None):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
        {'Alice': 2, 'Bob': 1}
"""
return self.reduction(frequencies, merge_frequencies,
out_type=Bag, split_every=split_every,
name='frequencies').map_partitions(dictitems)
def topk(self, k, key=None, split_every=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, k, key=key)
else:
func = partial(topk, k)
return self.reduction(func, compose(func, toolz.concat), out_type=Bag,
split_every=split_every, name='topk')
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
return self.reduction(set, curry(apply, set.union), out_type=Bag,
name='distinct')
def reduction(self, perpartition, aggregate, split_every=None,
out_type=Item, name=None):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
split_every: int (optional)
Group partitions into groups of this size while performing reduction
Defaults to 8
out_type: {Bag, Item}
The out type of the result, Item if a single element, Bag if a list
of elements. Defaults to Item.
Examples
--------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
if split_every is None:
split_every = 8
if split_every is False:
split_every = self.npartitions
token = tokenize(self, perpartition, aggregate, split_every)
a = '%s-part-%s' % (name or funcname(perpartition), token)
dsk = dict(((a, i), (empty_safe_apply, perpartition, (self.name, i)))
for i in range(self.npartitions))
k = self.npartitions
b = a
fmt = '%s-aggregate-%s' % (name or funcname(aggregate), token)
depth = 0
while k > 1:
c = fmt + str(depth)
dsk2 = dict(((c, i), (empty_safe_aggregate,
aggregate,
[(b, j) for j in inds]))
for i, inds in enumerate(partition_all(split_every,
range(k))))
dsk.update(dsk2)
k = len(dsk2)
b = c
depth += 1
if out_type is Item:
dsk[b] = dsk.pop((b, 0))
return Item(merge(self.dask, dsk), b)
else:
return Bag(merge(self.dask, dsk), b, 1)
@wraps(sum)
def sum(self, split_every=None):
return self.reduction(sum, sum, split_every=split_every)
@wraps(max)
def max(self, split_every=None):
return self.reduction(max, max, split_every=split_every)
@wraps(min)
def min(self, split_every=None):
return self.reduction(min, min, split_every=split_every)
@wraps(any)
def any(self, split_every=None):
return self.reduction(any, any, split_every=split_every)
@wraps(all)
def all(self, split_every=None):
return self.reduction(all, all, split_every=split_every)
def count(self, split_every=None):
""" Count the number of elements """
return self.reduction(count, sum, split_every=split_every)
def mean(self):
""" Arithmetic mean """
def mean_chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def mean_aggregate(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(mean_chunk, mean_aggregate, split_every=False)
def var(self, ddof=0):
""" Variance """
def var_chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def var_aggregate(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(var_chunk, var_aggregate, split_every=False)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = 'join-' + tokenize(self, other, on_self, on_other)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = 'product-' + tokenize(self, other)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return type(self)(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
        >>> b.groupby(key).map(lambda kv: (kv[0], reduction(kv[1])))  # doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
**Key Function**
The key function determines how to group the elements in your bag.
        In the common case where your bag holds dictionaries, the key
        function typically pulls out one of each dictionary's fields.
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
**Binops**
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
        ``binop`` and ``combine``. The two inputs and the output of each binary
        operator must have the same type; a runnable sketch of these semantics
        follows this class definition.
Binop takes a running total and a new element and produces a new total:
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them:
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
See Also
--------
toolz.reduceby
pyspark.combineByKey
"""
token = tokenize(self, key, binop, initial, combine, combine_initial)
a = 'foldby-a-' + token
b = 'foldby-b-' + token
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
def combine2(acc, x):
return combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems, (
reduceby, 0, combine2, (
toolz.concat, (
map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems, (
merge_with, (partial, reduce, combine),
list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, npartitions=1, compute=True):
""" Take the first k elements
Parameters
----------
k : int
The number of elements to return
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
            default of 1. If there are fewer than ``k`` elements in the first
            ``npartitions``, a warning will be raised and any found elements
            returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
raise ValueError("only {} partitions, take "
"received {}".format(self.npartitions, npartitions))
token = tokenize(self, k, npartitions)
name = 'take-' + token
if npartitions > 1:
name_p = 'take-partial-' + token
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (list, (take, k, (self.name, i)))
concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)]))
dsk[(name, 0)] = (safe_take, k, concat)
else:
dsk = {(name, 0): (safe_take, k, (self.name, 0))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = 'concat-' + tokenize(self)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, method=None, npartitions=None, blocksize=2**20,
max_branch=None):
""" Group collection by key function
This requires a full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
Parameters
----------
grouper: function
Function on which to group elements
method: str
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
npartitions: int
If using the disk-based shuffle, the number of output partitions
blocksize: int
If using the disk-based shuffle, the size of shuffle blocks
max_branch: int
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
Examples
--------
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> dict(b.groupby(iseven)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if method is None:
get = _globals.get('get')
if (isinstance(get, types.MethodType) and
'distributed' in get.__func__.__module__):
method = 'tasks'
else:
method = 'disk'
if method == 'disk':
return groupby_disk(self, grouper, npartitions=npartitions,
blocksize=blocksize)
elif method == 'tasks':
return groupby_tasks(self, grouper, max_branch=max_branch)
else:
raise NotImplementedError(
"Shuffle method must be 'disk' or 'tasks'")
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuples, dict records, or scalars.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Parameters
----------
columns : pandas.DataFrame or list, optional
If a ``pandas.DataFrame``, it should mirror the column names and
dtypes of the output dataframe. If a list, it provides the desired
column names. If not provided or a list, a single element from
the first partition will be computed, triggering a potentially
expensive call to ``compute``. Providing a list is only useful for
            selecting a subset of columns; to avoid an internal compute call you
            must provide a ``pandas.DataFrame``, as dask requires dtype knowledge
            ahead of time.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if isinstance(columns, pd.DataFrame):
meta = columns
else:
head = self.take(1)[0]
meta = pd.DataFrame([head], columns=columns)
columns = list(meta.columns)
name = 'to_dataframe-' + tokenize(self, columns)
DataFrame = partial(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, meta, divisions)
def to_delayed(self):
""" Convert bag to list of dask Delayed
Returns list of Delayed, one per partition.
"""
from dask.delayed import Delayed
return [Delayed(k, [self.dask]) for k in self._keys()]
def repartition(self, npartitions):
""" Coalesce bag into fewer partitions
Examples
--------
>>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP
"""
if npartitions > self.npartitions:
raise NotImplementedError(
"Repartition only supports going to fewer partitions\n"
" old: %d new: %d" % (self.npartitions, npartitions))
npartitions_ratio = self.npartitions / npartitions
new_partitions_boundaries = [int(old_partition_index * npartitions_ratio)
for old_partition_index in range(npartitions + 1)]
new_name = 'repartition-%d-%s' % (npartitions, tokenize(self))
dsk = {(new_name, new_partition_index):
(list,
(toolz.concat,
[(self.name, old_partition_index)
for old_partition_index in range(
new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])]))
for new_partition_index in range(npartitions)}
return Bag(dsk=merge(self.dask, dsk), name=new_name, npartitions=npartitions)
def accumulate(self, binop, initial=no_default):
"""Repeatedly apply binary function to a sequence, accumulating results.
Examples
--------
>>> from operator import add
>>> b = from_sequence([1, 2, 3, 4, 5], npartitions=2)
>>> b.accumulate(add).compute() # doctest: +SKIP
[1, 3, 6, 10, 15]
Accumulate also takes an optional argument that will be used as the
first value.
>>> b.accumulate(add, -1) # doctest: +SKIP
[-1, 0, 2, 5, 9, 15]
"""
if not _implement_accumulate:
raise NotImplementedError("accumulate requires `toolz` > 0.7.4"
" or `cytoolz` > 0.7.3.")
token = tokenize(self, binop, initial)
binop_name = funcname(binop)
a = '%s-part-%s' % (binop_name, token)
b = '%s-first-%s' % (binop_name, token)
c = '%s-second-%s' % (binop_name, token)
dsk = {(a, 0): (accumulate_part, binop, (self.name, 0), initial, True),
(b, 0): (first, (a, 0)),
(c, 0): (second, (a, 0))}
for i in range(1, self.npartitions):
dsk[(a, i)] = (accumulate_part, binop, (self.name, i),
(c, i - 1))
dsk[(b, i)] = (first, (a, i))
dsk[(c, i)] = (second, (a, i))
return Bag(merge(self.dask, dsk), b, self.npartitions)
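# Illustrative sketch (not part of dask): what ``Bag.fold`` and ``Bag.foldby``
# compute, spelled out with ``functools.reduce`` and ``toolz.reduceby`` over
# two explicit in-memory partitions.  The partition layout is hypothetical;
# the expected ``foldby`` result matches the docstring example above.
def _fold_foldby_sketch():
    from functools import reduce as pyreduce
    from toolz import reduceby as treduceby, merge_with as tmerge_with
    partitions = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
    add = lambda x, y: x + y
    # fold: reduce each partition, then combine the intermediates
    intermediates = [pyreduce(add, part) for part in partitions]
    total = pyreduce(add, intermediates)
    assert total == sum(range(10))
    # foldby: reduceby per partition, then merge per-key totals with ``combine``
    iseven = lambda x: x % 2 == 0
    per_partition = [treduceby(iseven, add, part) for part in partitions]
    combined = tmerge_with(sum, *per_partition)
    assert combined == {True: 20, False: 25}
    return total, combined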
def accumulate_part(binop, seq, initial, is_first=False):
if initial == no_default:
res = list(accumulate(binop, seq))
else:
res = list(accumulate(binop, seq, initial=initial))
if is_first:
return res, res[-1] if res else [], initial
return res[1:], res[-1]
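# Illustrative sketch (not part of dask): how ``Bag.accumulate`` threads the
# running total across partitions -- the last accumulated value of one
# partition seeds ``initial`` for the next, and the seed itself is dropped.
# Requires a toolz/cytoolz recent enough to accept ``initial`` (the same
# check ``Bag.accumulate`` performs); the two partitions are hypothetical.
def _accumulate_carry_sketch():
    from operator import add
    from toolz import accumulate as taccumulate
    part1, part2 = [1, 2, 3], [4, 5]
    first_part = list(taccumulate(add, part1))                       # [1, 3, 6]
    carry = first_part[-1]
    second_part = list(taccumulate(add, part2, initial=carry))[1:]   # [10, 15]
    assert first_part + second_part == [1, 3, 6, 10, 15]
    return first_part + second_part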
normalize_token.register(Item, lambda a: a.key)
normalize_token.register(Bag, lambda a: a.name)
def partition(grouper, sequence, npartitions, p, nelements=2**20):
""" Partition a bag along a grouper, store partitions on disk """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2, fsync=True)
return p
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
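# Illustrative sketch (not part of dask): the bucketing rule used by
# ``partition`` above, minus the on-disk partd store -- elements are grouped
# by the grouper and each group lands in bucket ``abs(hash(key)) % npartitions``.
# The block of data below is hypothetical.
def _hash_bucket_sketch(npartitions=2):
    from collections import defaultdict
    from toolz import groupby as tgroupby
    block = ['Alice', 'Bob', 'Alice', 'Charlie']
    buckets = defaultdict(list)
    for k, vals in tgroupby(lambda x: x, block).items():
        buckets[abs(hash(k)) % npartitions].extend(vals)
    # every element ends up in exactly one bucket
    assert sum(len(v) for v in buckets.values()) == len(block)
    return dict(buckets)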
def from_filenames(filenames, chunkbytes=None, compression='infer',
encoding=system_encoding, linesep=os.linesep):
""" Deprecated. See read_text """
warn("db.from_filenames is deprecated in favor of db.read_text")
from .text import read_text
return read_text(filenames, blocksize=chunkbytes, compression=compression,
encoding=encoding, linedelimiter=linesep)
def write(data, filename, compression, encoding):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
f = open(filename, mode='wb', compression=compression)
# Check presence of endlines
data = iter(data)
try:
firstline = next(data)
except StopIteration:
f.close()
return
if not (firstline.endswith(os.linesep) or firstline.endswith('\n')):
sep = os.linesep if firstline.endswith(os.linesep) else '\n'
firstline = firstline + sep
data = (line + sep for line in data)
f.write(firstline.encode(encoding))
try:
lastline = ''
for line in data:
f.write(lastline.encode(encoding))
lastline = line
f.write(lastline.rstrip(os.linesep).encode(encoding))
finally:
f.close()
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See Also
--------
read_text: Create bag from textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = 'from_sequence-' + tokenize(seq, partition_size)
d = dict(((name, i), list(part)) for i, part in enumerate(parts))
return Bag(d, name, len(d))
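# Illustrative sketch (not part of dask): the partition-sizing arithmetic used
# by ``from_sequence`` when ``npartitions`` is given -- the partition size is
# the ceiling of ``len(seq) / npartitions`` and ``toolz.partition_all`` does
# the chunking.  The sequence below is hypothetical.
def _partition_size_sketch():
    import math
    from toolz import partition_all
    seq, npartitions = list(range(10)), 3
    partition_size = int(math.ceil(len(seq) / float(npartitions)))   # == 4
    parts = list(partition_all(partition_size, seq))
    assert parts == [(0, 1, 2, 3), (4, 5, 6, 7), (8, 9)]
    return parts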
def from_castra(x, columns=None, index=False):
"""Load a dask Bag from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
index: bool, optional
If True, the index is included as the first element in each tuple.
Default is False.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
elif not x._readonly:
x = Castra(x.path, readonly=True)
if columns is None:
columns = x.columns
name = 'from-castra-' + tokenize(os.path.getmtime(x.path), x.path,
columns, index)
dsk = dict(((name, i), (load_castra_partition, x, part, columns, index))
for i, part in enumerate(x.partitions))
return Bag(dsk, name, len(x.partitions))
def load_castra_partition(castra, part, columns, index):
import blosc
# Due to serialization issues, blosc needs to be manually initialized in
# each process.
blosc.init()
df = castra.load_partition(part, columns)
if isinstance(columns, list):
items = df.itertuples(index)
else:
items = df.iteritems() if index else iter(df)
items = list(items)
if items and isinstance(items[0], tuple) and type(items[0]) is not tuple:
names = items[0]._fields
items = [dict(zip(names, item)) for item in items]
return items
def from_url(urls):
"""Create a dask.bag from a url
>>> a = from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>>> a.take(8) # doctest: +SKIP
('Dask\\n',
'====\\n',
'\\n',
'|Build Status| |Coverage| |Doc Status| |Gitter|\\n',
'\\n',
'Dask provides multi-core execution on larger-than-memory datasets using blocked\\n',
'algorithms and task scheduling. It maps high-level NumPy and list operations\\n',
'on large datasets on to graphs of many operations on small in-memory datasets.\\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = 'from_url-' + uuid.uuid4().hex
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = 'concat-' + tokenize(*bags)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key)
for bag in bags for key in sorted(bag._keys()))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
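# Illustrative sketch (not part of dask): ``reify`` materializes lazy
# iterators (and one level of nested iterators) into concrete lists so that
# partition results can be inspected, reused and serialized.
def _reify_sketch():
    lazy = (x * 10 for x in range(3))
    assert reify(lazy) == [0, 10, 20]
    nested = iter([iter([1, 2]), iter([3])])
    assert reify(nested) == [[1, 2], [3]]
    plain = [1, 2, 3]
    assert reify(plain) is plain           # already-concrete input passes through
    return True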
def from_delayed(values):
""" Create bag from many dask.delayed objects
These objects will become the partitions of the resulting Bag. They should
evaluate to a ``list`` or some other concrete sequence.
Parameters
----------
values: list of delayed values
An iterable of dask Delayed objects. Each evaluating to a list.
Returns
-------
Bag
Examples
--------
>>> x, y, z = [delayed(load_sequence_from_file)(fn)
... for fn in filenames] # doctest: +SKIP
>>> b = from_delayed([x, y, z]) # doctest: +SKIP
See also
--------
dask.delayed
"""
from dask.delayed import Delayed
if isinstance(values, Delayed):
values = [values]
dsk = merge(v.dask for v in values)
name = 'bag-from-delayed-' + tokenize(*values)
names = [(name, i) for i in range(len(values))]
values = [v.key for v in values]
dsk2 = dict(zip(names, values))
return Bag(merge(dsk, dsk2), name, len(values))
def merge_frequencies(seqs):
first, rest = seqs[0], seqs[1:]
if not rest:
return first
out = defaultdict(int)
out.update(first)
for d in rest:
for k, v in iteritems(d):
out[k] += v
return out
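# Illustrative sketch (not part of dask): ``merge_frequencies`` sums
# per-partition frequency dicts, which is how ``Bag.frequencies`` aggregates
# its tree reduction.  The two partial counts below are hypothetical.
def _merge_frequencies_sketch():
    part1 = {'Alice': 2, 'Bob': 1}
    part2 = {'Alice': 1, 'Charlie': 3}
    merged = merge_frequencies([part1, part2])
    assert merged == {'Alice': 3, 'Bob': 1, 'Charlie': 3}
    assert merge_frequencies([part1]) == part1   # single input returned as-is
    return merged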
def bag_range(n, npartitions):
""" Numbers from zero to n
Examples
--------
>>> import dask.bag as db
>>> b = db.range(5, npartitions=2)
>>> list(b)
[0, 1, 2, 3, 4]
"""
size = n // npartitions
name = 'range-%d-npartitions-%d' % (n, npartitions)
ijs = list(enumerate(take(npartitions, range(0, n, size))))
dsk = dict(((name, i), (reify, (range, j, min(j + size, n))))
for i, j in ijs)
if n % npartitions != 0:
i, j = ijs[-1]
dsk[(name, i)] = (reify, (range, j, n))
return Bag(dsk, name, npartitions)
def bag_zip(*bags):
""" Partition-wise bag zip
All passed bags must have the same number of partitions.
NOTE: corresponding partitions should have the same length; if they do not,
the "extra" elements from the longer partition(s) will be dropped. If you
    are in this situation, chances are that what you really need is a data alignment
mechanism like pandas's, and not a missing value filler like zip_longest.
Examples
--------
Correct usage:
>>> import dask.bag as db
>>> evens = db.from_sequence(range(0, 10, 2), partition_size=4)
>>> odds = db.from_sequence(range(1, 10, 2), partition_size=4)
>>> pairs = db.zip(evens, odds)
>>> list(pairs)
[(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
Incorrect usage:
>>> numbers = db.range(20) # doctest: +SKIP
>>> fizz = numbers.filter(lambda n: n % 3 == 0) # doctest: +SKIP
>>> buzz = numbers.filter(lambda n: n % 5 == 0) # doctest: +SKIP
>>> fizzbuzz = db.zip(fizz, buzz) # doctest: +SKIP
    >>> list(fizzbuzz) # doctest: +SKIP
    [(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)]
    When what you really wanted was more along the lines of:
    >>> list(fizzbuzz) # doctest: +SKIP
    [(0, 0), (3, None), (None, 5), (6, None), (None, 10), (9, None),
    (12, None), (15, 15), (18, None), (None, 20), (None, 25), (None, 30)]
"""
npartitions = bags[0].npartitions
assert all(bag.npartitions == npartitions for bag in bags)
# TODO: do more checks
name = 'zip-' + tokenize(*bags)
dsk = dict(
((name, i), (reify, (zip,) + tuple((bag.name, i) for bag in bags)))
for i in range(npartitions))
bags_dsk = merge(*(bag.dask for bag in bags))
return Bag(merge(bags_dsk, dsk), name, npartitions)
def _reduce(binop, sequence, initial=no_default):
if initial is not no_default:
return reduce(binop, sequence, initial)
else:
return reduce(binop, sequence)
def make_group(k, stage):
def h(x):
return x[0] // k ** stage % k
return h
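# Illustrative sketch (not part of dask): in the task-based shuffle each
# element is tagged with ``hash(grouper(x))`` and ``make_group`` then reads
# off one base-``k`` digit of that hash per stage, so equal hashes end up in
# the same partition after all stages.  ``k = 4`` and the hash value are
# hypothetical.
def _make_group_digit_sketch():
    k = 4
    hashed = 37                      # 37 == 2*16 + 1*4 + 1 in base 4
    digits = [make_group(k, stage)((hashed, None)) for stage in range(3)]
    assert digits == [1, 1, 2]       # least-significant digit first
    return digits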
def groupby_tasks(b, grouper, hash=hash, max_branch=32):
max_branch = max_branch or 32
n = b.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages))
for i in range(k**stages)]
b2 = b.map(lambda x: (hash(grouper(x)), x))
token = tokenize(b, grouper, hash, max_branch)
start = dict((('shuffle-join-' + token, 0, inp),
(b2.name, i) if i < b.npartitions else [])
for i, inp in enumerate(inputs))
for stage in range(1, stages + 1):
group = dict((('shuffle-group-' + token, stage, inp),
(groupby,
(make_group, k, stage - 1),
('shuffle-join-' + token, stage - 1, inp)))
for inp in inputs)
split = dict((('shuffle-split-' + token, stage, i, inp),
(dict.get, ('shuffle-group-' + token, stage, inp), i, {}))
for i in range(k)
for inp in inputs)
join = dict((('shuffle-join-' + token, stage, inp),
(list, (toolz.concat,
[('shuffle-split-' + token, stage, inp[stage-1],
insert(inp, stage - 1, j)) for j in range(k)])))
for inp in inputs)
groups.append(group)
splits.append(split)
joins.append(join)
end = dict((('shuffle-' + token, i),
(list, (dict.items, (groupby, grouper, (pluck, 1, j)))))
for i, j in enumerate(join))
dsk = merge(b2.dask, start, end, *(groups + splits + joins))
return type(b)(dsk, 'shuffle-' + token, len(inputs))
def groupby_disk(b, grouper, npartitions=None, blocksize=2**20):
if npartitions is None:
npartitions = b.npartitions
token = tokenize(b, grouper, npartitions, blocksize)
import partd
p = ('partd-' + token,)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, partd.File()))}
except AttributeError:
dsk1 = {p: (partd.Python, partd.File())}
# Partition data on disk
name = 'groupby-part-{0}-{1}'.format(funcname(grouper), token)
dsk2 = dict(((name, i), (partition, grouper, (b.name, i),
npartitions, p, blocksize))
for i in range(b.npartitions))
# Barrier
barrier_token = 'groupby-barrier-' + token
def barrier(args):
return 0
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'groupby-collect-' + token
dsk4 = dict(((name, i),
(collect, grouper, i, p, barrier_token))
for i in range(npartitions))
return type(b)(merge(b.dask, dsk1, dsk2, dsk3, dsk4), name,
npartitions)
def empty_safe_apply(func, part):
part = list(part)
if part:
return func(part)
else:
return no_result
def empty_safe_aggregate(func, parts):
parts2 = [p for p in parts if not eq_strict(p, no_result)]
return empty_safe_apply(func, parts2)
def safe_take(n, b):
r = list(take(n, b))
if len(r) != n:
warn("Insufficient elements for `take`. {0} elements requested, "
"only {1} elements available. Try passing larger `npartitions` "
"to `take`.".format(n, len(r)))
return r
| {
"repo_name": "cowlicks/dask",
"path": "dask/bag/core.py",
"copies": "1",
"size": "55166",
"license": "bsd-3-clause",
"hash": -5104500047682802000,
"line_mean": 32.4136886735,
"line_max": 102,
"alpha_frac": 0.5554326941,
"autogenerated": false,
"ratio": 3.7797875984926343,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4835220292592634,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterable, Iterator
from functools import partial
from glob import glob
import gzip
from ..dispatch import dispatch
from .csv import *
from .json import *
from .hdf5 import *
from .meta import *
from .sql import *
from ..compatibility import urlopen, _strtypes
__all__ = ['resource', 'copy', 'into']
filetypes = {'csv': CSV,
'tsv': CSV,
'json': JSON,
'h5': HDF5,
'hdf5': HDF5}
opens = {'http': urlopen,
'https': urlopen,
#'ssh': paramiko.open?
}
def resource(uri, **kwargs):
""" Get data resource from universal resource indicator
Supports the following logic:
* Infer data format based on the file extension (.csv, .json. .hdf5)
* Use ``gzip.open`` if files end in ``.gz`` extension (csv, json only)
* Use ``urlopen`` if web protocols detected (http, https)
* Use SQL if text ``sql`` found in protocol string
URI may be in any of the following forms
>>> uri = '/path/to/data.csv' # csv, json, etc...
>>> uri = '/path/to/data.json.gz' # handles gzip
    >>> uri = '/path/to/*/many*/data.*.json' # glob string - many files
>>> uri = '/path/to/data.hdf5::/path/within/hdf5' # HDF5 path :: datapath
>>> uri = 'postgresql://sqlalchemy.uri::tablename'# SQLAlchemy :: tablename
>>> uri = 'http://api.domain.com/data.json' # Web requests
Note that this follows standard ``protocol://path`` syntax. In cases where
more information is needed, such as an HDF5 datapath or a SQL table name
the additional information follows two colons `::` as in the following
/path/to/data.hdf5::/datapath
"""
descriptor = None
args = []
in_uri = uri
if '::' in uri:
uri, datapath = uri.rsplit('::')
args.insert(0, datapath)
extensions = uri.split('.')
if extensions[-1] == 'gz':
kwargs['open'] = kwargs.get('open', gzip.open)
extensions.pop()
descriptor = filetypes.get(extensions[-1], None)
if '://' in uri:
protocol, _ = uri.split('://')
if protocol in opens:
kwargs['open'] = kwargs.get('open', opens[protocol])
if 'sql' in protocol:
descriptor = SQL
try:
filenames = glob(uri)
    except Exception:  # non-filesystem URIs may not glob cleanly; fall back to no matches
filenames = []
if len(filenames) > 1:
resources = [resource(in_uri.replace(uri, filename), **kwargs)
for filename in filenames]
return Stack(resources)
if descriptor:
return descriptor(uri, *args, **kwargs)
raise ValueError('Unknown resource type\n\t%s' % uri)
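# Illustrative sketch (not part of blaze): the string handling ``resource``
# applies before dispatching -- split off a ``::datapath``, strip a ``.gz``
# suffix, then look the remaining extension up in ``filetypes``.  The URI
# below is hypothetical.
def _parse_uri_sketch(uri='/data/accounts.json.gz::ignored'):
    args = []
    if '::' in uri:
        uri, datapath = uri.rsplit('::', 1)
        args.insert(0, datapath)
    extensions = uri.split('.')
    gzipped = extensions[-1] == 'gz'
    if gzipped:
        extensions.pop()
    descriptor = filetypes.get(extensions[-1], None)
    assert descriptor is filetypes['json'] and gzipped and args == ['ignored']
    return descriptor, gzipped, args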
def copy(src, dest, **kwargs):
""" Copy content from one data descriptor to another """
dest.extend_chunks(src.chunks(**kwargs))
@dispatch(DataDescriptor, (DataDescriptor, Iterable, Iterator))
def into(a, b):
a.extend(b)
return a
| {
"repo_name": "aterrel/blaze",
"path": "blaze/data/usability.py",
"copies": "1",
"size": "2985",
"license": "bsd-3-clause",
"hash": -4053828978575719400,
"line_mean": 29.1515151515,
"line_max": 79,
"alpha_frac": 0.5949748744,
"autogenerated": false,
"ratio": 3.88671875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49816936244,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterable
from functools import partial
import numpy as np
import pandas as pd
from . import rolling
from .computation import apply_ufunc
from .npcompat import flip
from .pycompat import iteritems
from .utils import is_scalar
class BaseInterpolator(object):
    '''Generic interpolator class for normalizing interpolation methods'''
cons_kwargs = {}
call_kwargs = {}
f = None
method = None
def __init__(self, xi, yi, method=None, **kwargs):
self.method = method
self.call_kwargs = kwargs
def __call__(self, x):
return self.f(x, **self.call_kwargs)
def __repr__(self):
return "{type}: method={method}".format(type=self.__class__.__name__,
method=self.method)
class NumpyInterpolator(BaseInterpolator):
'''One-dimensional linear interpolation.
See Also
--------
numpy.interp
'''
def __init__(self, xi, yi, method='linear', fill_value=None, **kwargs):
if method != 'linear':
raise ValueError(
'only method `linear` is valid for the NumpyInterpolator')
self.method = method
self.f = np.interp
self.cons_kwargs = kwargs
self.call_kwargs = {'period': self.cons_kwargs.pop('period', None)}
self._xi = xi
self._yi = yi
if self.cons_kwargs:
raise ValueError(
                'received invalid kwargs: %r' % self.cons_kwargs.keys())
if fill_value is None:
self._left = np.nan
self._right = np.nan
elif isinstance(fill_value, Iterable) and len(fill_value) == 2:
self._left = fill_value[0]
self._right = fill_value[1]
elif is_scalar(fill_value):
self._left = fill_value
self._right = fill_value
else:
raise ValueError('%s is not a valid fill_value' % fill_value)
def __call__(self, x):
return self.f(x, self._xi, self._yi, left=self._left,
right=self._right, **self.call_kwargs)
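# Illustrative sketch (not part of xarray): ``NumpyInterpolator`` is a thin
# object wrapper around ``np.interp`` -- construct it with the known points,
# then call it on new x values.  The sample points below are hypothetical.
def _numpy_interpolator_sketch():
    import numpy as np
    xi = np.array([0.0, 1.0, 2.0])
    yi = np.array([0.0, 10.0, 20.0])
    interp = NumpyInterpolator(xi, yi, method='linear')
    out = interp(np.array([0.5, 1.5]))
    np.testing.assert_allclose(out, [5.0, 15.0])
    return out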
class ScipyInterpolator(BaseInterpolator):
'''Interpolate a 1-D function using Scipy interp1d
See Also
--------
scipy.interpolate.interp1d
'''
def __init__(self, xi, yi, method=None, fill_value=None,
assume_sorted=True, copy=False, bounds_error=False, **kwargs):
from scipy.interpolate import interp1d
if method is None:
raise ValueError('method is a required argument, please supply a '
                             'valid scipy.interpolate.interp1d method (kind)')
if method == 'polynomial':
method = kwargs.pop('order', None)
if method is None:
raise ValueError('order is required when method=polynomial')
self.method = method
self.cons_kwargs = kwargs
self.call_kwargs = {}
if fill_value is None and method == 'linear':
fill_value = kwargs.pop('fill_value', (np.nan, np.nan))
elif fill_value is None:
fill_value = np.nan
self.f = interp1d(xi, yi, kind=self.method, fill_value=fill_value,
bounds_error=False, assume_sorted=assume_sorted,
copy=copy, **self.cons_kwargs)
class SplineInterpolator(BaseInterpolator):
'''One-dimensional smoothing spline fit to a given set of data points.
See Also
--------
scipy.interpolate.UnivariateSpline
'''
def __init__(self, xi, yi, method='spline', fill_value=None, order=3,
**kwargs):
from scipy.interpolate import UnivariateSpline
if method != 'spline':
raise ValueError(
'only method `spline` is valid for the SplineInterpolator')
self.method = method
self.cons_kwargs = kwargs
        # use an instance-level dict; writing into the class-level
        # ``call_kwargs`` default would leak state across instances
        self.call_kwargs = {'nu': kwargs.pop('nu', 0),
                            'ext': kwargs.pop('ext', None)}
if fill_value is not None:
raise ValueError('SplineInterpolator does not support fill_value')
self.f = UnivariateSpline(xi, yi, k=order, **self.cons_kwargs)
def _apply_over_vars_with_dim(func, self, dim=None, **kwargs):
'''wrapper for datasets'''
ds = type(self)(coords=self.coords, attrs=self.attrs)
for name, var in iteritems(self.data_vars):
if dim in var.dims:
ds[name] = func(var, dim=dim, **kwargs)
else:
ds[name] = var
return ds
def get_clean_interp_index(arr, dim, use_coordinate=True, **kwargs):
'''get index to use for x values in interpolation.
If use_coordinate is True, the coordinate that shares the name of the
dimension along which interpolation is being performed will be used as the
x values.
If use_coordinate is False, the x values are set as an equally spaced
sequence.
'''
if use_coordinate:
if use_coordinate is True:
index = arr.get_index(dim)
else:
index = arr.coords[use_coordinate]
if index.ndim != 1:
raise ValueError(
'Coordinates used for interpolation must be 1D, '
'%s is %dD.' % (use_coordinate, index.ndim))
# raise if index cannot be cast to a float (e.g. MultiIndex)
try:
index = index.values.astype(np.float64)
except (TypeError, ValueError):
# pandas raises a TypeError
            # xarray/numpy raise a ValueError
            raise TypeError('Index must be castable to float64 to support '
                            'interpolation, got: %s' % type(index))
# check index sorting now so we can skip it later
if not (np.diff(index) > 0).all():
raise ValueError("Index must be monotonicly increasing")
else:
axis = arr.get_axis_num(dim)
index = np.arange(arr.shape[axis], dtype=np.float64)
return index
def interp_na(self, dim=None, use_coordinate=True, method='linear', limit=None,
**kwargs):
'''Interpolate values according to different methods.'''
if dim is None:
raise NotImplementedError('dim is a required argument')
if limit is not None:
valids = _get_valid_fill_mask(self, dim, limit)
# method
index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate,
**kwargs)
interpolator = _get_interpolator(method, **kwargs)
arr = apply_ufunc(interpolator, index, self,
input_core_dims=[[dim], [dim]],
output_core_dims=[[dim]],
output_dtypes=[self.dtype],
dask='parallelized',
vectorize=True,
keep_attrs=True).transpose(*self.dims)
if limit is not None:
arr = arr.where(valids)
return arr
def wrap_interpolator(interpolator, x, y, **kwargs):
'''helper function to apply interpolation along 1 dimension'''
# it would be nice if this wasn't necessary, works around:
# "ValueError: assignment destination is read-only" in assignment below
out = y.copy()
nans = pd.isnull(y)
nonans = ~nans
# fast track for no-nans and all-nans cases
n_nans = nans.sum()
if n_nans == 0 or n_nans == len(y):
return y
f = interpolator(x[nonans], y[nonans], **kwargs)
out[nans] = f(x[nans])
return out
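# Illustrative sketch (not part of xarray): ``wrap_interpolator`` fits the
# chosen interpolator on the non-NaN points only and writes predictions back
# into the NaN slots; NaN-free or all-NaN input is returned unchanged.  The
# data below is hypothetical.
def _wrap_interpolator_sketch():
    import numpy as np
    x = np.arange(5.0)
    y = np.array([0.0, np.nan, 2.0, np.nan, 4.0])
    filled = wrap_interpolator(NumpyInterpolator, x, y, method='linear')
    np.testing.assert_allclose(filled, [0.0, 1.0, 2.0, 3.0, 4.0])
    return filled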
def _bfill(arr, n=None, axis=-1):
'''inverse of ffill'''
import bottleneck as bn
arr = flip(arr, axis=axis)
# fill
arr = bn.push(arr, axis=axis, n=n)
# reverse back to original
return flip(arr, axis=axis)
def ffill(arr, dim=None, limit=None):
'''forward fill missing values'''
import bottleneck as bn
axis = arr.get_axis_num(dim)
# work around for bottleneck 178
_limit = limit if limit is not None else arr.shape[axis]
return apply_ufunc(bn.push, arr,
dask='parallelized',
keep_attrs=True,
output_dtypes=[arr.dtype],
kwargs=dict(n=_limit, axis=axis)).transpose(*arr.dims)
def bfill(arr, dim=None, limit=None):
'''backfill missing values'''
axis = arr.get_axis_num(dim)
# work around for bottleneck 178
_limit = limit if limit is not None else arr.shape[axis]
return apply_ufunc(_bfill, arr,
dask='parallelized',
keep_attrs=True,
output_dtypes=[arr.dtype],
kwargs=dict(n=_limit, axis=axis)).transpose(*arr.dims)
def _get_interpolator(method, **kwargs):
    '''helper function to select the appropriate interpolator class;
    returns a partial of wrap_interpolator
'''
interp1d_methods = ['linear', 'nearest', 'zero', 'slinear', 'quadratic',
'cubic', 'polynomial']
valid_methods = interp1d_methods + ['barycentric', 'krog', 'pchip',
'spline', 'akima']
if (method == 'linear' and not
kwargs.get('fill_value', None) == 'extrapolate'):
kwargs.update(method=method)
interp_class = NumpyInterpolator
elif method in valid_methods:
try:
from scipy import interpolate
except ImportError:
raise ImportError(
'Interpolation with method `%s` requires scipy' % method)
if method in interp1d_methods:
kwargs.update(method=method)
interp_class = ScipyInterpolator
elif method == 'barycentric':
interp_class = interpolate.BarycentricInterpolator
elif method == 'krog':
interp_class = interpolate.KroghInterpolator
elif method == 'pchip':
interp_class = interpolate.PchipInterpolator
elif method == 'spline':
kwargs.update(method=method)
interp_class = SplineInterpolator
elif method == 'akima':
interp_class = interpolate.Akima1DInterpolator
else:
raise ValueError('%s is not a valid scipy interpolator' % method)
else:
raise ValueError('%s is not a valid interpolator' % method)
return partial(wrap_interpolator, interp_class, **kwargs)
def _get_valid_fill_mask(arr, dim, limit):
'''helper function to determine values that can be filled when limit is not
None'''
kw = {dim: limit + 1}
# we explicitly use construct method to avoid copy.
new_dim = rolling._get_new_dimname(arr.dims, '_window')
return (arr.isnull().rolling(min_periods=1, **kw)
.construct(new_dim, fill_value=False)
.sum(new_dim, skipna=False)) <= limit
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/missing.py",
"copies": "1",
"size": "10787",
"license": "apache-2.0",
"hash": 2781953279753512000,
"line_mean": 31.2964071856,
"line_max": 79,
"alpha_frac": 0.5863539446,
"autogenerated": false,
"ratio": 4.050694705219677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5137048649819678,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterable
import datetime
import numpy as np
from pandas import DataFrame, Series
from datashape import to_numpy, to_numpy_dtype
from numbers import Number
from odo import odo
from ..expr import (
Reduction, Field, Projection, Broadcast, Selection, ndim,
Distinct, Sort, Tail, Head, Label, ReLabel, Expr, Slice, Join,
std, var, count, nunique, Summary, IsIn,
BinOp, UnaryOp, USub, Not, nelements, Repeat, Concat, Interp,
UTCFromTimestamp, DateTimeTruncate,
Transpose, TensorDot, Coerce, isnan,
greatest, least, BinaryMath, atan2, Coalesce, Cast
)
from ..utils import keywords
from .core import base, compute
from .pandas import array_coalesce
from .varargs import register_varargs_arity
from ..dispatch import dispatch
from odo import into
import pandas as pd
__all__ = ['np']
@dispatch(Field, np.ndarray)
def compute_up(c, x, **kwargs):
if x.dtype.names and c._name in x.dtype.names:
return x[c._name]
if not x.dtype.names and x.shape[1] == len(c._child.fields):
return x[:, c._child.fields.index(c._name)]
raise NotImplementedError() # pragma: no cover
@dispatch(Projection, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names and all(col in x.dtype.names for col in t.fields):
return x[t.fields]
if not x.dtype.names and x.shape[1] == len(t._child.fields):
return x[:, [t._child.fields.index(col) for col in t.fields]]
raise NotImplementedError() # pragma: no cover
try:
from .numba import broadcast_numba as broadcast_ndarray
except ImportError:
def broadcast_ndarray(t, *data, **kwargs):
del kwargs['scope']
d = dict(zip(t._scalar_expr._leaves(), data))
return compute(t._scalar_expr, d, return_type='native', **kwargs)
compute_up.register(Broadcast, np.ndarray)(broadcast_ndarray)
for i in range(2, 6):
compute_up.register(Broadcast, *([(np.ndarray, Number)] * i))(broadcast_ndarray)
@dispatch(Repeat, np.ndarray)
def compute_up(t, data, _char_mul=np.char.multiply, **kwargs):
if isinstance(t.lhs, Expr):
return _char_mul(data, t.rhs)
else:
return _char_mul(t.lhs, data)
@compute_up.register(Repeat, np.ndarray, (np.ndarray, base))
@compute_up.register(Repeat, base, np.ndarray)
def compute_up_np_repeat(t, lhs, rhs, _char_mul=np.char.multiply, **kwargs):
return _char_mul(lhs, rhs)
def _interp(arr, v, _Series=pd.Series, _charmod=np.char.mod):
"""
Delegate to the most efficient string formatting technique based on
the length of the array.
"""
if len(arr) >= 145:
return _Series(arr) % v
return _charmod(arr, v)
@dispatch(Interp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return _interp(data, t.rhs)
else:
return _interp(t.lhs, data)
@compute_up.register(Interp, np.ndarray, (np.ndarray, base))
@compute_up.register(Interp, base, np.ndarray)
def compute_up_np_interp(t, lhs, rhs, **kwargs):
return _interp(lhs, rhs)
@compute_up.register(greatest, np.ndarray, (np.ndarray, base))
@compute_up.register(greatest, base, np.ndarray)
def compute_up_greatest(expr, lhs, rhs, **kwargs):
return np.maximum(lhs, rhs)
@compute_up.register(least, np.ndarray, (np.ndarray, base))
@compute_up.register(least, base, np.ndarray)
def compute_up_least(expr, lhs, rhs, **kwargs):
return np.minimum(lhs, rhs)
@dispatch(BinOp, np.ndarray, (np.ndarray, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, base, np.ndarray)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return t.op(t.lhs, data)
@compute_up.register(BinaryMath, np.ndarray, (np.ndarray, base))
@compute_up.register(BinaryMath, base, np.ndarray)
def compute_up_binary_math(t, lhs, rhs, **kwargs):
return getattr(np, type(t).__name__)(lhs, rhs)
@dispatch(BinaryMath, np.ndarray)
def compute_up(t, data, **kwargs):
func = getattr(np, type(t).__name__)
if isinstance(t.lhs, Expr):
return func(data, t.rhs)
else:
return func(t.lhs, data)
@compute_up.register(atan2, np.ndarray, (np.ndarray, base))
@compute_up.register(atan2, base, np.ndarray)
def compute_up_binary_math(t, lhs, rhs, **kwargs):
return np.arctan2(lhs, rhs)
@dispatch(atan2, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return np.arctan2(data, t.rhs)
else:
return np.arctan2(t.lhs, data)
@dispatch(UnaryOp, np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(np, t.symbol)(x)
@dispatch(Not, np.ndarray)
def compute_up(t, x, **kwargs):
return np.logical_not(x)
@dispatch(USub, np.ndarray)
def compute_up(t, x, **kwargs):
return np.negative(x)
inat = np.datetime64('NaT').view('int64')
@dispatch(count, np.ndarray)
def compute_up(t, x, **kwargs):
result_dtype = to_numpy_dtype(t.dshape)
if issubclass(x.dtype.type, (np.floating, np.object_)):
return pd.notnull(x).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
elif issubclass(x.dtype.type, np.datetime64):
return (x.view('int64') != inat).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
else:
return np.ones(x.shape, dtype=result_dtype).sum(keepdims=t.keepdims,
axis=t.axis,
dtype=result_dtype)
@dispatch(nunique, np.ndarray)
def compute_up(t, x, **kwargs):
assert t.axis == tuple(range(ndim(t._child)))
result = len(np.unique(x))
if t.keepdims:
result = np.array([result])
return result
@dispatch(Reduction, np.ndarray)
def compute_up(t, x, **kwargs):
# can't use the method here, as they aren't Python functions
reducer = getattr(np, t.symbol)
if 'dtype' in keywords(reducer):
return reducer(x, axis=t.axis, keepdims=t.keepdims,
dtype=to_numpy_dtype(t.schema))
return reducer(x, axis=t.axis, keepdims=t.keepdims)
def axify(expr, axis, keepdims=False):
""" inject axis argument into expression
Helper function for compute_up(Summary, np.ndarray)
>>> from blaze import symbol
>>> s = symbol('s', '10 * 10 * int')
>>> expr = s.sum()
>>> axify(expr, axis=0)
sum(s, axis=(0,))
"""
return type(expr)(expr._child, axis=axis, keepdims=keepdims)
@dispatch(Summary, np.ndarray)
def compute_up(expr, data, **kwargs):
shape, dtype = to_numpy(expr.dshape)
if shape:
result = np.empty(shape=shape, dtype=dtype)
for n, v in zip(expr.names, expr.values):
result[n] = compute(
axify(v, expr.axis, expr.keepdims),
data,
return_type='native',
)
return result
else:
return tuple(
compute(axify(v, expr.axis), data, return_type='native')
for v in expr.values
)
@dispatch((std, var), np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(x, t.symbol)(ddof=t.unbiased, axis=t.axis,
keepdims=t.keepdims)
@compute_up.register(Distinct, np.recarray)
def recarray_distinct(t, rec, **kwargs):
return pd.DataFrame.from_records(rec).drop_duplicates(
subset=t.on or None).to_records(index=False).astype(rec.dtype)
@dispatch(Distinct, np.ndarray)
def compute_up(t, arr, _recarray_distinct=recarray_distinct, **kwargs):
if t.on:
if getattr(arr.dtype, 'names', None) is not None:
return _recarray_distinct(t, arr, **kwargs).view(np.ndarray)
else:
raise ValueError('malformed expression: no columns to distinct on')
return np.unique(arr)
@dispatch(Sort, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names is None: # not a struct array
result = np.sort(x)
elif (t.key in x.dtype.names or # struct array
isinstance(t.key, list) and all(k in x.dtype.names for k in t.key)):
result = np.sort(x, order=t.key)
elif t.key:
raise NotImplementedError("Sort key %s not supported" % t.key)
if not t.ascending:
result = result[::-1]
return result
@dispatch(Head, np.ndarray)
def compute_up(t, x, **kwargs):
return x[:t.n]
@dispatch(Tail, np.ndarray)
def compute_up(t, x, **kwargs):
return x[-t.n:]
@dispatch(Label, np.ndarray)
def compute_up(t, x, **kwargs):
return np.array(x, dtype=[(t.label, x.dtype.type)])
@dispatch(ReLabel, np.ndarray)
def compute_up(t, x, **kwargs):
types = [x.dtype[i] for i in range(len(x.dtype))]
return np.array(x, dtype=list(zip(t.fields, types)))
@dispatch(Selection, np.ndarray)
def compute_up(sel, x, **kwargs):
predicate = compute(sel.predicate, {sel._child: x}, return_type='native')
cond = getattr(predicate, 'values', predicate)
return x[cond]
@dispatch(Selection, np.ndarray, np.ndarray)
def compute_up(expr, arr, predicate, **kwargs):
return arr[predicate]
@dispatch(Selection, np.ndarray, Series)
def compute_up(expr, arr, predicate, **kwargs):
return arr[predicate.values]
@dispatch(UTCFromTimestamp, np.ndarray)
def compute_up(expr, data, **kwargs):
return (data * 1e6).astype('datetime64[us]')
@dispatch(Slice, np.ndarray)
def compute_up(expr, x, **kwargs):
return x[expr.index]
@dispatch(Cast, np.ndarray)
def compute_up(t, x, **kwargs):
# resolve ambiguity with [Expr, np.array]
return x
@dispatch(Expr, np.ndarray)
def compute_up(t, x, **kwargs):
ds = t._child.dshape
if x.ndim > 1 or isinstance(x, np.recarray) or x.dtype.fields is not None:
return compute_up(t, into(DataFrame, x, dshape=ds), **kwargs)
else:
return compute_up(t, into(Series, x, dshape=ds), **kwargs)
# resolve the ambiguous overload here with [Expr, np.ndarray]
register_varargs_arity(1, type_=np.ndarray)
@dispatch(nelements, np.ndarray)
def compute_up(expr, data, **kwargs):
axis = expr.axis
if expr.keepdims:
shape = tuple(data.shape[i] if i not in axis else 1
for i in range(ndim(expr._child)))
else:
shape = tuple(data.shape[i] for i in range(ndim(expr._child))
if i not in axis)
value = np.prod([data.shape[i] for i in axis])
result = np.empty(shape)
result.fill(value)
result = result.astype('int64')
return result
# Note the use of 'week': 'M8[D]' here.
# We truncate week offsets "manually" in the compute_up implementation by first
# converting to days and then multiplying our measure by 7; this simplifies the code
# by only requiring us to calculate the week offset relative to the day of the week.
precision_map = {'year': 'M8[Y]',
'month': 'M8[M]',
'week': 'M8[D]',
'day': 'M8[D]',
'hour': 'M8[h]',
'minute': 'M8[m]',
'second': 'M8[s]',
'millisecond': 'M8[ms]',
'microsecond': 'M8[us]',
'nanosecond': 'M8[ns]'}
# these offsets are integers in units of their representation
epoch = datetime.datetime(1970, 1, 1)
offsets = {
'week': epoch.isoweekday(),
'day': epoch.toordinal() # number of days since *Python's* epoch (01/01/01)
}
@dispatch(DateTimeTruncate, (np.ndarray, np.datetime64))
def compute_up(expr, data, **kwargs):
np_dtype = precision_map[expr.unit]
offset = offsets.get(expr.unit, 0)
measure = expr.measure * 7 if expr.unit == 'week' else expr.measure
result = (((data.astype(np_dtype)
.view('int64')
+ offset)
// measure
* measure
- offset)
.astype(np_dtype))
return result
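# A minimal worked sketch of the week-truncation arithmetic above, using plain
# numpy (hypothetical standalone illustration; assumes numpy is available as np):
#
#     >>> dates = np.array(['2015-01-08', '2015-01-09'], dtype='M8[D]')
#     >>> offset = epoch.isoweekday()   # 1970-01-01 was a Thursday, so 4
#     >>> measure = 1 * 7               # one "week", expressed in days
#     >>> ((dates.view('int64') + offset) // measure * measure - offset).astype('M8[D]')
#     array(['2015-01-04', '2015-01-04'], dtype='datetime64[D]')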
@dispatch(isnan, np.ndarray)
def compute_up(expr, data, **kwargs):
return np.isnan(data)
@dispatch(np.ndarray)
def chunks(x, chunksize=1024):
start = 0
n = len(x)
while start < n:
yield x[start:start + chunksize]
start += chunksize
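# Hypothetical usage sketch of the chunker above (assumes numpy as np):
#
#     >>> [c.tolist() for c in chunks(np.arange(5), chunksize=2)]
#     [[0, 1], [2, 3], [4]]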
@dispatch(Transpose, np.ndarray)
def compute_up(expr, x, **kwargs):
return np.transpose(x, axes=expr.axes)
@dispatch(TensorDot, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, **kwargs):
return np.tensordot(lhs, rhs, axes=[expr._left_axes, expr._right_axes])
@dispatch(IsIn, np.ndarray, Iterable)
def compute_up(expr, data, keys, **kwargs):
return np.in1d(data, keys)
@compute_up.register(Join, DataFrame, np.ndarray)
@compute_up.register(Join, np.ndarray, DataFrame)
@compute_up.register(Join, np.ndarray, np.ndarray)
def join_ndarray(expr, lhs, rhs, **kwargs):
if isinstance(lhs, np.ndarray):
lhs = DataFrame(lhs)
if isinstance(rhs, np.ndarray):
rhs = DataFrame(rhs)
return compute_up(expr, lhs, rhs, **kwargs)
@dispatch(Coerce, np.ndarray)
def compute_up(expr, data, **kwargs):
return data.astype(to_numpy_dtype(expr.schema))
@dispatch(Concat, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, _concat=np.concatenate, **kwargs):
return _concat((lhs, rhs), axis=expr.axis)
compute_up.register(Coalesce, np.ndarray, (np.ndarray, base))(array_coalesce)
compute_up.register(Coalesce, base, np.ndarray)(array_coalesce)
@dispatch(Coalesce, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
lhs = data
rhs = t.rhs
else:
lhs = t.lhs
rhs = data
return array_coalesce(t, lhs, rhs)
def intonumpy(data, dtype=None, **kwargs):
# TODO: Don't ignore other kwargs like copy
result = odo(data, np.ndarray)
if dtype and result.dtype != dtype:
result = result.astype(dtype)
return result
Expr.__array__ = intonumpy
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/numpy.py",
"copies": "3",
"size": "13984",
"license": "bsd-3-clause",
"hash": -2843428944827476500,
"line_mean": 27.5971370143,
"line_max": 84,
"alpha_frac": 0.632937643,
"autogenerated": false,
"ratio": 3.214712643678161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5347650286678161,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterable
from datashape import DataShape
from odo.utils import copydoc
from .expressions import Expr, ndim, symbol
__all__ = 'Transpose', 'TensorDot', 'dot', 'transpose', 'tensordot'
class Transpose(Expr):
""" Transpose dimensions in an N-Dimensional array
Examples
--------
>>> x = symbol('x', '10 * 20 * int32')
>>> x.T
transpose(x)
>>> x.T.shape
(20, 10)
Specify axis ordering with axes keyword argument
>>> x = symbol('x', '10 * 20 * 30 * int32')
>>> x.transpose([2, 0, 1])
transpose(x, axes=[2, 0, 1])
>>> x.transpose([2, 0, 1]).shape
(30, 10, 20)
"""
__slots__ = '_hash', '_child', 'axes'
@property
def dshape(self):
s = self._child.shape
return DataShape(*(tuple([s[i] for i in self.axes]) +
(self._child.dshape.measure,)))
def __str__(self):
if self.axes == tuple(range(ndim(self)))[::-1]:
return 'transpose(%s)' % self._child
else:
return 'transpose(%s, axes=%s)' % (self._child,
list(self.axes))
@copydoc(Transpose)
def transpose(expr, axes=None):
if axes is None:
axes = tuple(range(ndim(expr)))[::-1]
if isinstance(axes, list):
axes = tuple(axes)
return Transpose(expr, axes)
@copydoc(Transpose)
def T(expr):
return transpose(expr)
class TensorDot(Expr):
""" Dot Product: Contract and sum dimensions of two arrays
>>> x = symbol('x', '20 * 20 * int32')
>>> y = symbol('y', '20 * 30 * int32')
>>> x.dot(y)
tensordot(x, y)
>>> tensordot(x, y, axes=[0, 0])
tensordot(x, y, axes=[0, 0])
"""
__slots__ = '_hash', 'lhs', 'rhs', '_left_axes', '_right_axes'
__inputs__ = 'lhs', 'rhs'
@property
def dshape(self):
# Compute shape
shape = tuple([d for i, d in enumerate(self.lhs.shape)
if i not in self._left_axes] +
[d for i, d in enumerate(self.rhs.shape)
if i not in self._right_axes])
# Compute measure by mimicking a mul and add
l = symbol('l', self.lhs.dshape.measure)
r = symbol('r', self.rhs.dshape.measure)
measure = ((l * r) + (l * r)).dshape.measure
return DataShape(*(shape + (measure,)))
def __str__(self):
if self.isidentical(tensordot(self.lhs, self.rhs)):
return 'tensordot(%s, %s)' % (self.lhs, self.rhs)
else:
la = self._left_axes
if len(la) == 1:
la = la[0]
ra = self._right_axes
if len(ra) == 1:
ra = ra[0]
return 'tensordot(%s, %s, axes=[%s, %s])' % (
self.lhs, self.rhs, str(la), str(ra))
@copydoc(TensorDot)
def tensordot(lhs, rhs, axes=None):
if axes is None:
left = ndim(lhs) - 1
right = 0
elif isinstance(axes, Iterable):
left, right = axes
else:
left, right = axes, axes
if isinstance(left, int):
left = (left,)
if isinstance(right, int):
right = (right,)
if isinstance(left, list):
left = tuple(left)
if isinstance(right, list):
right = tuple(right)
return TensorDot(lhs, rhs, left, right)
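# A hypothetical sketch of the axes normalization above:
#
#     >>> x = symbol('x', '20 * 20 * int32')
#     >>> y = symbol('y', '20 * 30 * int32')
#     >>> tensordot(x, y)._left_axes, tensordot(x, y)._right_axes
#     ((1,), (0,))
#     >>> tensordot(x, y, axes=0)._left_axes, tensordot(x, y, axes=0)._right_axes
#     ((0,), (0,))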
@copydoc(TensorDot)
def dot(lhs, rhs):
return tensordot(lhs, rhs)
from datashape.predicates import isnumeric, isboolean
from .expressions import dshape_method_list, method_properties
dshape_method_list.extend([
(lambda ds: ndim(ds) > 1, set([transpose])),
(lambda ds: ndim(ds) == 2, set([T])),
(lambda ds: ndim(ds) >= 1 and (isnumeric(ds) or isboolean(ds)), set([dot]))
])
method_properties.add(T)
| {
"repo_name": "xlhtc007/blaze",
"path": "blaze/expr/arrays.py",
"copies": "10",
"size": "3787",
"license": "bsd-3-clause",
"hash": -7419031091356881000,
"line_mean": 25.6690140845,
"line_max": 79,
"alpha_frac": 0.5444943227,
"autogenerated": false,
"ratio": 3.4086408640864088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8953135186786408,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator, Mapping
import itertools
import datashape
from datashape import discover, Tuple, Record, DataShape, var
from datashape.predicates import (
istabular,
isscalar,
isrecord,
)
from odo import resource
from odo.utils import ignoring, copydoc
from ..compatibility import _strtypes
from ..dispatch import dispatch
from .expressions import sanitized_dshape, Symbol
__all__ = ['BoundSymbol', 'Literal', 'Data', 'literal', 'data']
_names = ('_%d' % i for i in itertools.count(1))
not_an_iterator = []
with ignoring(ImportError):
import bcolz
not_an_iterator.append(bcolz.carray)
with ignoring(ImportError):
import pymongo
not_an_iterator.append(pymongo.collection.Collection)
not_an_iterator.append(pymongo.database.Database)
class generate(object):
"""A sentinel value, indicating whether or not `literal` should
generate a name for the returned `BoundSymbol`.
"""
def __new__(cls):
raise NotImplementedError('Cannot create an instance of the sentinel type.')
class BoundSymbol(Symbol):
# NOTE: This docstring is meant to correspond to the ``literal()`` and
# ``data()`` APIs, which is why the Parameters section doesn't match the
# arguments to ``Literal.__new__()``.
"""Bind a data resource to a symbol, for use in expressions and
computation.
A ``data`` object presents a consistent view onto a variety of concrete
data sources. Like ``symbol`` objects, they are meant to be used in
expressions. Because they are tied to concrete data resources, ``data``
objects can be used with ``compute`` directly, making them convenient for
interactive exploration.
Parameters
----------
data_source : object
Any type with ``discover`` and ``compute`` implementations
fields : list, optional
Field or column names, will be inferred from data_source if possible
dshape : str or DataShape, optional
DataShape describing input data
name : str, optional
A name for the data.
Examples
--------
>>> t = data([(1, 'Alice', 100),
... (2, 'Bob', -200),
... (3, 'Charlie', 300),
... (4, 'Denis', 400),
... (5, 'Edith', -500)],
... fields=['id', 'name', 'balance'])
>>> t[t.balance < 0].name.peek()
name
0 Bob
1 Edith
"""
_arguments = 'data', 'dshape', '_name'
def _resources(self):
return {self: self.data}
@property
def _token(self):
return 0
@classmethod
def _static_identity(cls, data, dshape, _name):
try:
# cannot use isinstance(data, Hashable)
# some classes give a false positive
hash(data)
except TypeError:
data = id(data)
return cls, data, dshape, _name
def __str__(self):
name = self._name
return name if name is not None else repr(self)
class Literal(BoundSymbol):
def __repr__(self):
name = self._name
return name if name is not None else repr(self.data)
class Data(BoundSymbol):
def __repr__(self):
return "<'{}' data; _name='{}', dshape='{}'>".format(
type(self.data).__name__,
self._name,
sanitized_dshape(self.dshape),
)
def _bound_symbol(cls,
data_source,
dshape,
name,
fields,
schema,
**kwargs):
if schema and dshape:
raise ValueError(
'Please specify one of schema= or dshape= keyword arguments',
)
if isinstance(data_source, BoundSymbol):
return _bound_symbol(
cls,
data_source.data,
dshape,
name,
fields,
schema,
**kwargs
)
if schema and not dshape:
dshape = var * schema
if dshape and isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if isinstance(data_source, _strtypes):
data_source = resource(
data_source,
schema=schema,
dshape=dshape,
**kwargs
)
if (isinstance(data_source, Iterator) and
not isinstance(data_source, tuple(not_an_iterator))):
data_source = tuple(data_source)
if not dshape:
dshape = discover(data_source)
types = None
if isinstance(dshape.measure, Tuple) and fields:
types = dshape[1].dshapes
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isscalar(dshape.measure) and fields:
types = (dshape.measure,) * int(dshape[-2])
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape[:-1] + (schema,)))
elif isrecord(dshape.measure) and fields:
ds = discover(data_source)
assert isrecord(ds.measure)
names = ds.measure.names
if names != fields:
raise ValueError(
'data column names %s\n'
'\tnot equal to fields parameter %s,\n'
'\tuse data(data_source).relabel(%s) to rename '
'fields' % (
names,
fields,
', '.join(
'%s=%r' % (k, v)
for k, v in
zip(names, fields)
),
),
)
types = dshape.measure.types
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
ds = datashape.dshape(dshape)
if name is generate:
if istabular(dshape):
name = next(_names)
else:
name = None
return cls(data_source, ds, name)
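# A hypothetical sketch of the fields -> Record coercion performed above: for
# tuple-valued rows, the inferred dshape gains a record measure named after
# `fields`, e.g. literal([(1, 'Alice'), (2, 'Bob')], fields=['id', 'name'])
# discovers something like 2 * {id: int64, name: string}.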
@copydoc(BoundSymbol)
def literal(data_source,
dshape=None,
name=None,
fields=None,
schema=None,
**kwargs):
return _bound_symbol(
Literal,
data_source,
dshape=dshape,
name=name,
fields=fields,
schema=schema,
**kwargs
)
@copydoc(BoundSymbol)
def data(data_source,
dshape=None,
name=generate,
fields=None,
schema=None,
**kwargs):
return _bound_symbol(
Data,
data_source,
dshape=dshape,
name=name,
fields=fields,
schema=schema,
**kwargs
)
@dispatch(BoundSymbol, Mapping)
def _subs(o, d):
return o
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/literal.py",
"copies": "3",
"size": "6793",
"license": "bsd-3-clause",
"hash": -4476100298831097300,
"line_mean": 26.3911290323,
"line_max": 78,
"alpha_frac": 0.5465920801,
"autogenerated": false,
"ratio": 4.177736777367774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6224328857467775,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from errno import ENOENT
from functools import partial
import os
import sys
import shutil
import struct
import gzip
import tempfile
import inspect
from .compatibility import unicode, long
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def getargspec(func):
"""Version of inspect.getargspec that works for functools.partial objects"""
if isinstance(func, partial):
return inspect.getargspec(func.func)
else:
if isinstance(func, type):
return inspect.getargspec(func.__init__)
else:
return inspect.getargspec(func)
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval, float, format, frozenset,
hash, hex, id, int, iter, len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted, staticmethod, str, sum, tuple,
type, vars, zip])
if sys.version_info[0] == 3: # Python 3
ONE_ARITY_BUILTINS |= set([ascii])
if sys.version_info[:2] != (2, 6):
ONE_ARITY_BUILTINS |= set([memoryview])
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
def register(self, type, func):
"""Register dispatch of `func` on arguments of type `type`"""
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
def __call__(self, arg):
# We dispatch first on type(arg), and fall back to iterating through
# the mro. This is significantly faster in the common case where
# type(arg) is in the lookup, with only a small penalty on fall back.
lk = self._lookup
typ = type(arg)
if typ in lk:
return lk[typ](arg)
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
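# Hypothetical usage sketch of the Dispatch helper above:
#
#     >>> handle = Dispatch()
#     >>> handle.register(int, lambda n: n + 1)
#     >>> handle.register((float, complex), lambda n: n * 2)
#     >>> handle(3)
#     4
#     >>> handle(3.5)
#     7.0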
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
| {
"repo_name": "clarkfitzg/dask",
"path": "dask/utils.py",
"copies": "1",
"size": "8610",
"license": "bsd-3-clause",
"hash": 7641240059924745000,
"line_mean": 22.5245901639,
"line_max": 80,
"alpha_frac": 0.5601626016,
"autogenerated": false,
"ratio": 3.5904920767306088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9644705201695796,
"avg_score": 0.0011898953269627221,
"num_lines": 366
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from errno import ENOENT
import functools
import io
import os
import sys
import shutil
import struct
import tempfile
import inspect
import codecs
from sys import getdefaultencoding
from .compatibility import long, getargspec, BZ2File, GzipFile, LZMAFile
system_encoding = getdefaultencoding()
if system_encoding == 'ascii':
system_encoding = 'utf-8'
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
with ignoring(OSError):
os.remove(filename)
@contextmanager
def tmpdir(dir=None):
dirname = tempfile.mkdtemp(dir=dir)
try:
yield dirname
finally:
if os.path.exists(dirname):
if os.path.isdir(dirname):
shutil.rmtree(dirname)
else:
with ignoring(OSError):
os.remove(dirname)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode='t'):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'w' + mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
compressions = {'gz': 'gzip', 'bz2': 'bz2', 'xz': 'xz'}
def infer_compression(filename):
extension = os.path.splitext(filename)[-1].strip('.')
return compressions.get(extension, None)
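# Hypothetical sketch of the extension lookup above:
#
#     >>> infer_compression('part-0.csv.gz')
#     'gzip'
#     >>> infer_compression('part-0.csv') is None
#     True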
opens = {'gzip': GzipFile, 'bz2': BZ2File, 'xz': LZMAFile}
def open(filename, mode='rb', compression=None, **kwargs):
if compression == 'infer':
compression = infer_compression(filename)
return opens.get(compression, io.open)(filename, mode, **kwargs)
def get_bom(fn, compression=None):
"""
Get the Byte Order Mark (BOM) if it exists.
"""
boms = set((codecs.BOM_UTF16, codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE))
with open(fn, mode='rb', compression=compression) as f:
f.seek(0)
bom = f.read(2)
f.seek(0)
if bom in boms:
return bom
else:
return b''
def get_bin_linesep(encoding, linesep):
"""
Simply doing `linesep.encode(encoding)` does not always give you
*just* the linesep bytes; for some encodings this prefixes the
linesep bytes with the BOM. This function ensures we just get the
linesep bytes.
"""
if encoding == 'utf-16':
return linesep.encode('utf-16')[2:] # [2:] strips bom
else:
return linesep.encode(encoding)
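# A hedged illustration of the BOM issue described above: encoding a newline as
# utf-16 yields a two-byte BOM plus the two linesep bytes, while the helper
# returns only the linesep bytes.
#
#     >>> len('\n'.encode('utf-16')), len(get_bin_linesep('utf-16', '\n'))
#     (4, 2)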
def textblock(filename, start, end, compression=None, encoding=system_encoding,
linesep=os.linesep, buffersize=4096):
"""Pull out a block of text from a file given start and stop bytes.
This gets data starting/ending from the next linesep delimiter. Each block
consists of bytes in the range [start,end[, i.e. the stop byte is excluded.
If `start` is 0, then `start` corresponds to the true start byte. If
`start` is greater than 0 and does not point to the beginning of a new
line, then `start` is incremented until it corresponds to the start byte of
the next line. If `end` does not point to the beginning of a new line, then
the line that begins before `end` is included in the block although its
last byte exceeds `end`.
Examples
--------
>> with open('myfile.txt', 'wb') as f:
.. f.write('123\n456\n789\nabc')
In the example below, 1 and 10 don't line up with endlines.
>> u''.join(textblock('myfile.txt', 1, 10))
'456\n789\n'
"""
# Make sure `linesep` is not a byte string because
# `io.TextIOWrapper` in Python versions other than 2.7 dislike byte
# strings for the `newline` argument.
linesep = str(linesep)
# Get byte representation of the line separator.
bin_linesep = get_bin_linesep(encoding, linesep)
bin_linesep_len = len(bin_linesep)
if buffersize < bin_linesep_len:
error = ('`buffersize` ({0:d}) must be at least as large as the '
'number of line separator bytes ({1:d}).')
raise ValueError(error.format(buffersize, bin_linesep_len))
chunksize = end - start
with open(filename, 'rb', compression) as f:
with io.BufferedReader(f) as fb:
# If `start` does not correspond to the beginning of the file, we
# need to move the file pointer to `start - len(bin_linesep)`,
# search for the position of the next a line separator, and set
# `start` to the position after that line separator.
if start > 0:
# `start` is decremented by `len(bin_linesep)` to detect the
# case where the original `start` value corresponds to the
# beginning of a line.
start = max(0, start - bin_linesep_len)
# Set the file pointer to `start`.
fb.seek(start)
# Number of bytes to shift the file pointer before reading a
# new chunk to make sure that a multi-byte line separator, that
# is split by the chunk reader, is still detected.
shift = 1 - bin_linesep_len
while True:
buf = f.read(buffersize)
if len(buf) < bin_linesep_len:
raise StopIteration
try:
# Find the position of the next line separator and add
# `len(bin_linesep)` which yields the position of the
# first byte of the next line.
start += buf.index(bin_linesep)
start += bin_linesep_len
except ValueError:
# No line separator was found in the current chunk.
# Before reading the next chunk, we move the file
# pointer back `len(bin_linesep) - 1` bytes to make
# sure that a multi-byte line separator, that may have
# been split by the chunk reader, is still detected.
start += len(buf)
start += shift
fb.seek(shift, os.SEEK_CUR)
else:
# We have found the next line separator, so we need to
# set the file pointer to the first byte of the next
# line.
fb.seek(start)
break
with io.TextIOWrapper(fb, encoding, newline=linesep) as fbw:
# Retrieve and yield lines until the file pointer reaches
# `end`.
while start < end:
line = next(fbw)
# We need to encode the line again to get the byte length
# in order to correctly update `start`.
bin_line_len = len(line.encode(encoding))
if chunksize < bin_line_len:
error = ('`chunksize` ({0:d}) is less than the line '
'length ({1:d}). This may cause duplicate '
'processing of this line. It is advised to '
'increase `chunksize`.')
raise IOError(error.format(chunksize, bin_line_len))
yield line
start += bin_line_len
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, random_state=None):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def different_seeds(n, random_state=None):
""" A list of different 32 bit integer seeds
Parameters
----------
n: int
Number of distinct seeds to return
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
"""
import numpy as np
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
big_n = np.iinfo(np.int32).max
seeds = set(random_state.randint(big_n, size=n))
while len(seeds) < n:
seeds.add(random_state.randint(big_n))
# Sorting makes it easier to know what seeds are for what chunk
return sorted(seeds)
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
elif compression:
# depending on the implementation, this may be inefficient
with open(fn, 'rb', compression) as f:
result = f.seek(0, 2)
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval, float, format, frozenset,
hash, hex, id, int, iter, len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted, staticmethod, str, sum, tuple,
type, vars, zip])
if sys.version_info[0] == 3: # Python 3
ONE_ARITY_BUILTINS |= set([ascii])
if sys.version_info[:2] != (2, 6):
ONE_ARITY_BUILTINS |= set([memoryview])
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
def register(self, type, func):
"""Register dispatch of `func` on arguments of type `type`"""
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
def __call__(self, arg):
# We dispatch first on type(arg), and fall back to iterating through
# the mro. This is significantly faster in the common case where
# type(arg) is in the lookup, with only a small penalty on fall back.
lk = self._lookup
typ = type(arg)
if typ in lk:
return lk[typ](arg)
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
def _skip_doctest(line):
if '>>>' in line:
return line + ' # doctest: +SKIP'
else:
return line
def derived_from(original_klass, version=None, ua_args=[]):
"""Decorator to attach original class's docstring to the wrapped method.
Parameters
----------
original_klass: type
Original class which the method is derived from
version : str
Original package version which supports the wrapped method
ua_args : list
List of keywords which Dask doesn't support. Keywords existing in
original but not in Dask will automatically be added.
"""
def wrapper(method):
method_name = method.__name__
try:
# do not use wraps here, as it hides keyword arguments displayed
# in the doc
original_method = getattr(original_klass, method_name)
doc = original_method.__doc__
if doc is None:
doc = ''
method_args = getargspec(method).args
original_args = getargspec(original_method).args
not_supported = [m for m in original_args if m not in method_args]
if len(ua_args) > 0:
not_supported.extend(ua_args)
if len(not_supported) > 0:
note = ("\n Notes\n -----\n"
" Dask doesn't supports following argument(s).\n\n")
args = ''.join([' * {0}\n'.format(a) for a in not_supported])
doc = doc + note + args
doc = '\n'.join([_skip_doctest(line) for line in doc.split('\n')])
method.__doc__ = doc
return method
except AttributeError:
module_name = original_klass.__module__.split('.')[0]
@functools.wraps(method)
def wrapped(*args, **kwargs):
msg = "Base package doesn't support '{0}'.".format(method_name)
if version is not None:
msg2 = " Use {0} {1} or later to use this method."
msg += msg2.format(module_name, version)
raise NotImplementedError(msg)
return wrapped
return wrapper
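# Hypothetical usage sketch of the decorator above (class names are invented):
#
#     >>> class Original(object):
#     ...     def compute(self, split_every=None, out=None):
#     ...         """Compute the result."""
#     >>> class Wrapped(object):
#     ...     @derived_from(Original)
#     ...     def compute(self, split_every=None):
#     ...         pass
#     >>> 'out' in Wrapped.compute.__doc__   # unsupported arguments get listed
#     True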
def funcname(func, full=False):
"""Get the name of a function."""
while hasattr(func, 'func'):
func = func.func
try:
if full:
return func.__qualname__.strip('<>')
else:
return func.__name__.strip('<>')
except:
return str(func).strip('<>')
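# Hypothetical sketch: partials are unwrapped before the name lookup above.
#
#     >>> funcname(functools.partial(max, key=len))
#     'max'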
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes(u'123')
'123'
>>> ensure_bytes('123')
'123'
>>> ensure_bytes(b'123')
'123'
"""
if isinstance(s, bytes):
return s
if hasattr(s, 'encode'):
return s.encode()
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s)
def digit(n, k, base):
"""
>>> digit(1234, 0, 10)
4
>>> digit(1234, 1, 10)
3
>>> digit(1234, 2, 10)
2
>>> digit(1234, 3, 10)
1
"""
return n // base**k % base
def insert(tup, loc, val):
"""
>>> insert(('a', 'b', 'c'), 0, 'x')
('x', 'b', 'c')
"""
L = list(tup)
L[loc] = val
return tuple(L)
| {
"repo_name": "mikegraham/dask",
"path": "dask/utils.py",
"copies": "1",
"size": "18313",
"license": "bsd-3-clause",
"hash": 6529543048769672000,
"line_mean": 28.2073365231,
"line_max": 84,
"alpha_frac": 0.5564899252,
"autogenerated": false,
"ratio": 3.916381522668948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9968688457837731,
"avg_score": 0.0008365980062433763,
"num_lines": 627
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from errno import ENOENT
import functools
import os
import sys
import shutil
import struct
import types
import gzip
import tempfile
import inspect
import codecs
from .compatibility import unicode, long, getargspec
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def get_bom(fn):
"""
Get the Byte Order Mark (BOM) if it exists.
"""
boms = set((codecs.BOM_UTF16, codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE))
with open(fn, 'rb') as f:
f.seek(0)
bom = f.read(2)
f.seek(0)
if bom in boms:
return bom
else:
return b''
def get_bin_linesep(encoding, linesep):
"""
Simply doing `linesep.encode(encoding)` does not always give you
*just* the linesep bytes; for some encodings this prefixes the
linesep bytes with the BOM. This function ensures we just get the
linesep bytes.
"""
if encoding == 'utf-16':
return linesep.encode('utf-16')[2:] # [2:] strips bom
else:
return linesep.encode(encoding)
def next_linesep(fo, seek, encoding, linesep):
bin_linesep = get_bin_linesep(encoding, linesep)
data = b''
data_len = 0
while bin_linesep not in data:
data_len += 1
fo.seek(seek)
data = fo.read(data_len)
if len(data) < data_len:
break # eof
stop = seek + data_len
start = stop - len(bin_linesep)
return start, stop
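# Hypothetical sketch of the separator search above (assumes io.BytesIO):
#
#     >>> from io import BytesIO
#     >>> next_linesep(BytesIO(b'abc\ndef\n'), 1, 'utf-8', '\n')
#     (3, 4)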
def textblock(file, start, stop, compression=None, encoding=None,
linesep=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next linesep delimiter
Examples
--------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop, compression=None,
encoding=encoding, linesep=linesep)
finally:
f.close()
return result
if encoding is None:
encoding = getattr(file, 'encoding', 'utf-8')
if encoding is None:
encoding = 'utf-8'
if linesep is None:
linesep = os.linesep
if start:
startstart, startstop = next_linesep(file, start, encoding, linesep)
else:
startstart = start
startstop = start
if stop is None:
file.seek(start)
return file.read()
stopstart, stopstop = next_linesep(file, stop, encoding, linesep)
file.seek(startstop)
return file.read(stopstop - startstop)
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, random_state=None):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def different_seeds(n, random_state=None):
""" A list of different 32 bit integer seeds
Parameters
----------
n: int
Number of distinct seeds to return
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
"""
import numpy as np
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
big_n = np.iinfo(np.int32).max
seeds = set(random_state.randint(big_n, size=n))
while len(seeds) < n:
seeds.add(random_state.randint(big_n))
# Sorting makes it easier to know what seeds are for what chunk
return sorted(seeds)
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval, float, format, frozenset,
hash, hex, id, int, iter, len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted, staticmethod, str, sum, tuple,
type, vars, zip])
if sys.version_info[0] == 3: # Python 3
ONE_ARITY_BUILTINS |= set([ascii])
if sys.version_info[:2] != (2, 6):
ONE_ARITY_BUILTINS |= set([memoryview])
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
def register(self, type, func):
"""Register dispatch of `func` on arguments of type `type`"""
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
def __call__(self, arg):
# We dispatch first on type(arg), and fall back to iterating through
# the mro. This is significantly faster in the common case where
# type(arg) is in the lookup, with only a small penalty on fall back.
lk = self._lookup
typ = type(arg)
if typ in lk:
return lk[typ](arg)
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
def _skip_doctest(line):
if '>>>' in line:
return line + ' # doctest: +SKIP'
else:
return line
def derived_from(original_klass, version=None, ua_args=[]):
"""Decorator to attach original class's docstring to the wrapped method.
Parameters
----------
original_klass: type
Original class which the method is derived from
version : str
Original package version which supports the wrapped method
ua_args : list
List of keywords which Dask doesn't support. Keywords existing in
original but not in Dask will automatically be added.
"""
def wrapper(method):
method_name = method.__name__
try:
# do not use wraps here, as it hides keyword arguments displayed
# in the doc
original_method = getattr(original_klass, method_name)
doc = original_method.__doc__
if doc is None:
doc = ''
method_args = getargspec(method).args
original_args = getargspec(original_method).args
not_supported = [m for m in original_args if m not in method_args]
if len(ua_args) > 0:
not_supported.extend(ua_args)
if len(not_supported) > 0:
note = ("\n Notes\n -----\n"
" Dask doesn't supports following argument(s).\n\n")
args = ''.join([' * {0}\n'.format(a) for a in not_supported])
doc = doc + note + args
doc = '\n'.join([_skip_doctest(line) for line in doc.split('\n')])
method.__doc__ = doc
return method
except AttributeError:
module_name = original_klass.__module__.split('.')[0]
@functools.wraps(method)
def wrapped(*args, **kwargs):
msg = "Base package doesn't support '{0}'.".format(method_name)
if version is not None:
msg2 = " Use {0} {1} or later to use this method."
msg += msg2.format(module_name, version)
raise NotImplementedError(msg)
return wrapped
return wrapper
def funcname(func):
"""Get the name of a function."""
while hasattr(func, 'func'):
func = func.func
try:
return func.__name__
except:
return str(func)
| {
"repo_name": "pombredanne/dask",
"path": "dask/utils.py",
"copies": "2",
"size": "13088",
"license": "bsd-3-clause",
"hash": 6246447852789819000,
"line_mean": 24.662745098,
"line_max": 84,
"alpha_frac": 0.5663202934,
"autogenerated": false,
"ratio": 3.721353426215525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5287673719615524,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from functools import partial
import os
import shutil
import gzip
import tempfile
import inspect
from .compatibility import unicode, long
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def getargspec(func):
"""Version of inspect.getargspec that works for functools.partial objects"""
if isinstance(func, partial):
return inspect.getargspec(func.func)
else:
return inspect.getargspec(func)
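# Hypothetical sketch: the wrapper above reaches through functools.partial.
#
#     >>> getargspec(partial(lambda a, b=1: None)).args
#     ['a', 'b']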
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
return True
else:
return False
| {
"repo_name": "freeman-lab/dask",
"path": "dask/utils.py",
"copies": "1",
"size": "5356",
"license": "bsd-3-clause",
"hash": 5066064067924227000,
"line_mean": 20.5967741935,
"line_max": 80,
"alpha_frac": 0.5451829724,
"autogenerated": false,
"ratio": 3.5260039499670834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9568929889028062,
"avg_score": 0.0004514066678045173,
"num_lines": 248
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from functools import partial
import os
import shutil
import struct
import gzip
import tempfile
import inspect
from .compatibility import unicode, long
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def getargspec(func):
"""Version of inspect.getargspec that works for functools.partial objects"""
if isinstance(func, partial):
return inspect.getargspec(func.func)
else:
return inspect.getargspec(func)
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
return True
else:
return False
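# A note on the gzip branch in file_size below: the last four bytes of a gzip
# stream hold the ISIZE field, i.e. the uncompressed length modulo 2**32, which
# is what the seek(-4, 2) / struct.unpack('I', ...) pair reads. For multi-member
# archives or inputs over 4 GiB this is therefore only an approximation.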
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
else:
result = os.stat(fn).st_size
return result
| {
"repo_name": "simudream/dask",
"path": "dask/utils.py",
"copies": "4",
"size": "5715",
"license": "bsd-3-clause",
"hash": -930469120743988500,
"line_mean": 20.7300380228,
"line_max": 80,
"alpha_frac": 0.5464566929,
"autogenerated": false,
"ratio": 3.5343228200371057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000425661040363195,
"num_lines": 263
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from functools import partial
import os
import sys
import shutil
import struct
import gzip
import tempfile
import inspect
from .compatibility import unicode, long
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
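# Illustrative sketch, not part of the original module: a runnable example of
# how `textblock` snaps byte offsets to newline boundaries. The helper name
# `_demo_textblock` is hypothetical and exists only for this sketch.
def _demo_textblock():
    with tmpfile('.txt') as fn:
        with open(fn, 'wb') as f:
            f.write(b'123\n456\n789\nabc')
        # Offsets 1 and 10 fall inside lines; textblock skips forward to the
        # newline after `start` and reads through the newline containing `stop`.
        block = textblock(fn, 1, 10)
        assert block == b'456\n789\n'
        return block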
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def getargspec(func):
"""Version of inspect.getargspec that works for functools.partial objects"""
if isinstance(func, partial):
return inspect.getargspec(func.func)
else:
if isinstance(func, type):
return inspect.getargspec(func.__init__)
else:
return inspect.getargspec(func)
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
else:
result = os.stat(fn).st_size
return result
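# Illustrative sketch, not part of the original module: `file_size` with
# compression='gzip' reads the gzip trailer, whose final four bytes hold the
# uncompressed length (modulo 2**32) as an unsigned 32-bit integer. The name
# `_demo_file_size` is hypothetical.
def _demo_file_size():
    data = b'x' * 10000
    with tmpfile('.gz') as fn:
        with gzip.open(fn, 'wb') as f:
            f.write(data)
        assert file_size(fn, compression='gzip') == len(data)
        # without compression=, the on-disk (compressed) size is reported
        assert file_size(fn) == os.stat(fn).st_size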
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval, float, format, frozenset,
hash, hex, id, int, iter, len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted, staticmethod, str, sum, tuple,
type, vars, zip])
if sys.version_info[0] == 3: # Python 3
ONE_ARITY_BUILTINS |= set([ascii])
if sys.version_info[:2] != (2, 6):
ONE_ARITY_BUILTINS |= set([memoryview])
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
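# Illustrative sketch, not part of the original module: how the pieces above
# fit together. `getargspec` unwraps functools.partial objects (and class
# constructors), and `takes_multiple_arguments` special-cases builtins via the
# arity sets. `_demo_arity` is a hypothetical helper name.
def _demo_arity():
    def add(x, y):
        return x + y
    assert takes_multiple_arguments(add)
    # the spec of a partial is the spec of the wrapped function
    assert getargspec(partial(add, 1)).args == ['x', 'y']
    # builtins are handled by membership in the arity sets, not introspection
    assert not takes_multiple_arguments(len)
    assert takes_multiple_arguments(map)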
| {
"repo_name": "jayhetee/dask",
"path": "dask/utils.py",
"copies": "1",
"size": "7482",
"license": "bsd-3-clause",
"hash": -4247454888696927700,
"line_mean": 21.880733945,
"line_max": 80,
"alpha_frac": 0.5608126169,
"autogenerated": false,
"ratio": 3.5342465753424657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45950591922424655,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
import os
import shutil
import gzip
import tempfile
from .compatibility import unicode
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
| {
"repo_name": "minrk/dask",
"path": "dask/utils.py",
"copies": "1",
"size": "3721",
"license": "bsd-3-clause",
"hash": -189036384009499100,
"line_mean": 20.2628571429,
"line_max": 71,
"alpha_frac": 0.5396398818,
"autogenerated": false,
"ratio": 3.721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757794076811337,
"avg_score": 0.0005691609977324264,
"num_lines": 175
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
import os
import shutil
import gzip
import tempfile
from .compatibility import unicode
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
| {
"repo_name": "esc/dask",
"path": "dask/utils.py",
"copies": "1",
"size": "3735",
"license": "bsd-3-clause",
"hash": 6331134230731536000,
"line_mean": 20.2215909091,
"line_max": 71,
"alpha_frac": 0.5408299866,
"autogenerated": false,
"ratio": 3.7238285144566303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.476465850105663,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from copy import copy
from distutils.version import LooseVersion
import operator
from operator import getitem, setitem
from pprint import pformat
import uuid
import warnings
from toolz import merge, partial, first, unique, partition_all
import pandas as pd
from pandas.util.decorators import cache_readonly
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import apply, operator_div, bind_method
from ..utils import (repr_long_list, random_state_data,
pseudorandom, derived_from, funcname, memory_repr,
put_lines, M)
from ..base import Base, compute, tokenize, normalize_token
from ..async import get_sync
from . import methods
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error)
no_default = '__no_default__'
pd.computation.expressions.set_use_numexpr(False)
def _concat(args, **kwargs):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
return args[0].append(args[1:])
try:
return pd.Series(args)
except:
return args
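# Illustrative sketch, not part of the original module: `_concat` dispatches on
# the type of its inputs -- pandas frames/series are concatenated after empty
# partitions are dropped, and Index inputs use Index.append. `_demo_concat` is
# a hypothetical helper name.
def _demo_concat():
    frames = [pd.DataFrame({'x': [1, 2]}),
              pd.DataFrame({'x': []}),  # empty partitions are dropped
              pd.DataFrame({'x': [3]})]
    assert list(_concat(frames).x) == [1, 2, 3]
    idx = _concat([pd.Index([1, 2]), pd.Index([3])])
    assert list(idx) == [1, 2, 3]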
def _get_return_type(meta):
if isinstance(meta, _Frame):
meta = meta._meta
if isinstance(meta, pd.Series):
return Series
elif isinstance(meta, pd.DataFrame):
return DataFrame
elif isinstance(meta, pd.Index):
return Index
return Scalar
def new_dd_object(dsk, _name, meta, divisions):
"""Generic constructor for dask.dataframe objects.
Decides the appropriate output class based on the type of `meta` provided.
"""
return _get_return_type(meta)(dsk, _name, meta, divisions)
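# Illustrative sketch, not part of the original module: `new_dd_object` picks
# the collection class from the type of `meta` via `_get_return_type`. The
# helper name `_demo_return_types` is hypothetical.
def _demo_return_types():
    assert _get_return_type(pd.Series([], dtype='f8', name='x')) is Series
    assert _get_return_type(pd.DataFrame({'x': [1]})) is DataFrame
    assert _get_return_type(pd.Index([1, 2])) is Index
    assert _get_return_type(1.0) is Scalar  # anything else maps to Scalar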
def optimize(dsk, keys, **kwargs):
from .optimize import optimize
return optimize(dsk, keys, **kwargs)
def finalize(results):
return _concat(results)
class Scalar(Base):
""" A Dask object to represent a pandas scalar"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(first)
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
self.dask = dsk
self._name = name
meta = make_meta(meta)
if isinstance(meta, (pd.DataFrame, pd.Series, pd.Index)):
raise ValueError("Expected meta to specify scalar, got "
"{0}".format(type(meta).__name__))
self._meta = meta
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
def _keys(self):
return [self.key]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
return Scalar(merge(dsk, self.dask), name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dsk = self.dask
return_type = _get_return_type(other)
if isinstance(other, Scalar):
dsk = merge(dsk, other.dask)
other_key = (other._name, 0)
elif isinstance(other, Base):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
if return_type is not Scalar:
return return_type(dsk, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(dsk, name, meta)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, meta, divisions):
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not isinstance(meta, self._partition_type):
raise ValueError("Expected meta to specify type {0}, got type "
"{1}".format(self._partition_type.__name__,
type(meta).__name__))
self._meta = meta
self.divisions = tuple(divisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def size(self):
return self.reduction(methods.size, np.sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if self.known_divisions:
div_text = ', divisions=%s' % repr_long_list(self.divisions)
else:
div_text = ''
return ("dd.%s<%s, npartitions=%s%s>" %
(self.__class__.__name__, name, self.npartitions, div_text))
def __array__(self, dtype=None, **kwargs):
self._computed = self.compute()
x = np.array(self._computed)
return x
def __array_wrap__(self, array, context=None):
raise NotImplementedError
@property
def _elemwise(self):
return elemwise
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name,
self._meta.index, self.divisions)
@derived_from(pd.DataFrame)
def reset_index(self, drop=False):
return self.map_partitions(M.reset_index, drop=drop).clear_divisions()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n + 2]
return new_dd_object(merge(self.dask, dsk), name,
self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
warnings.warn("Deprecation Warning: The `cache` method is deprecated, "
"and will be removed in the next release. To achieve "
"the same behavior, either write to disk or use "
"`Client.persist`, from `dask.distributed`.")
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + uuid.uuid1().hex
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
self._get(merge(dsk, self.dask), list(dsk.keys()))
# Create new DataFrame pointing to that cache
name = 'from-cache-' + self._name
dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))
for i, key in enumerate(self._keys()))
return new_dd_object(dsk2, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def drop_duplicates(self, **kwargs):
split_every = kwargs.pop('split_every', None)
assert all(k in ('keep', 'subset', 'take_last') for k in kwargs)
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', split_every=split_every, **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int,
split_every=False).compute()
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def random_split(self, frac, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should sum to one.
random_state: int or np.random.RandomState
If int, create a new RandomState with this as the seed.
Otherwise, draw from the passed RandomState.
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
if not np.allclose(sum(frac), 1):
raise ValueError("frac should sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
dsk = {(name, i): (pd_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(len(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (getitem, (name, j), i)
for j in range(self.npartitions)}
out.append(type(self)(merge(self.dask, dsk, dsk2), name2,
self._meta, self.divisions))
return out
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, head received {}"
raise ValueError(msg.format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (safe_head, concat, n)
else:
dsk = {(name, 0): (safe_head, (self._name, 0), n)}
result = new_dd_object(merge(self.dask, dsk), name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
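# Added annotation, not in the original source: when npartitions > 1 the graph
# above takes the first n rows of each of the first `npartitions` partitions,
# concatenates those partial results, and applies `safe_head` once more so the
# final output never exceeds n rows.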
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the last n rows of the last partition.
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
from .indexing import _LocIndexer
return _LocIndexer(self)
# NOTE: `iloc` is not implemented because of performance concerns.
# see https://github.com/dask/dask/pull/507
def repartition(self, divisions=None, npartitions=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output, must be less than npartitions of
input. Only used if divisions isn't specified.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
if npartitions > self.npartitions:
raise ValueError("Can only repartition to fewer partitions")
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.Series)
def fillna(self, value):
return self.map_partitions(M.fillna, value=value)
def sample(self, frac, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
replace: boolean, optional
Sample with or without replacement. Default = False.
random_state: int or ``np.random.RandomState``
If int, we create a new RandomState with this as the seed.
Otherwise, we draw from the passed RandomState.
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
"""
if random_state is None:
random_state = np.random.RandomState()
name = 'sample-' + tokenize(self, frac, replace, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
for i, state in enumerate(state_data)}
return new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions)
def to_hdf(self, path_or_buf, key, mode='a', append=False, get=None, **kwargs):
""" Export frame to hdf file(s)
Export dataframe to one or multiple hdf5 files or nodes.
Exported hdf format is pandas' hdf table format only.
Data saved by this function should be read by pandas dataframe
compatible reader.
By providing a single asterisk in either the path_or_buf or key
parameters you direct dask to save each partition to a different file
or node (respectively). The asterisk will be replaced with a zero
padded partition number, as this is the default implementation of
name_function.
When writing to a single hdf node in a single hdf file, all hdf save
tasks are required to execute in a specific order, often becoming the
bottleneck of the entire execution graph. Saving to multiple nodes or
files removes that restriction (order is still preserved by enforcing
order on output, using name_function) and enables executing save tasks
in parallel.
Parameters
----------
path_or_buf: HDFStore object or string
Destination file(s). If string, can contain a single asterisk to
save each partition to a different file. Only one asterisk is
allowed in both path_or_buf and key parameters.
key: string
A node / group path in file, can contain a single asterisk to save
each partition to a different hdf node in a single file. Only one
asterisk is allowed in both path_or_buf and key parameters.
format: optional, default 'table'
Default hdf storage format, currently only pandas' 'table' format
is supported.
mode: optional, {'a', 'w', 'r+'}, default 'a'
``'a'``
Append: Add data to existing file(s) or create new.
``'w'``
Write: overwrite any existing files with new ones.
``'r+'``
Append to existing files, files must already exist.
append: optional, default False
If False, overwrites existing node with the same name otherwise
appends to it.
complevel: optional, 0-9, default 0
compression level, higher means better compression ratio and
possibly more CPU time. Depends on complib.
complib: {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
If complevel > 0 compress using this compression library when
possible
fletcher32: bool, default False
If True and compression is used, additionally apply the fletcher32
checksum.
get: callable, optional
A scheduler `get` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults
for the collections.
dask_kwargs: dict, optional
A dictionary of keyword arguments passed to the `get` function
used.
name_function: callable, optional, default None
A callable called for each partition that accepts a single int
representing the partition number. name_function must return a
string representation of a partition's index in a way that will
preserve the partition's location after a string sort.
If None, a default name_function is used. The default name_function
will return a zero padded string of received int. See
dask.utils.build_name_function for more info.
compute: bool, default True
If True, execute computation of resulting dask graph.
If False, return a Delayed object.
lock: bool, None or lock object, default None
In to_hdf locks are needed for two reasons. First, to protect
against writing to the same file from multiple processes or threads
simultaneously. Second, default libhdf5 is not thread safe, so we
must additionally lock on its usage. By default, if lock is None,
lock will be determined optimally based on path_or_buf, key and the
scheduler used. Manually setting this parameter is usually not
required to improve performance.
Alternatively, you can specify specific values:
If False, no locking will occur. If True, a default lock object will
be created (multiprocessing.Manager.Lock on the multiprocessing
scheduler, threading.Lock otherwise). This can be used to force
using a lock in scenarios where the default behavior would be to avoid
locking. Else, the value is assumed to implement the lock interface,
and will be the lock object used.
See Also
--------
dask.DataFrame.read_hdf: reading hdf files
dask.Series.read_hdf: reading hdf files
Examples
--------
Saving data to a single file:
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Saving data to multiple nodes:
>>> with pd.HDFStore('output.hdf') as fh:
... df.to_hdf(fh, '/data*')
... fh.keys() # doctest: +SKIP
['/data0', '/data1']
Or multiple files:
>>> df.to_hdf('output_*.hdf', '/data') # doctest: +SKIP
Saving multiple files with the multiprocessing scheduler and manually
disabling locks:
>>> df.to_hdf('output_*.hdf', '/data',
... get=dask.multiprocessing.get, lock=False) # doctest: +SKIP
"""
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, get=get, **kwargs)
def to_csv(self, filename, **kwargs):
"""Write DataFrame to a series of comma-separated values (csv) files
One filename per partition will be created. You can specify the
filenames in a variety of ways.
Use a globstring::
>>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP
The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/export-0.csv
/path/to/data/export-1.csv
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP
::
/path/to/data/export-2015-01-01.csv
/path/to/data/export-2015-01-02.csv
...
You can also provide an explicit list of paths::
>>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP
>>> df.to_csv(paths) # doctest: +SKIP
Parameters
----------
filename : string
Path glob indicating the naming scheme for the output files
name_function : callable, default None
Function accepting an integer (partition index) and producing a
string to replace the asterisk in the given filename globstring.
Should preserve the lexicographic order of partitions
compression : string or None
String like 'gzip' or 'xz'. Must support efficient random access.
Filenames with extensions corresponding to known compression
algorithms (gz, bz2) will be compressed accordingly automatically
sep : character, default ','
Field delimiter for the output file
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of strings, default True
Write out column names. If a list of strings is given, it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
nanRep : None
deprecated, use na_rep
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
write MultiIndex columns as a list of tuples (if True),
or in the new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_delayed(self):
""" Convert dataframe into dask Delayed objects
Returns a list of delayed values, one value per partition.
"""
from ..delayed import Delayed
return [Delayed(k, [self.dask]) for k in self._keys()]
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int
Size of the moving window. This is the number of observations used
for calculating the statistic. The window size must not be so large
as to span more than one adjacent partition.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
"""
from dask.dataframe.rolling import Rolling
if not isinstance(window, int):
raise ValueError('window must be an integer')
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, int):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
return self.map_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
else:
return self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('all', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('any', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('sum', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('max', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('min', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
return self.reduction(M.count, aggregate=M.sum, meta=meta,
token=token, split_every=split_every)
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
return map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'var-%s' % tokenize(self, axis, skipna, ddof)
return map_partitions(methods.var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
token = tokenize(self, axis, skipna, ddof)
name = self._token_prefix + 'std-finish--%s' % token
return map_partitions(np.sqrt, v, meta=meta, token=name)
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
if LooseVersion(pd.__version__) >= '0.19':
name = q
else:
name = None
meta = pd.Series([], dtype='f8', name=name)
return map_partitions(M.quantile, self, q, axis,
token=keyname, meta=meta)
else:
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[_q.dask for _q in quantiles])
qnames = [(_q._name, 0) for _q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(keyname, 0)] = (pd.Series, qnames, num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, keyname, meta, divisions)
else:
dask[(keyname, 0)] = (methods.concat, qnames, 1)
return DataFrame(dask, keyname, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self, split_every=False):
# currently, only numeric describe is supported
num = self._get_numeric_data()
stats = [num.count(split_every=split_every),
num.mean(split_every=split_every),
num.std(split_every=split_every),
num.min(split_every=split_every),
num.quantile([0.25, 0.5, 0.75]),
num.max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
dsk = merge(num.dask, *(s.dask for s in stats))
dsk[(name, 0)] = (methods.describe_aggregate, stats_names)
return new_dd_object(dsk, name, num._meta, divisions=[None, None])
def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
# aggregate each cumulated partition with the previous partitions' last elements
dask = {}
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dask[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return new_dd_object(merge(dask, cumpart.dask, cumlast.dask),
name, chunk(self._meta), self.divisions)
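# Added annotation, not in the original source: for axis=0 the graph built
# above has three layers per partition -- the blockwise cumulation (cumpart),
# the last value of each cumulated block (cumlast), and a running carry
# (cname) that folds the previous partitions' last values into partition i
# using `aggregate` (operator.add for cumsum, operator.mul for cumprod,
# element-wise max/min for cummax/cummin).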
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instance,
# passing map_partitions via keyword will not be aligned
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def astype(self, dtype):
return self.map_partitions(M.astype, dtype=dtype,
meta=self._meta.astype(dtype))
@derived_from(pd.Series)
def append(self, other):
# because DataFrame.append will override the method,
# wrap by pd.Series.append docstring
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
if not isinstance(other, _Frame):
from .io import from_pandas
other = from_pandas(other, 1)
from .multi import _append
if self.known_divisions and other.known_divisions:
if self.divisions[-1] < other.divisions[0]:
divisions = self.divisions[:-1] + other.divisions
return _append(self, other, divisions)
else:
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
raise ValueError(msg)
else:
divisions = [None] * (self.npartitions + other.npartitions + 1)
return _append(self, other, divisions)
@derived_from(pd.DataFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.map_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = dict(((name1, i), (getitem, key, 0))
for i, key in enumerate(aligned._keys()))
dsk1.update(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)
name2 = 'align2-' + token
dsk2 = dict(((name2, i), (getitem, key, 1))
for i, key in enumerate(aligned._keys()))
dsk2.update(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
return result1, result2
@derived_from(pd.DataFrame)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, how=None, closed=None, label=None):
from .tseries.resample import _resample
return _resample(self, rule, how=how, closed=closed, label=label)
normalize_token.register((Scalar, _Frame), lambda a: a._name)
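# Illustrative sketch, not part of the original module: a tiny end-to-end use
# of the `reduction` machinery defined on _Frame above, mirroring its
# docstring example. `_demo_reduction` is a hypothetical helper, and the local
# import of dask.dataframe is only to keep the sketch self-contained.
def _demo_reduction():
    import dask.dataframe as dd
    pdf = pd.DataFrame({'x': range(8), 'y': range(8, 16)})
    ddf = dd.from_pandas(pdf, npartitions=4)
    # count rows in each partition, then sum the per-partition counts
    counts = ddf.reduction(lambda part: part.count(),
                           aggregate=lambda parts: parts.sum(),
                           split_every=2)
    assert counts.compute().tolist() == [8, 8]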
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_token_prefix = 'series-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.Series(array, index=index, name=self.name)
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
from .accessor import DatetimeAccessor
return DatetimeAccessor(self)
@derived_from(pd.Series)
def reset_index(self, drop=False):
return super(Series, self).reset_index(drop=drop)
@cache_readonly
def cat(self):
from .accessor import CategoricalAccessor
return CategoricalAccessor(self)
@cache_readonly
def str(self):
from .accessor import StringAccessor
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'cat'):
o.remove('cat') # cat only in `dir` if available
return list(o)
@property
def nbytes(self):
return self.reduction(methods.nbytes, np.sum, token='nbytes',
meta=int, split_every=False)
@derived_from(pd.Series)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def quantile(self, q=0.5):
""" Approximate quantiles of Series
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
return quantile(self, q)
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self._meta, self.divisions)
raise NotImplementedError()
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, index, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, index, **kwargs)
@derived_from(pd.Series)
def count(self, split_every=False):
return super(Series, self).count(split_every=split_every)
def unique(self, split_every=None):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
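        Examples
        --------
        A hypothetical sketch (illustrative only; assumes ``dd`` is
        ``dask.dataframe``):
        >>> ds = dd.from_pandas(pd.Series([1, 1, 2, 3]), npartitions=2)  # doctest: +SKIP
        >>> ds.unique().compute()  # doctest: +SKIP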
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', split_every=split_every,
series_name=self.name)
@derived_from(pd.Series)
def nunique(self, split_every=None):
return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(self, split_every=None):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
combine=methods.value_counts_combine,
meta=self._meta.value_counts(), token='value-counts',
split_every=split_every)
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest-n={0}'.format(n),
split_every=split_every, n=n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(M.isin, self, list(other))
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if not (isinstance(arg, (pd.Series, dict)) or callable(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = dict(((name, i), (M.map, k, arg, na_action)) for i, k in
enumerate(self._keys()))
dsk.update(self.dask)
if meta is no_default:
meta = _emulate(M.map, self, arg, na_action=na_action)
else:
meta = make_meta(meta)
return Series(dsk, name, meta, self.divisions)
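    # Hedged usage sketch for ``Series.map`` (names are illustrative, not part
    # of the original source): when metadata inference would be costly or
    # wrong, pass ``meta`` explicitly, e.g.
    # ``ds.map({1: 'a', 2: 'b'}, meta=('x', 'object'))``
    # where ``ds`` is a dask Series named 'x'.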
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
# np.clip may pass out
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.Series)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.Series)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Series, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(pd.Series)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
"""Convert to a dask Bag.
Parameters
----------
index : bool, optional
If True, the elements are tuples of ``(index, value)``, otherwise
they're just the ``value``. Default is False.
"""
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, level=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default,
name=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
name : list, scalar or None, optional
            Deprecated, use `meta` instead. If a list is given, the result is
            a DataFrame whose columns are the given list. Otherwise, the
            result is a Series whose name is the given scalar, or unnamed if
            None. If the `name` keyword is not given, dask tries to infer the
            result type from the beginning of the data. This inference may
            take some time and can lead to unexpected results.
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
        applied : Series, or DataFrame if ``func`` returns a Series for
            each element.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if name is not no_default:
warnings.warn("`name` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(name, (pd.DataFrame, pd.Series)):
meta = name
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, **kwds)
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True)
class Index(Series):
_partition_type = pd.Index
_token_prefix = 'index-'
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
return pd.Index(array, name=self.name)
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat, this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
@derived_from(pd.Index)
def max(self, split_every=False):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max',
split_every=split_every)
@derived_from(pd.Index)
def min(self, split_every=False):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int,
split_every=split_every)
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.DataFrame(array, index=index, columns=self.columns)
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
renamed = _rename_dask(self, columns)
self._meta = renamed._meta
self._name = renamed._name
self.dask.update(renamed.dask)
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key) or isinstance(key, tuple):
if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
return self.loc[key]
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, list):
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = {(name, i): (M._getitem_array, (self._name, i), (key._name, i))
for i in range(self.npartitions)}
return new_dd_object(merge(self.dask, key.dask, dsk), name,
self, self.divisions)
raise NotImplementedError(key)
def __setitem__(self, key, value):
if isinstance(key, (tuple, list)):
df = self.assign(**{k: value[c]
for k, c in zip(key, value.columns)})
else:
df = self.assign(**{key: value})
self.dask = df.dask
self._name = df._name
self._meta = df._meta
def __setattr__(self, key, value):
try:
columns = object.__getattribute__(self, '_meta').columns
except AttributeError:
columns = ()
if key in columns:
self[key] = value
else:
object.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self.columns:
meta = self._meta[key]
name = 'getitem-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, pd.compat.string_types) and
pd.compat.isidentifier(c)))
return list(o)
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(pd.DataFrame)
def get_dtype_counts(self):
return self._meta.get_dtype_counts()
@derived_from(pd.DataFrame)
def get_ftype_counts(self):
return self._meta.get_ftype_counts()
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, drop=True, sorted=False, **kwargs):
""" Set the DataFrame index (row labels) using an existing column
This operation in dask.dataframe is expensive. If the input column is
sorted then we accomplish the set_index in a single full read of that
column. However, if the input column is not sorted then this operation
triggers a full shuffle, which can take a while and only works on a
single machine (not distributed).
Parameters
----------
other: Series or label
drop: boolean, default True
Delete columns to be used as the new index
sorted: boolean, default False
Set to True if the new index column is already sorted
Examples
--------
>>> df.set_index('x') # doctest: +SKIP
>>> df.set_index(d.x) # doctest: +SKIP
>>> df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
"""
if sorted:
return set_sorted_index(self, other, drop=drop, **kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See Also
--------
set_index
"""
from .shuffle import set_partition
return set_partition(self, column, divisions, **kwargs)
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nlargest-n={0}'.format(n)
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def groupby(self, key, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
"""
Convert columns of the DataFrame to category dtype
Parameters
----------
columns : list, optional
A list of column names to convert to the category type. By
default any column with an object dtype is converted to a
categorical.
kwargs
Keyword arguments are passed on to compute.
Notes
-----
When dealing with columns of repeated text values converting to
categorical type is often much more performant, both in terms of memory
and in writing to disk or communication over the network.
See also
--------
        dask.dataframe.categorical.categorize
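        Examples
        --------
        A hypothetical sketch (column names are illustrative, not from the
        original docs):
        >>> ddf2 = ddf.categorize(columns=['state', 'city'])  # doctest: +SKIP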
"""
from dask.dataframe.categorical import categorize
return categorize(self, columns, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
for k, v in kwargs.items():
if not (isinstance(v, (Series, Scalar, pd.Series)) or
np.isscalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._meta.assign(**_extract_meta(kwargs))
return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return self.map_partitions(M.rename, None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
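        A hypothetical call (sketch only; assumes a dask DataFrame ``ddf``
        with numeric columns ``x`` and ``y``):
            ddf2 = ddf.query('x > 0 and y < 5')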
The original docstring follows below:\n
""" + (pd.DataFrame.query.__doc__
if pd.DataFrame.query.__doc__ is not None else '')
name = 'query-%s' % tokenize(self, expr)
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, M.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (M.query, (self._name, i), expr))
for i in range(self.npartitions))
meta = self._meta.query(expr, **kwargs)
return new_dd_object(merge(dsk, self.dask), name,
meta, self.divisions)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
return self.map_partitions(M.dropna, how=how, subset=subset)
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.DataFrame)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.DataFrame)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def to_castra(self, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from .io import to_castra
return to_castra(self, fn, categories, sorted_index_column,
compute=compute, get=get)
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
"""
from .io import to_bag
return to_bag(self, index)
def _get_numeric_data(self, how='any', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return self.map_partitions(M._get_numeric_data,
meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, dtype=None):
if axis != 1:
raise NotImplementedError("Drop currently only works for axis=1")
if dtype is not None:
return elemwise(drop_columns, self, labels, dtype)
else:
return elemwise(M.drop, self, labels, axis)
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
npartitions=npartitions, indicator=indicator,
shuffle=shuffle)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
            msg = ('Unable to append dd.Series to dd.DataFrame. '
                   'Use pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.itertuples():
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis in (1, 'columns'):
# When axis=1 and other is a series, `other` is transposed
# and the operator is applied broadcast across rows. This
# isn't supported with dd.Series.
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
elif isinstance(other, pd.Series):
# Special case for pd.Series to avoid unwanted partitioning
# of other. We pass it in as a kwarg to prevent this.
meta = _emulate(op, self, other=other, axis=axis,
fill_value=fill_value)
return map_partitions(op, self, other=other, meta=meta,
axis=axis, fill_value=fill_value)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, axis='columns', level=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, axis=0, args=(), meta=no_default,
columns=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
columns : list, scalar or None
            Deprecated, please use `meta` instead. If a list is given, the
            result is a DataFrame whose columns are the given list.
            Otherwise, the result is a Series whose name is the given scalar,
            or unnamed if None. If the `columns` keyword is not given, dask
            tries to infer the result type from the beginning of the data.
            This inference may take some time and can lead to unexpected
            results.
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function to row-wise passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
if axis == 0:
msg = ("dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
raise NotImplementedError(msg)
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
axis=axis, args=args, **kwds)
return map_partitions(M.apply, self, func, axis,
False, False, None, args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta='__no_default__'):
return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None):
return cov_corr(self, min_periods)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame.
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self))]
if len(self.columns) == 0:
lines.append('Index: 0 entries')
lines.append('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({'index': self.index, 'count': self.count()})
if memory_usage:
computations.update({'memory_usage': self.map_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
column_template = "{0:<%d} {1}" % (self.columns.str.len().max() + 5)
if verbose:
index = computations['index']
counts = computations['count']
lines.append(index.summary())
column_template = column_template.format('{0}', '{1} non-null {2}')
column_info = [column_template.format(*x) for x in zip(self.columns, counts, self.dtypes)]
else:
column_info = [column_template.format(*x) for x in zip(self.columns, self.dtypes)]
lines.append('Data columns (total {} columns):'.format(len(self.columns)))
lines.extend(column_info)
dtype_counts = ['%s(%d)' % k for k in sorted(self.dtypes.value_counts().iteritems(), key=str)]
lines.append('dtypes: {}'.format(', '.join(dtype_counts)))
if memory_usage:
memory_int = computations['memory_usage'].sum()
lines.append('memory usage: {}\n'.format(memory_repr(memory_int)))
put_lines(buf, lines)
def pivot_table(self, index=None, columns=None,
values=None, aggfunc='mean'):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
        must have category dtype so that the result's ``columns`` can be
        inferred. ``index``, ``columns``, ``values`` and ``aggfunc`` must all
        be scalars.
Parameters
----------
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
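        Examples
        --------
        A hypothetical sketch (column names are illustrative; the ``columns``
        target must be categorical):
        >>> ddf.pivot_table(index='id', columns='state', values='amount',
        ...                 aggfunc='sum')  # doctest: +SKIP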
"""
from .reshape import pivot_table
return pivot_table(self, index=index, columns=columns, values=values,
aggfunc=aggfunc)
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
for name in ['lt', 'gt', 'le', 'ge', 'ne', 'eq']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_comparison_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_comparison_method(name, meth)
def elemwise_property(attr, s):
meta = pd.Series([], dtype=getattr(s._meta, attr).dtype)
return map_partitions(getattr, s, attr, meta=meta)
for name in ['nanosecond', 'microsecond', 'millisecond', 'second', 'minute',
'hour', 'day', 'dayofweek', 'dayofyear', 'week', 'weekday',
'weekofyear', 'month', 'quarter', 'year']:
setattr(Index, name, property(partial(elemwise_property, name)))
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
meta = kwargs.pop('meta', no_default)
_name = funcname(op) + '-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar)
else d._keys() for d in dasks]
if other:
dsk = dict(((_name, i),
(apply, partial_by_order, list(frs),
{'function': op, 'other': other}))
for i, frs in enumerate(zip(*keys)))
else:
dsk = dict(((_name, i), (op,) + frs) for i, frs in enumerate(zip(*keys)))
dsk = merge(dsk, *[d.dask for d in dasks])
if meta is no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
meta = _emulate(op, *args, **kwargs)
return new_dd_object(dsk, _name, meta, divisions)
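# Hedged sketch of how ``elemwise`` is used internally (names illustrative):
# the bound operators below route through it, e.g.
# ``elemwise(operator.add, ddf.x, 1)`` builds one task per partition that
# applies ``operator.add`` blockwise and reuses the input divisions.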
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
@insert_meta_param_description
def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
meta=no_default, token=None, split_every=None,
chunk_kwargs=None, aggregate_kwargs=None,
combine_kwargs=None, **kwargs):
"""Apply a function to blocks, then concat, then apply again
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function concatenated-block -> block
Function to operate on the concatenated result of chunk
combine : function concatenated-block -> block, optional
Function to operate on intermediate concatenated results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
npartitions = set(arg.npartitions for arg in args
if isinstance(arg, _Frame))
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(token or (chunk, aggregate), meta, args,
chunk_kwargs, aggregate_kwargs, combine_kwargs,
split_every)
# Chunk
a = '{0}-chunk-{1}'.format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {(a, i): (chunk, key) for i, key in enumerate(args[0]._keys())}
else:
dsk = {(a, i): (apply, chunk, [(x._name, i) if isinstance(x, _Frame)
else x for x in args], chunk_kwargs)
for i in range(args[0].npartitions)}
# Combine
prefix = '{0}-combine-{1}-'.format(token or funcname(combine), token_key)
k = npartitions
b = a
depth = 0
while k > split_every:
b = prefix + str(depth)
for part_i, inds in enumerate(partition_all(split_every, range(k))):
conc = (_concat, [(a, i) for i in inds])
if combine_kwargs:
dsk[(b, part_i)] = (apply, combine, [conc], combine_kwargs)
else:
dsk[(b, part_i)] = (combine, conc)
k = part_i + 1
a = b
depth += 1
# Aggregate
b = '{0}-agg-{1}'.format(token or funcname(aggregate), token_key)
conc = (_concat, [(a, i) for i in range(k)])
if aggregate_kwargs:
dsk[(b, 0)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, 0)] = (aggregate, conc)
if meta is no_default:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [_concat([meta_chunk])],
aggregate_kwargs)
meta = make_meta(meta)
for arg in args:
if isinstance(arg, _Frame):
dsk.update(arg.dask)
return new_dd_object(dsk, b, meta, [None, None])
aca = apply_concat_apply
def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
if isinstance(x, (_Frame, Scalar)):
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x, nonempty) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_meta(x[k], nonempty)
return res
else:
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
        Arguments and keywords to pass to the function. At least one of the
        args should be a dask DataFrame or Series.
$META
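    Examples
    --------
    A hypothetical sketch (illustrative only; assumes ``ddf`` is a dask
    DataFrame with a numeric column ``x``):
    >>> ddf2 = map_partitions(lambda df: df.assign(y=df.x * 2), ddf)  # doctest: +SKIP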
"""
meta = kwargs.pop('meta', no_default)
if meta is not no_default:
meta = make_meta(meta)
assert callable(func)
if 'token' in kwargs:
name = kwargs.pop('token')
token = tokenize(meta, *args, **kwargs)
else:
name = funcname(func)
token = tokenize(func, meta, *args, **kwargs)
name = '{0}-{1}'.format(name, token)
from .multi import _maybe_align_partitions
args = _maybe_from_pandas(args)
args = _maybe_align_partitions(args)
if meta is no_default:
meta = _emulate(func, *args, **kwargs)
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name, meta)
elif not isinstance(meta, (pd.Series, pd.DataFrame, pd.Index)):
# If `meta` is not a pandas object, the concatenated results will be a
# different type
meta = _concat([meta])
meta = make_meta(meta)
dfs = [df for df in args if isinstance(df, _Frame)]
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
dsk[(name, i)] = (apply_and_enforce, func, values, kwargs, meta)
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return new_dd_object(merge(dsk, *dasks), name, meta, args[0].divisions)
def apply_and_enforce(func, args, kwargs, meta):
"""Apply a function, and enforce the output to match meta
Ensures the output has the same columns, even if empty."""
df = func(*args, **kwargs)
if isinstance(df, (pd.DataFrame, pd.Series, pd.Index)):
if len(df) == 0:
return meta
c = meta.columns if isinstance(df, pd.DataFrame) else meta.name
return _rename(c, df)
return df
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
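    Examples
    --------
    A hypothetical sketch (pandas objects only; illustrative):
    >>> _rename(['a', 'b'], pd.DataFrame({'x': [1], 'y': [2]}))  # doctest: +SKIP
    >>> _rename('z', pd.Series([1, 2], name='x'))  # doctest: +SKIP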
"""
assert not isinstance(df, _Frame)
if columns is no_default:
return df
if isinstance(columns, Iterator):
columns = list(columns)
if isinstance(df, pd.DataFrame):
if isinstance(columns, pd.DataFrame):
columns = columns.columns
columns = pd.Index(columns)
if len(columns) == len(df.columns):
if columns.equals(df.columns):
# if target is identical, rename is not necessary
return df
        # deep=False doesn't copy any data/indices, so this is cheap
df = df.copy(deep=False)
df.columns = columns
return df
elif isinstance(df, (pd.Series, pd.Index)):
if isinstance(columns, (pd.Series, pd.Index)):
columns = columns.name
if df.name == columns:
return df
return df.rename(columns)
# map_partition may pass other types
return df
def _rename_dask(df, names):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
    Internally used to overwrite dd.DataFrame.columns and dd.Series.name.
    We can't use map_partitions because it applies the function and then
    renames.
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
names : tuple, string
Column names/Series name
"""
assert isinstance(df, _Frame)
metadata = _rename(names, df._meta)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = {}
for i in range(df.npartitions):
dsk[name, i] = (_rename, metadata, (df._name, i))
return new_dd_object(merge(dsk, df.dask), name, metadata, df.divisions)
def quantile(df, q):
"""Approximate quantiles of Series.
Parameters
----------
q : list/array of floats
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
assert isinstance(df, Series)
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(df, Index):
meta = pd.Series(df._meta_nonempty).quantile(q)
else:
meta = df._meta_nonempty.quantile(q)
if isinstance(meta, pd.Series):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
return_type = Series
else:
finalize_tsk = lambda tsk: (getitem, tsk, 0)
return_type = Scalar
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df._meta, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): finalize_tsk((merge_percentiles, qs,
[qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, meta, new_divisions)
def cov_corr(df, min_periods=None, corr=False, scalar=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
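    Examples
    --------
    A hypothetical sketch (illustrative only; ``ddf`` is a numeric dask
    DataFrame):
    >>> cov_corr(ddf)             # doctest: +SKIP
    >>> cov_corr(ddf, corr=True)  # doctest: +SKIP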
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
prefix = 'corr' if corr else 'cov'
df = df._get_numeric_data()
name = '{0}-agg-{1}'.format(prefix, tokenize(df, min_periods, scalar))
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
k = '{0}-chunk-{1}'.format(prefix, df._name)
dsk = dict(((k, i), (cov_corr_chunk, f, corr))
for (i, f) in enumerate(df._keys()))
dsk[(name, 0)] = (cov_corr_agg, list(dsk.keys()), df._meta, min_periods,
corr, scalar)
dsk = merge(df.dask, dsk)
if scalar:
return Scalar(dsk, name, 'f8')
meta = make_meta([(c, 'f8') for c in df.columns], index=df._meta.columns)
return DataFrame(dsk, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation"""
mat = df.values
mask = np.isfinite(mat)
keep = np.bitwise_and(mask[:, None, :], mask[:, :, None])
x = np.where(keep, mat[:, None, :], np.nan)
sums = np.nansum(x, 0)
counts = keep.astype('int').sum(0)
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
m = np.nansum((x - sums / np.where(counts, counts, np.nan)) ** 2, 0)
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_agg(data, meta, min_periods=2, corr=False, scalar=False):
"""Aggregation part of a covariance or correlation computation"""
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
d = (s2 / n2) - (s1 / n1)
C = (np.nansum((n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
C[cum_counts[-1] < min_periods] = np.nan
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
if corr:
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m2 = np.nansum(data['m'] + counts * (sums / counts_na - mu) ** 2,
axis=0)
den = np.sqrt(m2 * m2.T)
else:
den = nobs - 1
mat = C / den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=meta.columns, index=meta.columns)
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
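    Examples
    --------
    A hypothetical sketch (pandas objects only; illustrative):
    >>> _take_last(pd.Series([1.0, 2.0, np.nan]))  # doctest: +SKIP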
"""
if skipna is False:
return a.iloc[-1]
else:
# take last valid value excluding NaN, NaN location may be different
        # in each column
group_dummy = np.ones(len(a.index))
last_row = a.groupby(group_dummy).last()
if isinstance(a, pd.DataFrame):
return pd.Series(last_row.values[0], index=a.columns)
else:
return last_row.values[0]
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function _loc_repartition at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function _loc_repartition at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function _loc_repartition at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function _loc_repartition at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
if not isinstance(b, (list, tuple)):
raise ValueError('New division must be list or tuple')
b = list(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
        raise ValueError('New division must contain at least 2 elements')
if b != sorted(b):
raise ValueError('New division must be sorted')
if len(b[:-1]) != len(list(unique(b[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
if force:
if a[0] < b[0]:
            msg = ('left side of the new division must be equal to or '
                   'smaller than the old division')
raise ValueError(msg)
if a[-1] > b[-1]:
            msg = ('right side of the new division must be equal to or '
                   'larger than the old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
            # (methods._loc_repartition, ('from_pandas-#', 0), 3, 4, False)
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (methods._loc_repartition, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (methods._loc_repartition, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, tmp)
j += 1
return d
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
npartitions_ratio = df.npartitions / npartitions
new_partitions_boundaries = [int(new_partition_index * npartitions_ratio)
for new_partition_index in range(npartitions + 1)]
new_name = 'repartition-%d-%s' % (npartitions, tokenize(df))
dsk = {}
for new_partition_index in range(npartitions):
value = (pd.concat, [(df._name, old_partition_index)
for old_partition_index in
range(new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])])
dsk[new_name, new_partition_index] = value
divisions = [df.divisions[new_partition_index]
for new_partition_index in new_partitions_boundaries]
return DataFrame(merge(df.dask, dsk), new_name, df._meta, divisions)
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return new_dd_object(merge(df.dask, dsk), out,
df._meta, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return new_dd_object(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
def set_sorted_index(df, index, drop=True, **kwargs):
if not isinstance(index, Series):
meta = df._meta.set_index(index, drop=drop)
else:
meta = df._meta.set_index(index._meta, drop=drop)
result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)
return compute_divisions(result, **kwargs)
def compute_divisions(df, **kwargs):
mins = df.index.map_partitions(M.min, meta=df.index)
maxes = df.index.map_partitions(M.max, meta=df.index)
mins, maxes = compute(mins, maxes, **kwargs)
if (sorted(mins) != list(mins) or
sorted(maxes) != list(maxes) or
any(a > b for a, b in zip(mins, maxes))):
raise ValueError("Partitions must be sorted ascending with the index",
mins, maxes)
divisions = tuple(mins) + (list(maxes)[-1],)
df = copy(df)
df.divisions = divisions
return df
def _reduction_chunk(x, aca_chunk=None, **kwargs):
o = aca_chunk(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_combine(x, aca_combine=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
o = aca_combine(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def drop_columns(df, columns, dtype):
df = df.drop(columns, axis=1)
df.columns = df.columns.astype(dtype)
return df
def idxmaxmin_chunk(x, fn=None, skipna=True):
idx = getattr(x, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x, minmax)(skipna=skipna)
if isinstance(x, pd.DataFrame):
return pd.DataFrame({'idx': idx, 'value': value})
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_row(x, fn=None, skipna=True):
x = x.set_index('idx')
idx = getattr(x.value, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x.value, minmax)(skipna=skipna)
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_combine(x, fn=None, skipna=True):
return (x.groupby(level=0)
.apply(idxmaxmin_row, fn=fn, skipna=skipna)
.reset_index(level=1, drop=True))
def idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):
res = idxmaxmin_combine(x, fn, skipna=skipna)['idx']
if scalar:
return res[0]
res.name = None
return res
def safe_head(df, n):
r = df.head(n=n)
if len(r) != n:
msg = ("Insufficient elements for `head`. {0} elements "
"requested, only {1} elements available. Try passing larger "
"`npartitions` to `head`.")
warnings.warn(msg.format(n, len(r)))
return r
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/core.py",
"copies": "1",
"size": "125389",
"license": "mit",
"hash": 1574855083330469000,
"line_mean": 36.2737812128,
"line_max": 102,
"alpha_frac": 0.5747713117,
"autogenerated": false,
"ratio": 3.9836383276146905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.505840963931469,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from copy import copy
import operator
from operator import getitem, setitem
from pprint import pformat
import uuid
import warnings
from toolz import merge, partial, first, unique
import pandas as pd
from pandas.util.decorators import cache_readonly
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import apply, operator_div, bind_method
from ..utils import (repr_long_list, IndexCallable,
pseudorandom, derived_from, different_seeds, funcname,
memory_repr, put_lines, M)
from ..base import Base, compute, tokenize, normalize_token
from ..async import get_sync
from . import methods
from .indexing import (_partition_of_index_value, _loc, _try_loc,
_coerce_loc_index, _maybe_partial_time_string)
from .utils import meta_nonempty, make_meta, insert_meta_param_description
no_default = '__no_default__'
pd.computation.expressions.set_use_numexpr(False)
def _concat(args, **kwargs):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
return args[0].append(args[1:])
try:
return pd.Series(args)
except:
return args
def _get_return_type(meta):
if isinstance(meta, _Frame):
meta = meta._meta
if isinstance(meta, pd.Series):
return Series
elif isinstance(meta, pd.DataFrame):
return DataFrame
elif isinstance(meta, pd.Index):
return Index
return Scalar
def new_dd_object(dsk, _name, meta, divisions):
"""Generic constructor for dask.dataframe objects.
Decides the appropriate output class based on the type of `meta` provided.
"""
return _get_return_type(meta)(dsk, _name, meta, divisions)
def optimize(dsk, keys, **kwargs):
from .optimize import optimize
return optimize(dsk, keys, **kwargs)
def finalize(results):
return _concat(results)
class Scalar(Base):
""" A Dask object to represent a pandas scalar"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(first)
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
self.dask = dsk
self._name = name
self._meta = make_meta(meta)
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# The array interface is required to support pandas instance + Scalar;
# otherwise the above op results in a pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
def _keys(self):
return [self.key]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
try:
meta = op(self._meta_nonempty)
except Exception:
raise ValueError("Metadata inference failed in operator "
"{0}.".format(funcname(op)))
return Scalar(merge(dsk, self.dask), name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dsk = self.dask
return_type = _get_return_type(other)
if isinstance(other, Scalar):
dsk = merge(dsk, other.dask)
other_key = (other._name, 0)
elif isinstance(other, Base):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
try:
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
except Exception:
raise ValueError("Metadata inference failed in operator "
"{0}.".format(funcname(op)))
if return_type is not Scalar:
return return_type(dsk, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(dsk, name, meta)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
_name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dsk, _name, meta, divisions):
self.dask = dsk
self._name = _name
meta = make_meta(meta)
if not isinstance(meta, self._partition_type):
raise ValueError("Expected meta to specify type {0}, got type "
"{1}".format(self._partition_type.__name__,
type(meta).__name__))
self._meta = meta
self.divisions = tuple(divisions)
# constructor properties
# http://pandas.pydata.org/pandas-docs/stable/internals.html#override-constructor-properties
@property
def _constructor_sliced(self):
"""Constructor used when a result has one lower dimension(s) as the original"""
raise NotImplementedError
@property
def _constructor(self):
"""Constructor used when a result has the same dimension(s) as the original"""
raise NotImplementedError
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def _pd(self):
warnings.warn('Deprecation warning: use `_meta` instead')
return self._meta
@property
def _pd_nonempty(self):
warnings.warn('Deprecation warning: use `_meta_nonempty` instead')
return self._meta_nonempty
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if self.known_divisions:
div_text = ', divisions=%s' % repr_long_list(self.divisions)
else:
div_text = ''
return ("dd.%s<%s, npartitions=%s%s>" %
(self.__class__.__name__, name, self.npartitions, div_text))
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name,
self._meta.index, self.divisions)
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_division(self, n):
warnings.warn("Deprecation warning: use `get_partition` instead")
return self.get_partition(n)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n+2]
return self._constructor(merge(self.dask, dsk), name,
self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + uuid.uuid1().hex
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
self._get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
name = 'from-cache-' + self._name
dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))
for i, key in enumerate(self._keys()))
return self._constructor(dsk2, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def drop_duplicates(self, **kwargs):
assert all(k in ('keep', 'subset', 'take_last') for k in kwargs)
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int).compute()
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, meta=no_default,
token=None, chunk_kwargs=None, aggregate_kwargs=None,
**kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
kwargs :
All remaining keywords will be passed to both ``chunk`` and
``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
meta=meta, token=token, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs, **kwargs)
def random_split(self, p, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
p : list of floats
List of fractions determining the relative sizes of the pieces; should sum to 1.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
seeds = different_seeds(self.npartitions, random_state)
dsk_full = dict(((self._name + '-split-full', i),
(pd_split, (self._name, i), p, seed))
for i, seed in enumerate(seeds))
dsks = [dict(((self._name + '-split-%d' % i, j),
(getitem, (self._name + '-split-full', j), i))
for j in range(self.npartitions))
for i in range(len(p))]
return [type(self)(merge(self.dask, dsk_full, dsk),
self._name + '-split-%d' % i,
self._meta, self.divisions)
for i, dsk in enumerate(dsks)]
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
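Examples
--------
Illustrative usage (a sketch; ``ddf`` is assumed to be a dask DataFrame
with at least two partitions):
>>> ddf.head(3)  # doctest: +SKIP
>>> ddf.head(20, npartitions=2)  # doctest: +SKIP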
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
raise ValueError("only {} partitions, head "
"received {}".format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, ([(name_p, i) for i in range(npartitions)]))
dsk[(name, 0)] = (safe_head, concat, n)
else:
dsk = {(name, 0): (safe_head, (self._name, 0), n)}
result = self._constructor(merge(self.dask, dsk), name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the last n rows of the last partition.
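Illustrative usage (``ddf`` is assumed to be a dask DataFrame):
>>> ddf.tail(3)  # doctest: +SKIP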
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
result = self._constructor(merge(self.dask, dsk), name,
self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
def _loc(self, ind):
""" Helper function for the .loc accessor """
if isinstance(ind, Series):
return self._loc_series(ind)
if self.known_divisions:
ind = _maybe_partial_time_string(self._meta.index, ind, kind='loc')
if isinstance(ind, slice):
return self._loc_slice(ind)
else:
return self._loc_element(ind)
else:
return map_partitions(_try_loc, self, ind, meta=self)
def _loc_series(self, ind):
if self.divisions != ind.divisions:
raise ValueError("Partitions of dataframe and index not the same")
return map_partitions(methods.loc, self, ind, token='loc-series',
meta=self)
def _loc_element(self, ind):
name = 'loc-%s' % tokenize(ind, self)
part = _partition_of_index_value(self.divisions, ind)
if ind < self.divisions[0] or ind > self.divisions[-1]:
raise KeyError('the label [%s] is not in the index' % str(ind))
dsk = {(name, 0): (methods.loc, (self._name, part), slice(ind, ind))}
return self._constructor(merge(self.dask, dsk), name, self, [ind, ind])
def _loc_slice(self, ind):
name = 'loc-%s' % tokenize(ind, self)
assert ind.step in (None, 1)
if ind.start:
start = _partition_of_index_value(self.divisions, ind.start)
else:
start = 0
if ind.stop is not None:
stop = _partition_of_index_value(self.divisions, ind.stop)
else:
stop = self.npartitions - 1
istart = _coerce_loc_index(self.divisions, ind.start)
istop = _coerce_loc_index(self.divisions, ind.stop)
if stop == start:
dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}
divisions = [istart, istop]
else:
dsk = merge(
{(name, 0): (_loc, (self._name, start), ind.start, None)},
dict(((name, i), (self._name, start + i))
for i in range(1, stop - start)),
{(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})
divisions = ((max(istart, self.divisions[start])
if ind.start is not None
else self.divisions[0],) +
self.divisions[start+1:stop+1] +
(min(istop, self.divisions[stop+1])
if ind.stop is not None
else self.divisions[-1],))
assert len(divisions) == len(dsk) + 1
return self._constructor(merge(self.dask, dsk), name,
self._meta, divisions)
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
return IndexCallable(self._loc)
# NOTE: `iloc` is not implemented because of performance concerns.
# see https://github.com/dask/dask/pull/507
def repartition(self, divisions=None, npartitions=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output, must be less than npartitions of
input. Only used if divisions isn't specified.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
if npartitions > self.npartitions:
raise ValueError("Can only repartition to fewer partitions")
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.Series)
def fillna(self, value):
return self.map_partitions(M.fillna, value=value)
def sample(self, frac, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
replace: boolean, optional
Sample with or without replacement. Default = False.
random_state: int or ``np.random.RandomState``
If int we create a new RandomState with this as the seed
Otherwise we draw from the passed RandomState
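Examples
--------
Illustrative usage (``df`` is assumed to be a dask DataFrame):
>>> smaller = df.sample(0.5, random_state=123)  # doctest: +SKIP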
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
"""
if random_state is None:
random_state = np.random.randint(np.iinfo(np.int32).max)
name = 'sample-' + tokenize(self, frac, replace, random_state)
func = M.sample
seeds = different_seeds(self.npartitions, random_state)
dsk = dict(((name, i),
(apply, func, (tuple, [(self._name, i)]),
{'frac': frac, 'random_state': seed,
'replace': replace}))
for i, seed in zip(range(self.npartitions), seeds))
return self._constructor(merge(self.dask, dsk), name,
self._meta, self.divisions)
def to_hdf(self, path_or_buf, key, mode='a', append=False, get=None, **kwargs):
""" Export frame to hdf file(s)
Export dataframe to one or multiple hdf5 files or nodes.
Exported hdf format is pandas' hdf table format only.
Data saved by this function should be read by pandas dataframe
compatible reader.
By providing a single asterisk in either the path_or_buf or key
parameters you direct dask to save each partition to a different file
or node (respectively). The asterisk will be replaced with a zero
padded partition number, as this is the default implementation of
name_function.
When writing to a single hdf node in a single hdf file, all hdf save
tasks are required to execute in a specific order, often becoming the
bottleneck of the entire execution graph. Saving to multiple nodes or
files removes that restriction (order is still preserved by enforcing
order on output, using name_function) and enables executing save tasks
in parallel.
Parameters
----------
path_or_buf: HDFStore object or string
Destination file(s). If string, can contain a single asterisk to
save each partition to a different file. Only one asterisk is
allowed in both path_or_buf and key parameters.
key: string
A node / group path in file, can contain a single asterisk to save
each partition to a different hdf node in a single file. Only one
asterisk is allowed in both path_or_buf and key parameters.
format: optional, default 'table'
Default hdf storage format, currently only pandas' 'table' format
is supported.
mode: optional, {'a', 'w', 'r+'}, default 'a'
``'a'``
Append: Add data to existing file(s) or create new.
``'w'``
Write: overwrite any existing files with new ones.
``'r+'``
Append to existing files; files must already exist.
append: optional, default False
If False, overwrites existing node with the same name otherwise
appends to it.
complevel: optional, 0-9, default 0
compression level, higher means better compression ratio and
possibly more CPU time. Depends on complib.
complib: {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
If complevel > 0 compress using this compression library when
possible
fletcher32: bool, default False
If True and compression is used, additionally apply the fletcher32
checksum.
get: callable, optional
A scheduler `get` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults
for the collections.
dask_kwargs: dict, optional
A dictionary of keyword arguments passed to the `get` function
used.
name_function: callable, optional, default None
A callable called for each partition that accepts a single int
representing the partition number. name_function must return a
string representation of a partition's index in a way that will
preserve the partition's location after a string sort.
If None, a default name_function is used. The default name_function
will return a zero-padded string of the received int. See
dask.utils.build_name_function for more info.
compute: bool, default True
If True, execute computation of resulting dask graph.
If False, return a Delayed object.
lock: bool, None or lock object, default None
In to_hdf locks are needed for two reasons. First, to protect
against writing to the same file from multiple processes or threads
simultaneously. Second, libhdf5 is not thread safe by default, so we
must additionally lock on its usage. By default, if lock is None the
lock will be determined optimally based on path_or_buf, key and the
scheduler used. Manually setting this parameter is usually not
required to improve performance.
Alternatively, you can specify specific values:
If False, no locking will occur. If True, a default lock object will
be created (multiprocessing.Manager.Lock on the multiprocessing
scheduler, threading.Lock otherwise). This can be used to force
locking in scenarios where the default behavior would be to avoid
it. Any other value is assumed to implement the lock interface, and
will be used as the lock object.
See Also
--------
dask.DataFrame.read_hdf: reading hdf files
dask.Series.read_hdf: reading hdf files
Examples
--------
Saving data to a single file:
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Saving data to multiple nodes:
>>> with pd.HDFStore('output.hdf') as fh:
... df.to_hdf(fh, '/data*')
... fh.keys() # doctest: +SKIP
['/data0', '/data1']
Or multiple files:
>>> df.to_hdf('output_*.hdf', '/data') # doctest: +SKIP
Saving multiple files with the multiprocessing scheduler and manually
disabling locks:
>>> df.to_hdf('output_*.hdf', '/data',
... get=dask.multiprocessing.get, lock=False) # doctest: +SKIP
"""
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, get=get, **kwargs)
def to_csv(self, filename, **kwargs):
"""Write DataFrame to a series of comma-separated values (csv) files
One filename per partition will be created. You can specify the
filenames in a variety of ways.
Use a globstring::
>>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP
The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/export-0.csv
/path/to/data/export-1.csv
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP
::
/path/to/data/export-2015-01-01.csv
/path/to/data/export-2015-01-02.csv
...
You can also provide an explicit list of paths::
>>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP
>>> df.to_csv(paths) # doctest: +SKIP
Parameters
----------
filename : string
Path glob indicating the naming scheme for the output files
name_function : callable, default None
Function accepting an integer (partition index) and producing a
string to replace the asterisk in the given filename globstring.
Should preserve the lexicographic order of partitions
compression : string or None
String like 'gzip' or 'xz'. Must support efficient random access.
Filenames with extensions corresponding to known compression
algorithms (gz, bz2) will be compressed accordingly automatically
sep : character, default ','
Field delimiter for the output file
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
nanRep : None
deprecated, use na_rep
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
write MultiIndex columns as a list of tuples (if True) or in the
new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_delayed(self):
""" Convert dataframe into dask Values
Returns a list of values, one value per partition.
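Illustrative usage (a sketch; each element wraps one partition):
>>> parts = df.to_delayed()  # doctest: +SKIP
>>> parts[0].compute()  # doctest: +SKIP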
"""
from ..delayed import Delayed
return [Delayed(k, [self.dask]) for k in self._keys()]
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int
Size of the moving window. This is the number of observations used
for calculating the statistic. The window size must not be so large
as to span more than one adjacent partition.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
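Examples
--------
Illustrative usage (a sketch; assumes the returned Rolling object exposes
the usual aggregations such as ``mean``):
>>> df.x.rolling(window=3).mean()  # doctest: +SKIP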
"""
from dask.dataframe.rolling import Rolling
if not isinstance(window, int):
raise ValueError('window must be an integer')
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, int):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.sum(axis=axis, skipna=skipna)
token = self._token_prefix + 'sum'
if axis == 1:
return self.map_partitions(M.sum, meta=meta,
token=token, skipna=skipna, axis=axis)
else:
return self.reduction(M.sum, meta=meta, token=token,
skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.max(axis=axis, skipna=skipna)
token = self._token_prefix + 'max'
if axis == 1:
return self.map_partitions(M.max, meta=meta, token=token,
skipna=skipna, axis=axis)
else:
return self.reduction(M.max, meta=meta, token=token,
skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.min(axis=axis, skipna=skipna)
token = self._token_prefix + 'min'
if axis == 1:
return self.map_partitions(M.min, meta=meta, token=token,
skipna=skipna, axis=axis)
else:
return self.reduction(M.min, meta=meta, token=token,
skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
meta=meta, token=self._token_prefix + fn, skipna=skipna,
known_divisions=self.known_divisions, fn=fn, axis=axis)
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
meta=meta, token=self._token_prefix + fn, skipna=skipna,
known_divisions=self.known_divisions, fn=fn, axis=axis)
@derived_from(pd.DataFrame)
def count(self, axis=None):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
return self.reduction(M.count, meta=meta, token=token,
aggregate=M.sum)
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna)
n = num.count()
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
return map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna)
x2 = 1.0 * (num ** 2).sum(skipna=skipna)
n = num.count()
name = self._token_prefix + 'var-%s' % tokenize(self, axis, skipna, ddof)
return map_partitions(methods.var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
else:
v = self.var(skipna=skipna, ddof=ddof)
name = self._token_prefix + 'std-finish--%s' % tokenize(self, axis,
skipna, ddof)
return map_partitions(np.sqrt, v, meta=meta, token=name)
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
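Examples
--------
Illustrative usage (``df`` is assumed to be a dask DataFrame with numeric
columns):
>>> df.quantile(0.25).compute()  # doctest: +SKIP
>>> df.quantile([0.25, 0.75]).compute()  # doctest: +SKIP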
"""
axis = self._validate_axis(axis)
name = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
meta = pd.Series([], dtype='f8')
return map_partitions(M.quantile, self, q, axis,
token=name, meta=meta)
else:
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[q.dask for q in quantiles])
qnames = [(q._name, 0) for q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(name, 0)] = (pd.Series, (list, qnames), num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, name, meta, divisions)
else:
from .multi import _pdconcat
dask[(name, 0)] = (_pdconcat, (list, qnames), 1)
return DataFrame(dask, name, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self):
# currently, only numeric describe is supported
num = self._get_numeric_data()
stats = [num.count(), num.mean(), num.std(), num.min(),
num.quantile([0.25, 0.5, 0.75]), num.max()]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self)
dsk = merge(num.dask, *(s.dask for s in stats))
dsk[(name, 0)] = (methods.describe_aggregate, (list, stats_names))
return self._constructor(dsk, name, num._meta, divisions=[None, None])
def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
# aggregate each cumulated partition with the previous partition's last element
dask = {}
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dask[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return self._constructor(merge(dask, cumpart.dask, cumlast.dask),
name, chunk(self._meta), self.divisions)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instance,
# passing map_partitions via keyword will not be aligned
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def astype(self, dtype):
return self.map_partitions(M.astype, dtype=dtype,
meta=self._meta.astype(dtype))
@derived_from(pd.Series)
def append(self, other):
# DataFrame.append overrides this method, so wrap it here with the
# pd.Series.append docstring
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
if not isinstance(other, _Frame):
from .io import from_pandas
other = from_pandas(other, 1)
from .multi import _append
if self.known_divisions and other.known_divisions:
if self.divisions[-1] < other.divisions[0]:
divisions = self.divisions[:-1] + other.divisions
return _append(self, other, divisions)
else:
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
raise ValueError(msg)
else:
divisions = [None] * (self.npartitions + other.npartitions + 1)
return _append(self, other, divisions)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
normalize_token.register((Scalar, _Frame), lambda a: a._name)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_token_prefix = 'series-'
@property
def _constructor_sliced(self):
return Scalar
@property
def _constructor(self):
return Series
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@property
def cat(self):
return self._meta.cat
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'cat'):
o.remove('cat') # cat only in `dir` if available
return list(o)
@property
def column_info(self):
""" Return Series.name """
warnings.warn('column_info is deprecated, use name')
return self.name
@property
def nbytes(self):
return self.reduction(methods.nbytes, np.sum, token='nbytes', meta=int)
def __array__(self, dtype=None, **kwargs):
x = np.array(self.compute())
if dtype and x.dtype != dtype:
x = x.astype(dtype)
return x
def __array_wrap__(self, array, context=None):
return pd.Series(array, name=self.name)
@cache_readonly
def dt(self):
return DatetimeAccessor(self)
@cache_readonly
def str(self):
return StringAccessor(self)
def quantile(self, q=0.5):
""" Approximate quantiles of Series
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
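Illustrative usage (``s`` is assumed to be a numeric dask Series):
>>> s.quantile(0.5).compute()  # doctest: +SKIP
>>> s.quantile([0.25, 0.75]).compute()  # doctest: +SKIP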
"""
return quantile(self, q)
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
@derived_from(pd.Series)
def resample(self, rule, how=None, closed=None, label=None):
from .tseries.resample import _resample
return _resample(self, rule, how=how, closed=closed, label=label)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self._meta, self.divisions)
raise NotImplementedError()
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, index, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, index, **kwargs)
@derived_from(pd.Series)
def sum(self, axis=None, skipna=True):
return super(Series, self).sum(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def max(self, axis=None, skipna=True):
return super(Series, self).max(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def min(self, axis=None, skipna=True):
return super(Series, self).min(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def count(self):
return super(Series, self).count()
@derived_from(pd.Series)
def mean(self, axis=None, skipna=True):
return super(Series, self).mean(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def var(self, axis=None, ddof=1, skipna=True):
return super(Series, self).var(axis=axis, ddof=ddof, skipna=skipna)
@derived_from(pd.Series)
def std(self, axis=None, ddof=1, skipna=True):
return super(Series, self).std(axis=axis, ddof=ddof, skipna=skipna)
@derived_from(pd.Series)
def cumsum(self, axis=None, skipna=True):
return super(Series, self).cumsum(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def cumprod(self, axis=None, skipna=True):
return super(Series, self).cumprod(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def cummax(self, axis=None, skipna=True):
return super(Series, self).cummax(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def cummin(self, axis=None, skipna=True):
return super(Series, self).cummin(axis=axis, skipna=skipna)
def unique(self):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
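Examples
--------
Illustrative usage (``s`` is assumed to be a dask Series):
>>> s.unique().compute()  # doctest: +SKIP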
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', series_name=self.name)
@derived_from(pd.Series)
def nunique(self):
return self.drop_duplicates().count()
@derived_from(pd.Series)
def value_counts(self):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
meta=self._meta.value_counts(), token='value-counts')
@derived_from(pd.Series)
def nlargest(self, n=5):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest-n={0}'.format(n),
n=n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(M.isin, self, list(other))
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if not (isinstance(arg, (pd.Series, dict)) or callable(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = dict(((name, i), (M.map, k, arg, na_action)) for i, k in
enumerate(self._keys()))
dsk.update(self.dask)
if meta is no_default:
try:
meta = self._meta_nonempty.map(arg, na_action=na_action)
except Exception:
raise ValueError("Metadata inference failed, please provide "
"`meta` keyword")
else:
meta = make_meta(meta)
return Series(dsk, name, meta, self.divisions)
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None):
return self.map_partitions(M.clip, lower=lower, upper=upper)
def to_bag(self, index=False):
"""Convert to a dask Bag.
Parameters
----------
index : bool, optional
If True, the elements are tuples of ``(index, value)``, otherwise
they're just the ``value``. Default is False.
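Illustrative usage (``s`` is assumed to be a dask Series):
>>> b = s.to_bag(index=True)  # doctest: +SKIP
>>> b.take(3)  # doctest: +SKIP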
"""
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default,
name=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
name : list, scalar or None, optional
Deprecated, use `meta` instead. If a list is given, the result is a
DataFrame whose columns are the specified list. Otherwise, the result
is a Series whose name is the given scalar or None (no name). If the
name keyword is not given, dask tries to infer the result type from
the beginning of the data. This inference may take some time and lead
to unexpected results.
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Series or DataFrame if func returns a Series.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if name is not no_default:
warnings.warn("`name` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(name, (pd.DataFrame, pd.Series)):
meta = name
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
try:
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, **kwds)
except Exception:
raise ValueError("Metadata inference failed, please provide "
"`meta` keyword")
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True)
class Index(Series):
_partition_type = pd.Index
_token_prefix = 'index-'
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
@property
def _constructor(self):
return Index
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat: this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
result = self._constructor(merge(self.dask, dsk), name,
self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
def nunique(self):
return self.drop_duplicates().count()
@derived_from(pd.Index)
def max(self):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max')
@derived_from(pd.Index)
def min(self):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min')
def count(self):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int)
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
@property
def _constructor_sliced(self):
return Series
@property
def _constructor(self):
return DataFrame
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
# if length mismatches, error is raised from pandas
self._meta.columns = columns
renamed = _rename_dask(self, columns)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key):
if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
return self._loc(key)
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return self._constructor_sliced(merge(self.dask, dsk), name,
meta, self.divisions)
elif isinstance(key, slice):
return self._loc(key)
if isinstance(key, list):
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem,
(self._name, i), (list, key)))
for i in range(self.npartitions))
return self._constructor(merge(self.dask, dsk), name,
meta, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = {(name, i): (M._getitem_array, (self._name, i), (key._name, i))
for i in range(self.npartitions)}
return self._constructor(merge(self.dask, key.dask, dsk), name,
self, self.divisions)
raise NotImplementedError(key)
def __setitem__(self, key, value):
if isinstance(key, (tuple, list)):
df = self.assign(**{k: value[c]
for k, c in zip(key, value.columns)})
else:
df = self.assign(**{key: value})
self.dask = df.dask
self._name = df._name
self._meta = df._meta
def __getattr__(self, key):
if key in self.columns:
meta = self._meta[key]
name = 'getitem-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return self._constructor_sliced(merge(self.dask, dsk), name,
meta, self.divisions)
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, pd.compat.string_types) and
pd.compat.isidentifier(c)))
return list(o)
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, drop=True, sorted=False, **kwargs):
""" Set the DataFrame index (row labels) using an existing column
This operation in dask.dataframe is expensive. If the input column is
sorted then we accomplish the set_index in a single full read of that
column. However, if the input column is not sorted then this operation
triggers a full shuffle, which can take a while and only works on a
single machine (not distributed).
Parameters
----------
other: Series or label
drop: boolean, default True
Delete columns to be used as the new index
sorted: boolean, default False
Set to True if the new index column is already sorted
Examples
--------
>>> df.set_index('x') # doctest: +SKIP
>>> df.set_index(d.x) # doctest: +SKIP
>>> df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
"""
if sorted:
return set_sorted_index(self, other, drop=drop, **kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See Also
--------
set_index
"""
from .shuffle import set_partition
return set_partition(self, column, divisions, **kwargs)
@property
def column_info(self):
""" Return DataFrame.columns """
warnings.warn('column_info is deprecated, use columns')
return self.columns
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None):
token = 'dataframe-nlargest-n={0}'.format(n)
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token=token, n=n, columns=columns)
@derived_from(pd.DataFrame)
def reset_index(self):
out = self.map_partitions(M.reset_index)
out.divisions = [None] * (self.npartitions + 1)
return out
@derived_from(pd.DataFrame)
def groupby(self, key, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
"""
Convert columns of the DataFrame to category dtype
Parameters
----------
columns : list, optional
A list of column names to convert to the category type. By
default any column with an object dtype is converted to a
categorical.
kwargs
Keyword arguments are passed on to compute.
Notes
-----
When dealing with columns of repeated text values, converting to the
category dtype is often much more performant, both in terms of memory
and when writing to disk or communicating over the network.
See also
--------
dask.dataframe.categorical.categorize
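Examples
--------
Illustrative usage (a sketch; ``name`` is a hypothetical object-dtype
column of ``ddf``):
>>> ddf2 = ddf.categorize(columns=['name'])  # doctest: +SKIP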
"""
from dask.dataframe.categorical import categorize
return categorize(self, columns, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
for k, v in kwargs.items():
if not (isinstance(v, (Series, Scalar, pd.Series)) or
np.isscalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._meta.assign(**_extract_meta(kwargs))
return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return self.map_partitions(M.rename, None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
The original docstring follows below:\n
""" + (pd.DataFrame.query.__doc__
if pd.DataFrame.query.__doc__ is not None else '')
name = 'query-%s' % tokenize(self, expr)
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, M.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (M.query, (self._name, i), expr))
for i in range(self.npartitions))
meta = self._meta.query(expr, **kwargs)
return self._constructor(merge(dsk, self.dask), name,
meta, self.divisions)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
return self.map_partitions(M.dropna, how=how, subset=subset)
def to_castra(self, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from .io import to_castra
return to_castra(self, fn, categories, sorted_index_column,
compute=compute, get=get)
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
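Examples
--------
A minimal illustrative sketch; ``ddf`` is a hypothetical dask DataFrame:
>>> # ``ddf`` is assumed, not defined here
>>> bag = ddf.to_bag(index=True)  # doctest: +SKIP
>>> bag.take(2)  # doctest: +SKIP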
"""
from .io import to_bag
return to_bag(self, index)
def _get_numeric_data(self, how='any', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return self.map_partitions(M._get_numeric_data,
meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, dtype=None):
if axis != 1:
raise NotImplementedError("Drop currently only works for axis=1")
if dtype is not None:
return elemwise(drop_columns, self, labels, dtype)
else:
return elemwise(M.drop, self, labels, axis)
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
suffixes=('_x', '_y'), npartitions=None, shuffle=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes, npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
msg = ('Unable to append a dd.Series to a dd.DataFrame. '
'Use a pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.itertuples():
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis == 1:
# when axis=1, a series would be added to each row;
# this is not supported for dd.Series.
# dd.DataFrame is not affected since the op is applied elemwise
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, axis=0, args=(), meta=no_default,
columns=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
columns : list, scalar or None
Deprecated, please use `meta` instead. If a list is given, the
result is a DataFrame whose columns are the specified list.
Otherwise, the result is a Series whose name is the given scalar, or
None (no name). If this keyword is not given, dask tries to infer
the result type from a small sample of the data. This inference may
take some time and lead to unexpected results.
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function to row-wise passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
if axis == 0:
raise NotImplementedError(
"dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
try:
meta = _emulate(M.apply, self._meta_nonempty, func,
axis=axis, args=args, **kwds)
except Exception:
raise ValueError("Metadata inference failed, please provide "
"`meta` keyword")
return map_partitions(M.apply, self, func, axis,
False, False, None, args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None):
return cov_corr(self, min_periods)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame.
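Examples
--------
A minimal illustrative sketch; ``ddf`` is a hypothetical dask DataFrame:
>>> # ``ddf`` is assumed, not defined here
>>> ddf.info(verbose=True, memory_usage=True)  # doctest: +SKIP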
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self))]
if len(self.columns) == 0:
lines.append('Index: 0 entries')
lines.append('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({'index': self.index, 'count': self.count()})
if memory_usage:
computations.update({'memory_usage': self.map_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
column_template = "{0:<%d} {1}" % (self.columns.str.len().max() + 5)
if verbose:
index = computations['index']
counts = computations['count']
lines.append(index.summary())
column_template = column_template.format('{0}', '{1} non-null {2}')
column_info = [column_template.format(*x) for x in zip(self.columns, counts, self.dtypes)]
else:
column_info = [column_template.format(*x) for x in zip(self.columns, self.dtypes)]
lines.append('Data columns (total {} columns):'.format(len(self.columns)))
lines.extend(column_info)
dtype_counts = ['%s(%d)' % k for k in sorted(self.dtypes.value_counts().iteritems())]
lines.append('dtypes: {}'.format(', '.join(dtype_counts)))
if memory_usage:
memory_int = computations['memory_usage'].sum()
lines.append('memory usage: {}\n'.format(memory_repr(memory_int)))
put_lines(buf, lines)
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
def elemwise_property(attr, s):
meta = pd.Series([], dtype=getattr(s._meta, attr).dtype)
return map_partitions(getattr, s, attr, meta=meta)
for name in ['nanosecond', 'microsecond', 'millisecond', 'second', 'minute',
'hour', 'day', 'dayofweek', 'dayofyear', 'week', 'weekday',
'weekofyear', 'month', 'quarter', 'year']:
setattr(Index, name, property(partial(elemwise_property, name)))
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
meta = kwargs.pop('meta', no_default)
_name = funcname(op) + '-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar)
else d._keys() for d in dasks]
if other:
dsk = dict(((_name, i),
(apply, partial_by_order, list(frs),
{'function': op, 'other': other}))
for i, frs in enumerate(zip(*keys)))
else:
dsk = dict(((_name, i), (op,) + frs) for i, frs in enumerate(zip(*keys)))
dsk = merge(dsk, *[d.dask for d in dasks])
if meta is no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
meta = _emulate(op, *args, **kwargs)
return new_dd_object(dsk, _name, meta, divisions)
def remove_empties(seq):
""" Remove items of length 0
>>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
[1, 2, 4, 5]
>>> remove_empties([('empty', np.nan)])
[nan]
>>> remove_empties([])
[]
"""
if not seq:
return seq
seq2 = [x for x in seq
if not (isinstance(x, tuple) and x and x[0] == 'empty')]
if seq2:
return seq2
else:
return [seq[0][1]]
def empty_safe(func, arg):
"""
>>> empty_safe(sum, [1, 2, 3])
6
>>> empty_safe(sum, [])
('empty', 0)
"""
if len(arg) == 0:
return ('empty', func(arg))
else:
return func(arg)
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
@insert_meta_param_description
def apply_concat_apply(args, chunk=None, aggregate=None, meta=no_default,
token=None, chunk_kwargs=None, aggregate_kwargs=None,
**kwargs):
"""Apply a function to blocks, then concat, then apply again
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function concatenated-block -> block
Function to operate on the concatenated result of chunk
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
kwargs :
All remaining keywords will be passed to both ``chunk`` and
``aggregate``.
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
assert all(arg.npartitions == args[0].npartitions
for arg in args if isinstance(arg, _Frame))
token_key = tokenize(token or (chunk, aggregate), meta, args,
chunk_kwargs, aggregate_kwargs)
a = '{0}-chunk-{1}'.format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = dict(((a, i), (chunk, key))
for i, key in enumerate(args[0]._keys()))
else:
dsk = dict(((a, i), (apply, chunk, [(x._name, i)
if isinstance(x, _Frame)
else x for x in args],
chunk_kwargs))
for i in range(args[0].npartitions))
b = '{0}-{1}'.format(token or funcname(aggregate), token_key)
conc = (_concat, (list, [(a, i) for i in range(args[0].npartitions)]))
if not aggregate_kwargs:
dsk2 = {(b, 0): (aggregate, conc)}
else:
dsk2 = {(b, 0): (apply, aggregate, [conc], aggregate_kwargs)}
if meta is no_default:
try:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [_concat([meta_chunk])],
aggregate_kwargs)
except Exception:
raise ValueError("Metadata inference failed, please provide "
"`meta` keyword")
meta = make_meta(meta)
dasks = [a.dask for a in args if isinstance(a, _Frame)]
return new_dd_object(merge(dsk, dsk2, *dasks), b, meta, [None, None])
aca = apply_concat_apply
def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
if isinstance(x, (_Frame, Scalar)):
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x, nonempty) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_meta(x[k], nonempty)
return res
else:
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function.
$META
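Examples
--------
A minimal illustrative sketch; ``ddf`` is a hypothetical dask DataFrame:
>>> # ``ddf`` is assumed, not defined here
>>> filled = map_partitions(pd.DataFrame.fillna, ddf, 0, meta=ddf)  # doctest: +SKIP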
"""
meta = kwargs.pop('meta', no_default)
if meta is not no_default:
meta = make_meta(meta)
assert callable(func)
if 'token' in kwargs:
name = kwargs.pop('token')
token = tokenize(meta, *args, **kwargs)
else:
name = funcname(func)
token = tokenize(func, meta, *args, **kwargs)
name = '{0}-{1}'.format(name, token)
from .multi import _maybe_align_partitions
args = _maybe_from_pandas(args)
args = _maybe_align_partitions(args)
if meta is no_default:
try:
meta = _emulate(func, *args, **kwargs)
except Exception:
raise ValueError("Metadata inference failed, please provide "
"`meta` keyword")
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name, meta)
elif not isinstance(meta, (pd.Series, pd.DataFrame, pd.Index)):
# If `meta` is not a pandas object, the concatenated results will be a
# different type
meta = _concat([meta])
if isinstance(meta, pd.DataFrame):
columns = meta.columns
elif isinstance(meta, (pd.Series, pd.Index)):
columns = meta.name
else:
columns = None
dfs = [df for df in args if isinstance(df, _Frame)]
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
values = (apply, func, (tuple, values), kwargs)
if columns is not None:
values = (_rename, columns, values)
dsk[(name, i)] = values
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return new_dd_object(merge(dsk, *dasks), name, meta, args[0].divisions)
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
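Examples
--------
A minimal illustrative sketch of renaming a pandas DataFrame's columns:
>>> _rename(['a', 'b'], pd.DataFrame({'x': [1], 'y': [2]}))  # doctest: +SKIP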
"""
assert not isinstance(df, _Frame)
if isinstance(columns, Iterator):
columns = list(columns)
if columns is no_default:
return df
if isinstance(df, pd.DataFrame):
if isinstance(columns, pd.DataFrame):
columns = columns.columns
columns = pd.Index(columns)
if len(columns) == len(df.columns):
if columns.equals(df.columns):
# if target is identical, rename is not necessary
return df
# each functions must be pure op, do not use df.columns = columns
return df.rename(columns=dict(zip(df.columns, columns)))
elif isinstance(df, (pd.Series, pd.Index)):
if isinstance(columns, (pd.Series, pd.Index)):
columns = columns.name
if df.name == columns:
return df
return pd.Series(df, name=columns)
# map_partition may pass other types
return df
def _rename_dask(df, names):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
Internally used to overwrite dd.DataFrame.columns and dd.Series.name.
We can't use map_partitions because it applies the function and then renames.
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
names : tuple, string
Column names/Series name
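Examples
--------
A minimal illustrative sketch; ``ddf`` is a hypothetical two-column dask
DataFrame:
>>> # ``ddf`` is assumed, not defined here
>>> renamed = _rename_dask(ddf, ['a', 'b'])  # doctest: +SKIP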
"""
assert isinstance(df, _Frame)
metadata = _rename(names, df._meta)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = {}
for i in range(df.npartitions):
dsk[name, i] = (_rename, metadata, (df._name, i))
return new_dd_object(merge(dsk, df.dask), name, metadata, df.divisions)
def quantile(df, q):
"""Approximate quantiles of Series.
Parameters
----------
q : list/array of floats
Iterable of numbers ranging from 0 to 1 for the desired quantiles
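Examples
--------
A minimal illustrative sketch; ``s`` is a hypothetical dask Series:
>>> # ``s`` is assumed, not defined here
>>> quantile(s, [0.25, 0.5, 0.75]).compute()  # doctest: +SKIP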
"""
assert isinstance(df, Series)
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(q, (list, tuple, np.ndarray)):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
return_type = Series
else:
finalize_tsk = lambda tsk: (getitem, tsk, 0)
return_type = Scalar
q = [q]
if isinstance(df, Index):
meta = pd.Series(df._meta_nonempty).quantile(q)
else:
meta = df._meta_nonempty.quantile(q)
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df._meta, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): finalize_tsk((merge_percentiles, qs,
[qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, meta, new_divisions)
def cov_corr(df, min_periods=None, corr=False, scalar=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
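Examples
--------
A minimal illustrative sketch; ``ddf`` is a hypothetical numeric dask
DataFrame:
>>> # ``ddf`` is assumed, not defined here
>>> cov_corr(ddf).compute()  # doctest: +SKIP
>>> cov_corr(ddf, corr=True).compute()  # doctest: +SKIP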
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
prefix = 'corr' if corr else 'cov'
df = df._get_numeric_data()
name = '{0}-agg-{1}'.format(prefix, tokenize(df, min_periods, scalar))
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
k = '{0}-chunk-{1}'.format(prefix, df._name)
dsk = dict(((k, i), (cov_corr_chunk, f, corr))
for (i, f) in enumerate(df._keys()))
dsk[(name, 0)] = (cov_corr_agg, list(dsk.keys()), df._meta, min_periods,
corr, scalar)
dsk = merge(df.dask, dsk)
if scalar:
return Scalar(dsk, name, 'f8')
meta = make_meta([(c, 'f8') for c in df.columns], index=df._meta.columns)
return DataFrame(dsk, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation"""
mat = df.values
mask = np.isfinite(mat)
keep = np.bitwise_and(mask[:, None, :], mask[:, :, None])
x = np.where(keep, mat[:, None, :], np.nan)
sums = np.nansum(x, 0)
counts = keep.astype('int').sum(0)
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
m = np.nansum((x - sums/np.where(counts, counts, np.nan))**2, 0)
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_agg(data, meta, min_periods=2, corr=False, scalar=False):
"""Aggregation part of a covariance or correlation computation"""
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
d = (s2/n2) - (s1/n1)
C = (np.nansum((n1 * n2)/(n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
C[cum_counts[-1] < min_periods] = np.nan
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
if corr:
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m2 = np.nansum(data['m'] + counts*(sums/counts_na - mu)**2, axis=0)
den = np.sqrt(m2 * m2.T)
else:
den = nobs - 1
mat = C/den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=meta.columns, index=meta.columns)
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
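Examples
--------
A minimal illustrative sketch:
>>> _take_last(pd.Series([1.0, np.nan]), skipna=True)  # doctest: +SKIP
1.0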
"""
if skipna is False:
return a.iloc[-1]
else:
# take the last valid value excluding NaN; the NaN location may differ
# across columns
group_dummy = np.ones(len(a.index))
last_row = a.groupby(group_dummy).last()
if isinstance(a, pd.DataFrame):
return pd.Series(last_row.values[0], index=a.columns)
else:
return last_row.values[0]
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function _loc at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function _loc at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function _loc at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function _loc at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
if not isinstance(b, (list, tuple)):
raise ValueError('New division must be list or tuple')
b = list(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
raise ValueError('New division must have at least 2 elements')
if b != sorted(b):
raise ValueError('New division must be sorted')
if len(b[:-1]) != len(list(unique(b[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal or smaller '
'than old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal or larger '
'than old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (_loc, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (_loc, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (_loc, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (_loc, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (_loc, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, (list, tmp))
j += 1
return d
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
npartitions_ratio = df.npartitions / npartitions
new_partitions_boundaries = [int(new_partition_index * npartitions_ratio)
for new_partition_index in range(npartitions + 1)]
new_name = 'repartition-%d-%s' % (npartitions, tokenize(df))
dsk = {(new_name, new_partition_index):
(pd.concat,
[(df._name, old_partition_index)
for old_partition_index in range(
new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])])
for new_partition_index in range(npartitions)}
divisions = [df.divisions[new_partition_index]
for new_partition_index in new_partitions_boundaries]
return DataFrame(merge(df.dask, dsk), new_name, df._meta, divisions)
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return df._constructor(merge(df.dask, dsk), out,
df._meta, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return new_dd_object(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
class Accessor(object):
def __init__(self, series):
if not isinstance(series, Series):
raise ValueError('Accessor cannot be initialized')
self._series = series
def _property_map(self, key):
out = self.getattr(self._series._meta, key)
meta = pd.Series([], dtype=out.dtype, name=getattr(out, 'name', None))
return map_partitions(self.getattr, self._series, key, meta=meta)
def _function_map(self, key, *args):
out = self.call(self._series._meta, key, *args)
meta = pd.Series([], dtype=out.dtype, name=getattr(out, 'name', None))
return map_partitions(self.call, self._series, key, *args, meta=meta)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
dir(self.ns)))
def __getattr__(self, key):
if key in dir(self.ns):
if isinstance(getattr(self.ns, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise AttributeError(key)
class DatetimeAccessor(Accessor):
""" Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.microsecond # doctest: +SKIP
"""
ns = pd.Series.dt
@staticmethod
def getattr(obj, attr):
return getattr(obj.dt, attr)
@staticmethod
def call(obj, attr, *args):
return getattr(obj.dt, attr)(*args)
class StringAccessor(Accessor):
""" Accessor object for string properties of the Series values.
Examples
--------
>>> s.str.lower() # doctest: +SKIP
"""
ns = pd.Series.str
@staticmethod
def getattr(obj, attr):
return getattr(obj.str, attr)
@staticmethod
def call(obj, attr, *args):
return getattr(obj.str, attr)(*args)
def set_sorted_index(df, index, drop=True, **kwargs):
if not isinstance(index, Series):
meta = df._meta.set_index(index, drop=drop)
else:
meta = df._meta.set_index(index._meta, drop=drop)
result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)
return compute_divisions(result, **kwargs)
def compute_divisions(df, **kwargs):
mins = df.index.map_partitions(M.min, meta=df.index)
maxes = df.index.map_partitions(M.max, meta=df.index)
mins, maxes = compute(mins, maxes, **kwargs)
if (sorted(mins) != list(mins) or
sorted(maxes) != list(maxes) or
any(a >= b for a, b in zip(mins, maxes))):
raise ValueError("Partitions must be sorted ascending with the index",
mins, maxes)
divisions = tuple(mins) + (list(maxes)[-1],)
df = copy(df)
df.divisions = divisions
return df
def _reduction_chunk(x, aca_chunk=None, **kwargs):
o = aca_chunk(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def drop_columns(df, columns, dtype):
df = df.drop(columns, axis=1)
df.columns = df.columns.astype(dtype)
return df
def idxmaxmin_chunk(x, fn, axis=0, skipna=True, **kwargs):
idx = getattr(x, fn)(axis=axis, skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x, minmax)(axis=axis, skipna=skipna)
n = len(x)
if isinstance(idx, pd.Series):
chunk = pd.DataFrame({'idx': idx, 'value': value, 'n': [n] * len(idx)})
chunk['idx'] = chunk['idx'].astype(type(idx.iloc[0]))
else:
chunk = pd.DataFrame({'idx': [idx], 'value': [value], 'n': [n]})
chunk['idx'] = chunk['idx'].astype(type(idx))
return chunk
def idxmaxmin_row(x, fn, skipna=True):
idx = x.idx.reset_index(drop=True)
value = x.value.reset_index(drop=True)
subidx = getattr(value, fn)(skipna=skipna)
# if skipna is False, pandas returns NaN so mimic behavior
if pd.isnull(subidx):
return subidx
return idx.iloc[subidx]
def idxmaxmin_agg(x, fn, skipna=True, **kwargs):
indices = list(set(x.index.tolist()))
idxmaxmin = [idxmaxmin_row(x.ix[idx], fn, skipna=skipna) for idx in indices]
if len(idxmaxmin) == 1:
return idxmaxmin[0]
else:
return pd.Series(idxmaxmin, index=indices)
def safe_head(df, n):
r = df.head(n=n)
if len(r) != n:
warnings.warn("Insufficient elements for `head`. {0} elements "
"requested, only {1} elements available. Try passing larger "
"`npartitions` to `head`.".format(n, len(r)))
return r
| {
"repo_name": "cowlicks/dask",
"path": "dask/dataframe/core.py",
"copies": "1",
"size": "121256",
"license": "bsd-3-clause",
"hash": -7731512705150762000,
"line_mean": 35.4022815971,
"line_max": 102,
"alpha_frac": 0.5692089464,
"autogenerated": false,
"ratio": 3.9731314918575316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00038994047311262847,
"num_lines": 3331
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from distutils.version import LooseVersion
import operator
from operator import getitem, setitem
from pprint import pformat
import uuid
import warnings
from toolz import merge, partial, first, unique, partition_all
import pandas as pd
from pandas.util.decorators import cache_readonly
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import apply, operator_div, bind_method, PY3
from ..utils import (random_state_data,
pseudorandom, derived_from, funcname, memory_repr,
put_lines, M, key_split)
from ..base import Base, compute, tokenize, normalize_token
from ..async import get_sync
from . import methods
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error)
from .hashing import hash_pandas_object
no_default = '__no_default__'
pd.computation.expressions.set_use_numexpr(False)
def _concat(args, **kwargs):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
return args[0].append(args[1:])
try:
return pd.Series(args)
except:
return args
def _get_return_type(meta):
if isinstance(meta, _Frame):
meta = meta._meta
if isinstance(meta, pd.Series):
return Series
elif isinstance(meta, pd.DataFrame):
return DataFrame
elif isinstance(meta, pd.Index):
return Index
return Scalar
def new_dd_object(dsk, _name, meta, divisions):
"""Generic constructor for dask.dataframe objects.
Decides the appropriate output class based on the type of `meta` provided.
"""
return _get_return_type(meta)(dsk, _name, meta, divisions)
def optimize(dsk, keys, **kwargs):
from .optimize import optimize
return optimize(dsk, keys, **kwargs)
def finalize(results):
return _concat(results)
class Scalar(Base):
""" A Dask object to represent a pandas scalar"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(first)
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
self.dask = dsk
self._name = name
meta = make_meta(meta)
if isinstance(meta, (pd.DataFrame, pd.Series, pd.Index)):
raise ValueError("Expected meta to specify scalar, got "
"{0}".format(type(meta).__name__))
self._meta = meta
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
def _keys(self):
return [self.key]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
return Scalar(merge(dsk, self.dask), name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dsk = self.dask
return_type = _get_return_type(other)
if isinstance(other, Scalar):
dsk = merge(dsk, other.dask)
other_key = (other._name, 0)
elif isinstance(other, Base):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
if return_type is not Scalar:
return return_type(dsk, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(dsk, name, meta)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, meta, divisions):
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not isinstance(meta, self._partition_type):
raise ValueError("Expected meta to specify type {0}, got type "
"{1}".format(self._partition_type.__name__,
type(meta).__name__))
self._meta = meta
self.divisions = tuple(divisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def size(self):
return self.reduction(methods.size, np.sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def copy(self):
""" Make a copy of the dataframe
This is strictly a shallow copy of the underlying computational graph.
It does not affect the underlying data
"""
return new_dd_object(self.dask, self._name,
self._meta, self.divisions)
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def __array__(self, dtype=None, **kwargs):
self._computed = self.compute()
x = np.array(self._computed)
return x
def __array_wrap__(self, array, context=None):
raise NotImplementedError
@property
def _elemwise(self):
return elemwise
@property
def _repr_data(self):
raise NotImplementedError
@property
def _repr_divisions(self):
name = "npartitions={0}".format(self.npartitions)
if self.known_divisions:
divisions = pd.Index(self.divisions, name=name)
else:
# avoid being converted to NaN
divisions = pd.Index(['None'] * (self.npartitions + 1),
name=name)
return divisions
def __repr__(self):
data = self._repr_data.to_string(max_rows=5, show_dimensions=False)
return """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=data, name=key_split(self._name),
task=len(self.dask))
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name,
self._meta.index, self.divisions)
def reset_index(self, drop=False):
"""Reset the index to the default index.
Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will
not be monotonically increasing from 0. Instead, it will restart at 0
for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
This is due to the inability to statically know the full length of the
index.
For DataFrame with multi-level index, returns a new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
drop : boolean, default False
Do not try to insert index into dataframe columns.
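Examples
--------
A minimal illustrative sketch; ``ddf`` is a hypothetical dask DataFrame:
>>> # ``ddf`` is assumed, not defined here
>>> ddf2 = ddf.reset_index(drop=True)  # doctest: +SKIP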
"""
return self.map_partitions(M.reset_index, drop=drop).clear_divisions()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n + 2]
return new_dd_object(merge(self.dask, dsk), name,
self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
warnings.warn("Deprecation Warning: The `cache` method is deprecated, "
"and will be removed in the next release. To achieve "
"the same behavior, either write to disk or use "
"`Client.persist`, from `dask.distributed`.")
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + uuid.uuid1().hex
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
self._get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
name = 'from-cache-' + self._name
dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))
for i, key in enumerate(self._keys()))
return new_dd_object(dsk2, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def drop_duplicates(self, split_every=None, split_out=1, **kwargs):
# Let pandas error on bad inputs
self._meta_nonempty.drop_duplicates(**kwargs)
if 'subset' in kwargs and kwargs['subset'] is not None:
split_out_setup = split_out_on_cols
split_out_setup_kwargs = {'cols': kwargs['subset']}
else:
split_out_setup = split_out_setup_kwargs = None
if kwargs.get('keep', True) is False:
raise NotImplementedError("drop_duplicates with keep=False")
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', split_every=split_every,
split_out=split_out, split_out_setup=split_out_setup,
split_out_setup_kwargs=split_out_setup_kwargs, **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int,
split_every=False).compute()
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def map_overlap(self, func, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
This can be useful for implementing windowing functions such as
``df.rolling(...).mean()`` or ``df.diff()``.
Parameters
----------
func : function
Function applied to each partition.
before : int
The number of rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int
The number of rows to append to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Notes
-----
Given positive integers ``before`` and ``after``, and a function
``func``, ``map_overlap`` does the following:
1. Prepend ``before`` rows to each partition ``i`` from the end of
partition ``i - 1``. The first partition has no rows prepended.
2. Append ``after`` rows to each partition ``i`` from the beginning of
partition ``i + 1``. The last partition has no rows appended.
3. Apply ``func`` to each partition, passing in any extra ``args`` and
``kwargs`` if provided.
4. Trim ``before`` rows from the beginning of all but the first
partition.
5. Trim ``after`` rows from the end of all but the last partition.
Note that the index and divisions are assumed to remain unchanged.
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
A rolling sum with a trailing moving window of size 2 can be computed by
overlapping 2 rows before each partition, and then mapping calls to
``df.rolling(2).sum()``:
>>> ddf.compute()
x y
0 1 1.0
1 2 2.0
2 4 3.0
3 7 4.0
4 11 5.0
>>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
x y
0 NaN NaN
1 3.0 3.0
2 6.0 5.0
3 11.0 7.0
4 18.0 9.0
The pandas ``diff`` method computes a discrete difference shifted by a
number of periods (can be positive or negative). This can be
implemented by mapping calls to ``df.diff`` to each partition after
prepending/appending that many rows, depending on sign:
>>> def diff(df, periods=1):
... before, after = (periods, 0) if periods > 0 else (0, -periods)
... return df.map_overlap(lambda df, periods=1: df.diff(periods),
... periods, 0, periods=periods)
>>> diff(ddf, 1).compute()
x y
0 NaN NaN
1 1.0 1.0
2 2.0 1.0
3 3.0 1.0
4 4.0 1.0
"""
from .rolling import map_overlap
return map_overlap(func, self, before, after, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def random_split(self, frac, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should sum to one.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
if not np.allclose(sum(frac), 1):
raise ValueError("frac should sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
dsk = {(name, i): (pd_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(len(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (getitem, (name, j), i)
for j in range(self.npartitions)}
out.append(type(self)(merge(self.dask, dsk, dsk2), name2,
self._meta, self.divisions))
return out
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
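Examples
--------
A brief sketch, assuming ``df`` is a dask DataFrame:
>>> df.head()  # doctest: +SKIP
>>> df.head(10, npartitions=2)  # doctest: +SKIP
>>> df.head(10, npartitions=-1)  # doctest: +SKIP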
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, head received {}"
raise ValueError(msg.format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (safe_head, concat, n)
else:
dsk = {(name, 0): (safe_head, (self._name, 0), n)}
result = new_dd_object(merge(self.dask, dsk), name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the last n rows of the last partition.
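Examples
--------
A brief sketch, assuming ``df`` is a dask DataFrame:
>>> df.tail()  # doctest: +SKIP
>>> df.tail(10)  # doctest: +SKIP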
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
from .indexing import _LocIndexer
return _LocIndexer(self)
# NOTE: `iloc` is not implemented because of performance concerns.
# see https://github.com/dask/dask/pull/507
def repartition(self, divisions=None, npartitions=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
The new division boundaries (index values) along which to
partition. If specified, npartitions will be ignored.
npartitions : int, optional
Number of partitions of output, must be less than npartitions of
input. Only used if divisions isn't specified.
force : bool, default False
Allows the expansion of the existing divisions.
If False, the new divisions' lower and upper bounds must be
the same as the old divisions'.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
if npartitions > self.npartitions:
raise ValueError("Can only repartition to fewer partitions")
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.DataFrame)
def fillna(self, value=None, method=None, limit=None, axis=None):
axis = self._validate_axis(axis)
if method is None and limit is not None:
raise NotImplementedError("fillna with set limit and method=None")
if isinstance(value, _Frame):
test_value = value._meta_nonempty.values[0]
else:
test_value = value
meta = self._meta_nonempty.fillna(value=test_value, method=method,
limit=limit, axis=axis)
if axis == 1 or method is None:
return self.map_partitions(M.fillna, value, method=method,
limit=limit, axis=axis, meta=meta)
if method in ('pad', 'ffill'):
method = 'ffill'
skip_check = 0
before, after = 1 if limit is None else limit, 0
else:
method = 'bfill'
skip_check = self.npartitions - 1
before, after = 0, 1 if limit is None else limit
if limit is None:
name = 'fillna-chunk-' + tokenize(self, method)
dsk = {(name, i): (methods.fillna_check, (self._name, i),
method, i != skip_check)
for i in range(self.npartitions)}
parts = new_dd_object(merge(dsk, self.dask), name, meta,
self.divisions)
else:
parts = self
return parts.map_overlap(M.fillna, before, after, method=method,
limit=limit, meta=meta)
@derived_from(pd.DataFrame)
def ffill(self, axis=None, limit=None):
return self.fillna(method='ffill', limit=limit, axis=axis)
@derived_from(pd.DataFrame)
def bfill(self, axis=None, limit=None):
return self.fillna(method='bfill', limit=limit, axis=axis)
def sample(self, frac, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
replace: boolean, optional
Sample with or without replacement. Default = False.
random_state: int or ``np.random.RandomState``
If int we create a new RandomState with this as the seed
Otherwise we draw from the passed RandomState
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
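Examples
--------
Illustrative usage, assuming ``df`` is a dask DataFrame:
>>> smaller = df.sample(frac=0.1, random_state=0)  # doctest: +SKIP
>>> resampled = df.sample(frac=0.5, replace=True)  # doctest: +SKIP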
"""
if random_state is None:
random_state = np.random.RandomState()
name = 'sample-' + tokenize(self, frac, replace, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
for i, state in enumerate(state_data)}
return new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions)
def to_hdf(self, path_or_buf, key, mode='a', append=False, get=None, **kwargs):
""" See dd.to_hdf docstring for more information """
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, get=get, **kwargs)
def to_parquet(self, path, *args, **kwargs):
""" See dd.to_parquet docstring for more information """
from .io import to_parquet
return to_parquet(path, self, *args, **kwargs)
def to_csv(self, filename, **kwargs):
""" See dd.to_csv docstring for more information """
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_delayed(self):
""" See dd.to_delayed docstring for more information """
return to_delayed(self)
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int
Size of the moving window. This is the number of observations used
for calculating the statistic. The window size must not be so large
as to span more than one adjacent partition.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
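Examples
--------
A minimal sketch, assuming ``df`` is a dask DataFrame with a numeric
column ``x`` and that the Rolling aggregations mirror pandas
(e.g. ``mean``, ``sum``):
>>> df.x.rolling(window=3).mean()  # doctest: +SKIP
>>> df.rolling(window=5, min_periods=2).sum()  # doctest: +SKIP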
"""
from dask.dataframe.rolling import Rolling
if not isinstance(window, int):
raise ValueError('window must be an integer')
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, int):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
@derived_from(pd.DataFrame)
def diff(self, periods=1, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, int):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.diff, token='diff', periods=periods,
axis=1)
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.diff, before, after, token='diff',
periods=periods)
@derived_from(pd.DataFrame)
def shift(self, periods=1, freq=None, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, int):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, axis=1)
if freq is None:
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.shift, before, after, token='shift',
periods=periods)
# Let pandas error on invalid arguments
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, meta=meta)
return maybe_shift_divisions(out, periods, freq=freq)
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
return self.map_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
else:
return self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
@derived_from(pd.DataFrame)
def abs(self):
meta = self._meta_nonempty.abs()
return self.map_partitions(M.abs, meta=meta)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('all', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('any', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('sum', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def prod(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('prod', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('max', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('min', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
return self.reduction(M.count, aggregate=M.sum, meta=meta,
token=token, split_every=split_every)
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
return map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'var'
return map_partitions(methods.var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'std'
return map_partitions(np.sqrt, v, meta=meta, token=name)
@derived_from(pd.DataFrame)
def sem(self, axis=None, skipna=None, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
if axis == 1:
return map_partitions(M.sem, self, meta=meta,
token=self._token_prefix + 'sem',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'sem'
return map_partitions(np.sqrt, v / n, meta=meta, token=name)
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
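Examples
--------
A brief sketch, assuming ``df`` is a dask DataFrame with numeric columns:
>>> df.quantile(0.5).compute()  # doctest: +SKIP
>>> df.quantile([0.25, 0.75]).compute()  # doctest: +SKIP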
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
if LooseVersion(pd.__version__) >= '0.19':
name = q
else:
name = None
meta = pd.Series([], dtype='f8', name=name)
return map_partitions(M.quantile, self, q, axis,
token=keyname, meta=meta)
else:
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[_q.dask for _q in quantiles])
qnames = [(_q._name, 0) for _q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(keyname, 0)] = (pd.Series, qnames, num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, keyname, meta, divisions)
else:
dask[(keyname, 0)] = (methods.concat, qnames, 1)
return DataFrame(dask, keyname, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self, split_every=False):
# currently, only numeric describe is supported
num = self._get_numeric_data()
stats = [num.count(split_every=split_every),
num.mean(split_every=split_every),
num.std(split_every=split_every),
num.min(split_every=split_every),
num.quantile([0.25, 0.5, 0.75]),
num.max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
dsk = merge(num.dask, *(s.dask for s in stats))
dsk[(name, 0)] = (methods.describe_aggregate, stats_names)
return new_dd_object(dsk, name, num._meta, divisions=[None, None])
def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
# aggregate cumulated partitions and their previous last elements
dask = {}
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dask[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return new_dd_object(merge(dask, cumpart.dask, cumlast.dask),
name, chunk(self._meta), self.divisions)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instance,
# passing map_partitions via keyword will not be aligned
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def astype(self, dtype):
return self.map_partitions(M.astype, dtype=dtype,
meta=self._meta.astype(dtype))
@derived_from(pd.Series)
def append(self, other):
# because DataFrame.append will override the method,
# wrap by pd.Series.append docstring
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
if not isinstance(other, _Frame):
from .io import from_pandas
other = from_pandas(other, 1)
from .multi import _append
if self.known_divisions and other.known_divisions:
if self.divisions[-1] < other.divisions[0]:
divisions = self.divisions[:-1] + other.divisions
return _append(self, other, divisions)
else:
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
raise ValueError(msg)
else:
divisions = [None] * (self.npartitions + other.npartitions + 1)
return _append(self, other, divisions)
@derived_from(pd.DataFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.map_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = dict(((name1, i), (getitem, key, 0))
for i, key in enumerate(aligned._keys()))
dsk1.update(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)
name2 = 'align2-' + token
dsk2 = dict(((name2, i), (getitem, key, 1))
for i, key in enumerate(aligned._keys()))
dsk2.update(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
return result1, result2
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value, overwrite=overwrite)
@derived_from(pd.DataFrame)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, how=None, closed=None, label=None):
from .tseries.resample import _resample
return _resample(self, rule, how=how, closed=closed, label=label)
@derived_from(pd.DataFrame)
def first(self, offset):
# Let pandas error on bad args
self._meta_nonempty.first(offset)
if not self.known_divisions:
raise ValueError("`first` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[0] + offset
end = self.loc._get_partitions(date)
include_right = offset.isAnchored() or not hasattr(offset, '_inc')
if end == self.npartitions - 1:
divs = self.divisions
else:
divs = self.divisions[:end + 1] + (date,)
name = 'first-' + tokenize(self, offset)
dsk = {(name, i): (self._name, i) for i in range(end)}
dsk[(name, end)] = (methods.boundary_slice, (self._name, end),
None, date, include_right, True, 'ix')
return new_dd_object(merge(self.dask, dsk), name, self, divs)
@derived_from(pd.DataFrame)
def last(self, offset):
# Let pandas error on bad args
self._meta_nonempty.last(offset)
if not self.known_divisions:
raise ValueError("`last` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[-1] - offset
start = self.loc._get_partitions(date)
if start == 0:
divs = self.divisions
else:
divs = (date,) + self.divisions[start + 1:]
name = 'last-' + tokenize(self, offset)
dsk = {(name, i + 1): (self._name, j + 1)
for i, j in enumerate(range(start, self.npartitions))}
dsk[(name, 0)] = (methods.boundary_slice, (self._name, start),
date, None, True, False, 'ix')
return new_dd_object(merge(self.dask, dsk), name, self, divs)
def nunique_approx(self, split_every=None):
"""Approximate number of unique rows.
This method uses the HyperLogLog algorithm for cardinality
estimation to compute the approximate number of unique rows.
The approximate error is 0.406%.
Parameters
----------
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is 8.
Returns
-------
a float representing the approximate number of unique rows
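Examples
--------
Illustrative usage, assuming ``df`` is a dask DataFrame:
>>> df.nunique_approx().compute()  # doctest: +SKIP
>>> df.nunique_approx(split_every=4).compute()  # doctest: +SKIP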
"""
from . import hyperloglog # here to avoid circular import issues
return aca([self], chunk=hyperloglog.compute_hll_array,
combine=hyperloglog.reduce_state,
aggregate=hyperloglog.estimate_count,
split_every=split_every, b=16, meta=float)
@property
def values(self):
""" Return a dask.array of the values of this dataframe
Warning: This creates a dask.array without precise shape information.
Operations that depend on shape information, like slicing or reshaping,
will not work.
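Examples
--------
A minimal sketch, assuming ``df`` is a dask DataFrame:
>>> arr = df.values  # doctest: +SKIP
>>> arr.compute()  # doctest: +SKIP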
"""
from ..array.core import Array
name = 'values-' + tokenize(self)
chunks = ((np.nan,) * self.npartitions,)
x = self._meta.values
if isinstance(self, DataFrame):
chunks = chunks + ((x.shape[1],),)
suffix = (0,)
else:
suffix = ()
dsk = {(name, i) + suffix: (getattr, key, 'values')
for (i, key) in enumerate(self._keys())}
return Array(merge(self.dask, dsk), name, chunks, x.dtype)
normalize_token.register((Scalar, _Frame), lambda a: a._name)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_token_prefix = 'series-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.Series(array, index=index, name=self.name)
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
from .accessor import DatetimeAccessor
return DatetimeAccessor(self)
@cache_readonly
def cat(self):
from .accessor import CategoricalAccessor
return CategoricalAccessor(self)
@cache_readonly
def str(self):
from .accessor import StringAccessor
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'cat'):
o.remove('cat') # cat only in `dir` if available
return list(o)
@property
def nbytes(self):
return self.reduction(methods.nbytes, np.sum, token='nbytes',
meta=int, split_every=False)
@cache_readonly
def _repr_data(self):
values = [str(self.dtype)] + ['...'] * self.npartitions
return pd.Series(values, index=self._repr_divisions, name=self.name)
def __repr__(self):
""" have to overwrite footer """
if self.name is not None:
footer = "Name: {name}, dtype: {dtype}".format(name=self.name,
dtype=self.dtype)
else:
footer = "dtype: {dtype}".format(dtype=self.dtype)
return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=self.to_string(),
footer=footer,
name=key_split(self._name),
task=len(self.dask))
@derived_from(pd.Series)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def quantile(self, q=0.5):
""" Approximate quantiles of Series
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
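Examples
--------
A brief sketch, assuming ``s`` is a dask Series:
>>> s.quantile(0.5).compute()  # doctest: +SKIP
>>> s.quantile([0.25, 0.75]).compute()  # doctest: +SKIP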
"""
return quantile(self, q)
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self._meta, self.divisions)
raise NotImplementedError()
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, index, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, index, **kwargs)
@derived_from(pd.Series)
def count(self, split_every=False):
return super(Series, self).count(split_every=split_every)
def unique(self, split_every=None, split_out=1):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
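Examples
--------
A minimal sketch, assuming ``s`` is a dask Series:
>>> s.unique().compute()  # doctest: +SKIP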
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', split_every=split_every,
series_name=self.name, split_out=split_out)
@derived_from(pd.Series)
def nunique(self, split_every=None):
return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(self, split_every=None, split_out=1):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
combine=methods.value_counts_combine,
meta=self._meta.value_counts(), token='value-counts',
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def nsmallest(self, n=5, split_every=None):
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token='series-nsmallest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(M.isin, self, list(other))
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if not (isinstance(arg, (pd.Series, dict)) or callable(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = dict(((name, i), (M.map, k, arg, na_action)) for i, k in
enumerate(self._keys()))
dsk.update(self.dask)
if meta is no_default:
meta = _emulate(M.map, self, arg, na_action=na_action)
else:
meta = make_meta(meta)
return Series(dsk, name, meta, self.divisions)
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
# np.clip may pass out
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.Series)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.Series)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Series, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(pd.Series)
def combine(self, other, func, fill_value=None):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value)
@derived_from(pd.Series)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@derived_from(pd.Series)
def to_string(self, max_rows=5):
# option_context doesn't affect the output here
return self._repr_data.to_string(max_rows=max_rows)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, level=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default,
name=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
name : list, scalar or None, optional
Deprecated, use `meta` instead. If a list is given, the result is a
DataFrame whose columns are the specified list. Otherwise, the result
is a Series whose name is the given scalar, or None (no name). If the
name keyword is not given, dask tries to infer the result type from
the beginning of the data. This inference may take some time and can
lead to unexpected results.
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Series or DataFrame if func returns a Series.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if name is not no_default:
warnings.warn("`name` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(name, (pd.DataFrame, pd.Series)):
meta = name
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, **kwds)
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None, split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True, split_every=split_every)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None,
split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True,
split_every=split_every)
@derived_from(pd.Series)
def autocorr(self, lag=1, split_every=False):
if not isinstance(lag, int):
raise TypeError("lag must be an integer")
return self.corr(self if lag == 0 else self.shift(lag),
split_every=split_every)
class Index(Series):
_partition_type = pd.Index
_token_prefix = 'index-'
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
return pd.Index(array, name=self.name)
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat: this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
@derived_from(pd.Index)
def max(self, split_every=False):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max',
split_every=split_every)
@derived_from(pd.Index)
def min(self, split_every=False):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int,
split_every=split_every)
@derived_from(pd.Index)
def shift(self, periods=1, freq=None):
if isinstance(self._meta, pd.PeriodIndex):
if freq is not None:
raise ValueError("PeriodIndex doesn't accept `freq` argument")
meta = self._meta_nonempty.shift(periods)
out = self.map_partitions(M.shift, periods, meta=meta,
token='shift')
else:
# Pandas will raise for other index types that don't implement shift
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, periods, token='shift',
meta=meta, freq=freq)
if freq is None:
freq = meta.freq
return maybe_shift_divisions(out, periods, freq=freq)
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.DataFrame(array, index=index, columns=self.columns)
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
renamed = _rename_dask(self, columns)
self._meta = renamed._meta
self._name = renamed._name
self.dask.update(renamed.dask)
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key) or isinstance(key, tuple):
if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
return self.loc[key]
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, list):
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = {(name, i): (M._getitem_array, (self._name, i), (key._name, i))
for i in range(self.npartitions)}
return new_dd_object(merge(self.dask, key.dask, dsk), name,
self, self.divisions)
raise NotImplementedError(key)
def __setitem__(self, key, value):
if isinstance(key, (tuple, list)):
df = self.assign(**{k: value[c]
for k, c in zip(key, value.columns)})
else:
df = self.assign(**{key: value})
self.dask = df.dask
self._name = df._name
self._meta = df._meta
def __delitem__(self, key):
result = self.drop([key], axis=1)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def __setattr__(self, key, value):
try:
columns = object.__getattribute__(self, '_meta').columns
except AttributeError:
columns = ()
if key in columns:
self[key] = value
else:
object.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self.columns:
meta = self._meta[key]
name = 'getitem-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, pd.compat.string_types) and
pd.compat.isidentifier(c)))
return list(o)
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(pd.DataFrame)
def get_dtype_counts(self):
return self._meta.get_dtype_counts()
@derived_from(pd.DataFrame)
def get_ftype_counts(self):
return self._meta.get_ftype_counts()
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, drop=True, sorted=False, **kwargs):
"""
Set the DataFrame index (row labels) using an existing column
This realigns the dataset to be sorted by a new column. This can have a
significant impact on performance, because joins, groupbys, lookups, etc.
are all much faster on that column. However, this performance increase
comes at a cost: sorting a parallel dataset requires expensive shuffles.
Often we ``set_index`` once directly after data ingest and filtering and
then perform many cheap computations off of the sorted dataset.
This function operates exactly like ``pandas.set_index`` except with
different performance costs (it is much more expensive). Under normal
operation this function does an initial pass over the index column to
compute approximate quantiles to serve as future divisions. It then passes
over the data a second time, splitting up each input partition into several
pieces and sharing those pieces to all of the output partitions now in
sorted order.
In some cases we can alleviate those costs, for example if your dataset is
sorted already then we can avoid making many small pieces or if you know
good values to split the new index column then we can avoid the initial
pass over the data. For example if your new index is a datetime index and
your data is already sorted by day then this entire operation can be done
for free. You can control these options with the following parameters.
Parameters
----------
df: Dask DataFrame
index: string or Dask Series
npartitions: int
The ideal number of output partitions
shuffle: string, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` for
distributed operation. Will be inferred by your current scheduler.
sorted: bool, optional
If the index column is already sorted in increasing order.
Defaults to False
divisions: list, optional
Known values on which to separate index values of the partitions.
See http://dask.pydata.org/en/latest/dataframe-design.html#partitions
Defaults to computing this with a single pass over the data
compute: bool
Whether or not to trigger an immediate computation. Defaults to True.
Examples
--------
>>> df2 = df.set_index('x') # doctest: +SKIP
>>> df2 = df.set_index(d.x) # doctest: +SKIP
>>> df2 = df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
A common case is when we have a datetime column that we know to be
sorted and is cleanly divided by day. We can set this index for free
by specifying both that the column is pre-sorted and the particular
divisions along which it is separated:
>>> import pandas as pd
>>> divisions = pd.date_range('2000', '2010', freq='1D')
>>> df2 = df.set_index('timestamp', sorted=True, divisions=divisions) # doctest: +SKIP
"""
if sorted:
return set_sorted_index(self, other, drop=drop, **kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See Also
--------
set_index
"""
raise Exception("Deprecated, use set_index(..., divisions=...) instead")
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nlargest'
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def nsmallest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nsmallest'
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def groupby(self, key, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
"""
Convert columns of the DataFrame to category dtype
Parameters
----------
columns : list, optional
A list of column names to convert to the category type. By
default any column with an object dtype is converted to a
categorical.
kwargs
Keyword arguments are passed on to compute.
Notes
-----
When dealing with columns of repeated text values, converting to the
category type is often much more performant, both in terms of memory
and in writing to disk or communicating over the network.
See also
--------
dask.dataframe.categorical.categorize
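Examples
--------
A minimal sketch, assuming ``df`` is a dask DataFrame and ``'name'``
is an illustrative text column:
>>> df2 = df.categorize(columns=['name'])  # doctest: +SKIP
>>> df2 = df.categorize()  # doctest: +SKIP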
"""
from dask.dataframe.categorical import categorize
return categorize(self, columns, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
for k, v in kwargs.items():
if not (isinstance(v, (Series, Scalar, pd.Series)) or
np.isscalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._meta.assign(**_extract_meta(kwargs))
return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return self.map_partitions(M.rename, None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that it will also run in
many threads. This may conflict with ``numexpr``, which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread:
import numexpr
numexpr.set_nthreads(1)
The original docstring follows below:\n
""" + (pd.DataFrame.query.__doc__
if pd.DataFrame.query.__doc__ is not None else '')
name = 'query-%s' % tokenize(self, expr)
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, M.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (M.query, (self._name, i), expr))
for i in range(self.npartitions))
meta = self._meta.query(expr, **kwargs)
return new_dd_object(merge(dsk, self.dask), name,
meta, self.divisions)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
return self.map_partitions(M.dropna, how=how, subset=subset)
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.DataFrame)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.DataFrame)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def to_castra(self, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from .io import to_castra
return to_castra(self, fn, categories, sorted_index_column,
compute=compute, get=get)
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
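Examples
--------
Illustrative usage, assuming ``df`` is a dask DataFrame:
>>> b = df.to_bag(index=True)  # doctest: +SKIP
>>> b.take(3)  # doctest: +SKIP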
"""
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.DataFrame)
def to_string(self, max_rows=5):
# option_context doesn't affect the output here
return self._repr_data.to_string(max_rows=max_rows,
show_dimensions=False)
def _get_numeric_data(self, how='any', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return self.map_partitions(M._get_numeric_data,
meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, errors='raise'):
axis = self._validate_axis(axis)
if axis == 1:
return self.map_partitions(M.drop, labels, axis=axis, errors=errors)
raise NotImplementedError("Drop currently only works for axis=1")
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
npartitions=npartitions, indicator=indicator,
shuffle=shuffle)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
msg = ('Unable to append a dd.Series to a dd.DataFrame. '
'Use a pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.itertuples():
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis in (1, 'columns'):
# When axis=1 and other is a series, `other` is transposed
# and the operator is applied broadcast across rows. This
# isn't supported with dd.Series.
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
elif isinstance(other, pd.Series):
# Special case for pd.Series to avoid unwanted partitioning
# of other. We pass it in as a kwarg to prevent this.
meta = _emulate(op, self, other=other, axis=axis,
fill_value=fill_value)
return map_partitions(op, self, other=other, meta=meta,
axis=axis, fill_value=fill_value)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, axis='columns', level=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, axis=0, args=(), meta=no_default,
columns=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
columns : list, scalar or None
Deprecated, please use `meta` instead. If a list is given, the result
is a DataFrame whose columns are the specified list. Otherwise, the
result is a Series whose name is the given scalar, or None (no name).
If this keyword is not given, dask tries to infer the result type
from the beginning of the data. This inference may take some time and
can lead to unexpected results.
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function to row-wise passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
if axis == 0:
msg = ("dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
raise NotImplementedError(msg)
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
axis=axis, args=args, **kwds)
return map_partitions(M.apply, self, func, axis,
False, False, None, args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta='__no_default__'):
return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None, split_every=False):
return cov_corr(self, min_periods, split_every=split_every)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None, split_every=False):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True, split_every=split_every)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame.
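        Example (illustrative only; the exact output depends on the data):
        >>> ddf.info(verbose=True, memory_usage=True)  # doctest: +SKIP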
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self))]
if len(self.columns) == 0:
lines.append('Index: 0 entries')
lines.append('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({'index': self.index, 'count': self.count()})
if memory_usage:
computations.update({'memory_usage': self.map_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
column_template = "{0:<%d} {1}" % (self.columns.str.len().max() + 5)
if verbose:
index = computations['index']
counts = computations['count']
lines.append(index.summary())
column_template = column_template.format('{0}', '{1} non-null {2}')
column_info = [column_template.format(*x) for x in zip(self.columns, counts, self.dtypes)]
else:
column_info = [column_template.format(*x) for x in zip(self.columns, self.dtypes)]
lines.append('Data columns (total {} columns):'.format(len(self.columns)))
lines.extend(column_info)
dtype_counts = ['%s(%d)' % k for k in sorted(self.dtypes.value_counts().iteritems(), key=str)]
lines.append('dtypes: {}'.format(', '.join(dtype_counts)))
if memory_usage:
memory_int = computations['memory_usage'].sum()
lines.append('memory usage: {}\n'.format(memory_repr(memory_int)))
put_lines(buf, lines)
def pivot_table(self, index=None, columns=None,
values=None, aggfunc='mean'):
"""
        Create a spreadsheet-style pivot table as a DataFrame. The target
        ``columns`` must have a category dtype so that the result's
        ``columns`` can be inferred. ``index``, ``columns``, ``values`` and
        ``aggfunc`` must all be scalars.
Parameters
----------
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
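        Examples
        --------
        A sketch with hypothetical column names, assuming column ``'b'``
        already has a category dtype:
        >>> ddf.pivot_table(index='a', columns='b', values='c',
        ...                 aggfunc='sum')  # doctest: +SKIP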
"""
from .reshape import pivot_table
return pivot_table(self, index=index, columns=columns, values=values,
aggfunc=aggfunc)
def to_records(self, index=False):
from .io import to_records
return to_records(self)
@derived_from(pd.DataFrame)
def to_html(self, max_rows=5):
# pd.Series doesn't have html repr
data = self._repr_data.to_html(max_rows=max_rows,
show_dimensions=False)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
@cache_readonly
def _repr_data(self):
dtypes = self.dtypes
values = {key: [value] + ['...'] * self.npartitions for key, value
in zip(dtypes.index, dtypes.values)}
return pd.DataFrame(values,
index=self._repr_divisions,
columns=self.columns)
_HTML_FMT = """<div><strong>Dask DataFrame Structure:</strong></div>
{data}
<div>Dask Name: {name}, {task} tasks</div>"""
def _repr_html_(self):
data = self._repr_data.to_html(max_rows=5,
show_dimensions=False, notebook=True)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
for name in ['lt', 'gt', 'le', 'ge', 'ne', 'eq']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_comparison_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_comparison_method(name, meth)
def elemwise_property(attr, s):
meta = pd.Series([], dtype=getattr(s._meta, attr).dtype)
return map_partitions(getattr, s, attr, meta=meta)
for name in ['nanosecond', 'microsecond', 'millisecond', 'second', 'minute',
'hour', 'day', 'dayofweek', 'dayofyear', 'week', 'weekday',
'weekofyear', 'month', 'quarter', 'year']:
setattr(Index, name, property(partial(elemwise_property, name)))
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
meta = kwargs.pop('meta', no_default)
_name = funcname(op) + '-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar)
else d._keys() for d in dasks]
if other:
dsk = dict(((_name, i),
(apply, partial_by_order, list(frs),
{'function': op, 'other': other}))
for i, frs in enumerate(zip(*keys)))
else:
dsk = dict(((_name, i), (op,) + frs) for i, frs in enumerate(zip(*keys)))
dsk = merge(dsk, *[d.dask for d in dasks])
if meta is no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
meta = _emulate(op, *args, **kwargs)
return new_dd_object(dsk, _name, meta, divisions)
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
def hash_shard(df, nparts, split_out_setup=None, split_out_setup_kwargs=None):
if split_out_setup:
h = split_out_setup(df, **(split_out_setup_kwargs or {}))
else:
h = df
h = hash_pandas_object(h, index=False)
if isinstance(h, pd.Series):
h = h._values
h %= nparts
return {i: df.iloc[h == i] for i in range(nparts)}
def split_out_on_index(df):
h = df.index
if isinstance(h, pd.MultiIndex):
h = pd.DataFrame([], index=h).reset_index()
return h
def split_out_on_cols(df, cols=None):
return df[cols]
@insert_meta_param_description
def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
meta=no_default, token=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None,
split_every=None, split_out=None, split_out_setup=None,
split_out_setup_kwargs=None, **kwargs):
"""Apply a function to blocks, then concat, then apply again
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function concatenated-block -> block
Function to operate on the concatenated result of chunk
combine : function concatenated-block -> block, optional
Function to operate on intermediate concatenated results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
split_out : int, optional
Number of output partitions. Split occurs after first chunk reduction.
split_out_setup : callable, optional
If provided, this function is called on each chunk before performing
the hash-split. It should return a pandas object, where each row
(excluding the index) is hashed. If not provided, the chunk is hashed
as is.
split_out_setup_kwargs : dict, optional
Keywords for the `split_out_setup` function only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
npartitions = set(arg.npartitions for arg in args
if isinstance(arg, _Frame))
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(token or (chunk, aggregate), meta, args,
chunk_kwargs, aggregate_kwargs, combine_kwargs,
split_every, split_out, split_out_setup,
split_out_setup_kwargs)
# Chunk
a = '{0}-chunk-{1}'.format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {(a, 0, i, 0): (chunk, key) for i, key in enumerate(args[0]._keys())}
else:
dsk = {(a, 0, i, 0): (apply, chunk,
[(x._name, i) if isinstance(x, _Frame)
else x for x in args], chunk_kwargs)
for i in range(args[0].npartitions)}
# Split
if split_out and split_out > 1:
split_prefix = 'split-%s' % token_key
shard_prefix = 'shard-%s' % token_key
for i in range(args[0].npartitions):
dsk[(split_prefix, i)] = (hash_shard, (a, 0, i, 0), split_out,
split_out_setup, split_out_setup_kwargs)
for j in range(split_out):
dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)
a = shard_prefix
else:
split_out = 1
# Combine
b = '{0}-combine-{1}'.format(token or funcname(combine), token_key)
k = npartitions
depth = 0
while k > split_every:
for part_i, inds in enumerate(partition_all(split_every, range(k))):
for j in range(split_out):
conc = (_concat, [(a, depth, i, j) for i in inds])
if combine_kwargs:
dsk[(b, depth + 1, part_i, j)] = (apply, combine, [conc], combine_kwargs)
else:
dsk[(b, depth + 1, part_i, j)] = (combine, conc)
k = part_i + 1
a = b
depth += 1
# Aggregate
for j in range(split_out):
b = '{0}-agg-{1}'.format(token or funcname(aggregate), token_key)
conc = (_concat, [(a, depth, i, j) for i in range(k)])
if aggregate_kwargs:
dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, j)] = (aggregate, conc)
if meta is no_default:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [_concat([meta_chunk])],
aggregate_kwargs)
meta = make_meta(meta)
for arg in args:
if isinstance(arg, _Frame):
dsk.update(arg.dask)
divisions = [None] * (split_out + 1)
return new_dd_object(dsk, b, meta, divisions)
aca = apply_concat_apply
def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
if isinstance(x, (_Frame, Scalar)):
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x, nonempty) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_meta(x[k], nonempty)
return res
else:
return x
def _emulate(func, *args, **kwargs):
"""
    Apply a function using args / kwargs. If the arguments contain
    dd.DataFrame / dd.Series, the internal cache (``_meta``) is used for the
    calculation.
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. At least one of the
        args should be a dask.dataframe object.
$META
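    Examples
    --------
    A minimal sketch, assuming ``ddf`` is a dask DataFrame with (hypothetical)
    numeric columns ``x`` and ``y``:
    >>> map_partitions(lambda df: df.assign(z=df.x + df.y), ddf)  # doctest: +SKIP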
"""
meta = kwargs.pop('meta', no_default)
if meta is not no_default:
meta = make_meta(meta)
assert callable(func)
if 'token' in kwargs:
name = kwargs.pop('token')
token = tokenize(meta, *args, **kwargs)
else:
name = funcname(func)
token = tokenize(func, meta, *args, **kwargs)
name = '{0}-{1}'.format(name, token)
from .multi import _maybe_align_partitions
args = _maybe_from_pandas(args)
args = _maybe_align_partitions(args)
if meta is no_default:
meta = _emulate(func, *args, **kwargs)
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name, meta)
elif not isinstance(meta, (pd.Series, pd.DataFrame, pd.Index)):
# If `meta` is not a pandas object, the concatenated results will be a
# different type
meta = _concat([meta])
meta = make_meta(meta)
dfs = [df for df in args if isinstance(df, _Frame)]
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
dsk[(name, i)] = (apply_and_enforce, func, values, kwargs, meta)
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return new_dd_object(merge(dsk, *dasks), name, meta, args[0].divisions)
def apply_and_enforce(func, args, kwargs, meta):
"""Apply a function, and enforce the output to match meta
Ensures the output has the same columns, even if empty."""
df = func(*args, **kwargs)
if isinstance(df, (pd.DataFrame, pd.Series, pd.Index)):
if len(df) == 0:
return meta
c = meta.columns if isinstance(df, pd.DataFrame) else meta.name
return _rename(c, df)
return df
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
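    Examples
    --------
    Illustrative only (plain pandas objects, hypothetical names):
    >>> _rename(['a', 'b'], pd.DataFrame(columns=['x', 'y']))  # doctest: +SKIP
    >>> _rename('total', pd.Series(name='amount'))  # doctest: +SKIP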
"""
assert not isinstance(df, _Frame)
if columns is no_default:
return df
if isinstance(columns, Iterator):
columns = list(columns)
if isinstance(df, pd.DataFrame):
if isinstance(columns, pd.DataFrame):
columns = columns.columns
if not isinstance(columns, pd.Index):
columns = pd.Index(columns)
if (len(columns) == len(df.columns) and
type(columns) is type(df.columns) and
columns.equals(df.columns)):
# if target is identical, rename is not necessary
return df
        # deep=False doesn't copy any data/indices, so this is cheap
df = df.copy(deep=False)
df.columns = columns
return df
elif isinstance(df, (pd.Series, pd.Index)):
if isinstance(columns, (pd.Series, pd.Index)):
columns = columns.name
if df.name == columns:
return df
return df.rename(columns)
# map_partition may pass other types
return df
def _rename_dask(df, names):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
    Internally used to overwrite dd.DataFrame.columns and dd.Series.name
    We can't use map_partitions because it applies the function and then
    renames
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
names : tuple, string
Column names/Series name
"""
assert isinstance(df, _Frame)
metadata = _rename(names, df._meta)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = {}
for i in range(df.npartitions):
dsk[name, i] = (_rename, metadata, (df._name, i))
return new_dd_object(merge(dsk, df.dask), name, metadata, df.divisions)
def quantile(df, q):
"""Approximate quantiles of Series.
Parameters
----------
    q : list/array of floats
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
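    Examples
    --------
    A sketch, assuming ``s`` is a dask Series:
    >>> quantile(s, [0.25, 0.5, 0.75])  # doctest: +SKIP
    >>> quantile(s, 0.5)  # doctest: +SKIP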
"""
assert isinstance(df, Series)
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(df, Index):
meta = pd.Series(df._meta_nonempty).quantile(q)
else:
meta = df._meta_nonempty.quantile(q)
if isinstance(meta, pd.Series):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
return_type = Series
else:
finalize_tsk = lambda tsk: (getitem, tsk, 0)
return_type = Scalar
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df._meta, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): finalize_tsk((merge_percentiles, qs,
[qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, meta, new_divisions)
def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is False.
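    Examples
    --------
    Sketches only; ``ddf`` is any dask DataFrame with numeric columns, and
    ``'x'`` / ``'y'`` are hypothetical column names:
    >>> cov_corr(ddf)  # doctest: +SKIP
    >>> cov_corr(ddf, corr=True)  # doctest: +SKIP
    >>> cov_corr(ddf[['x', 'y']], corr=True, scalar=True)  # doctest: +SKIP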
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
if split_every is False:
split_every = df.npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
df = df._get_numeric_data()
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
token = tokenize(df, min_periods, scalar, split_every)
funcname = 'corr' if corr else 'cov'
a = '{0}-chunk-{1}'.format(funcname, df._name)
dsk = {(a, i): (cov_corr_chunk, f, corr)
for (i, f) in enumerate(df._keys())}
prefix = '{0}-combine-{1}-'.format(funcname, df._name)
k = df.npartitions
b = a
depth = 0
while k > split_every:
b = prefix + str(depth)
for part_i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)
k = part_i + 1
a = b
depth += 1
name = '{0}-{1}'.format(funcname, token)
dsk[(name, 0)] = (cov_corr_agg, [(a, i) for i in range(k)],
df.columns, min_periods, corr, scalar)
dsk.update(df.dask)
if scalar:
return Scalar(dsk, name, 'f8')
meta = make_meta([(c, 'f8') for c in df.columns], index=df.columns)
return DataFrame(dsk, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation"""
mat = df.values
mask = np.isfinite(mat)
keep = np.bitwise_and(mask[:, None, :], mask[:, :, None])
x = np.where(keep, mat[:, None, :], np.nan)
sums = np.nansum(x, 0)
counts = keep.astype('int').sum(0)
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
m = np.nansum((x - sums / np.where(counts, counts, np.nan)) ** 2, 0)
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_combine(data, corr=False):
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
d = (s2 / n2) - (s1 / n1)
C = (np.nansum((n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
out = np.empty(C.shape, dtype=data.dtype)
out['sum'] = cum_sums[-1]
out['count'] = cum_counts[-1]
out['cov'] = C
if corr:
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m = np.nansum(data['m'] + counts * (sums / counts_na - mu) ** 2,
axis=0)
out['m'] = m
return out
def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):
out = cov_corr_combine(data, corr)
counts = out['count']
C = out['cov']
C[counts < min_periods] = np.nan
if corr:
m2 = out['m']
den = np.sqrt(m2 * m2.T)
else:
den = np.where(counts, counts, np.nan) - 1
mat = C / den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=cols, index=cols)
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
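    Examples
    --------
    Illustration (doctest skipped; the exact scalar repr may vary):
    >>> _take_last(pd.Series([1.0, 2.0, np.nan]))  # doctest: +SKIP
    2.0
    >>> _take_last(pd.Series([1.0, 2.0, np.nan]), skipna=False)  # doctest: +SKIP
    nan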
"""
if skipna is False:
return a.iloc[-1]
else:
        # take the last valid value excluding NaN; the NaN location may
        # differ from column to column
group_dummy = np.ones(len(a.index))
last_row = a.groupby(group_dummy).last()
if isinstance(a, pd.DataFrame):
return pd.Series(last_row.values[0], index=a.columns)
else:
return last_row.values[0]
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
if not isinstance(b, (list, tuple)):
raise ValueError('New division must be list or tuple')
b = list(b)
if len(b) < 2:
        # minimum division is 2 elements, like [0, 0]
        raise ValueError('New division must have at least 2 elements')
if b != sorted(b):
raise ValueError('New division must be sorted')
if len(b[:-1]) != len(list(unique(b[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal or smaller '
'than old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal or larger '
'than old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, tmp)
j += 1
return d
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
npartitions_ratio = df.npartitions / npartitions
new_partitions_boundaries = [int(new_partition_index * npartitions_ratio)
for new_partition_index in range(npartitions + 1)]
new_name = 'repartition-%d-%s' % (npartitions, tokenize(df))
dsk = {}
for new_partition_index in range(npartitions):
value = (pd.concat, [(df._name, old_partition_index)
for old_partition_index in
range(new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])])
dsk[new_name, new_partition_index] = value
divisions = [df.divisions[new_partition_index]
for new_partition_index in new_partitions_boundaries]
return DataFrame(merge(df.dask, dsk), new_name, df._meta, divisions)
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return new_dd_object(merge(df.dask, dsk), out,
df._meta, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return new_dd_object(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
def set_sorted_index(df, index, drop=True, divisions=None, **kwargs):
if not isinstance(index, Series):
meta = df._meta.set_index(index, drop=drop)
else:
meta = df._meta.set_index(index._meta, drop=drop)
result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)
if not divisions:
divisions = compute_divisions(result, **kwargs)
result.divisions = divisions
return result
def compute_divisions(df, **kwargs):
mins = df.index.map_partitions(M.min, meta=df.index)
maxes = df.index.map_partitions(M.max, meta=df.index)
mins, maxes = compute(mins, maxes, **kwargs)
if (sorted(mins) != list(mins) or
sorted(maxes) != list(maxes) or
any(a > b for a, b in zip(mins, maxes))):
raise ValueError("Partitions must be sorted ascending with the index",
mins, maxes)
divisions = tuple(mins) + (list(maxes)[-1],)
return divisions
def _reduction_chunk(x, aca_chunk=None, **kwargs):
o = aca_chunk(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_combine(x, aca_combine=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
o = aca_combine(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def idxmaxmin_chunk(x, fn=None, skipna=True):
idx = getattr(x, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x, minmax)(skipna=skipna)
if isinstance(x, pd.DataFrame):
return pd.DataFrame({'idx': idx, 'value': value})
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_row(x, fn=None, skipna=True):
x = x.set_index('idx')
idx = getattr(x.value, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x.value, minmax)(skipna=skipna)
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_combine(x, fn=None, skipna=True):
return (x.groupby(level=0)
.apply(idxmaxmin_row, fn=fn, skipna=skipna)
.reset_index(level=1, drop=True))
def idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):
res = idxmaxmin_combine(x, fn, skipna=skipna)['idx']
if scalar:
return res[0]
res.name = None
return res
def safe_head(df, n):
r = df.head(n=n)
if len(r) != n:
msg = ("Insufficient elements for `head`. {0} elements "
"requested, only {1} elements available. Try passing larger "
"`npartitions` to `head`.")
warnings.warn(msg.format(n, len(r)))
return r
def maybe_shift_divisions(df, periods, freq):
"""Maybe shift divisions by periods of size freq
    Used to shift the divisions for the `shift` method. If freq is a fixed
    size offset (not anchored or relative), the divisions are shifted
    appropriately. Otherwise the divisions are cleared.
Parameters
----------
df : dd.DataFrame, dd.Series, or dd.Index
periods : int
The number of periods to shift.
freq : DateOffset, timedelta, or time rule string
The frequency to shift by.
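    Examples
    --------
    Sketches only, assuming ``ddf`` has a datetime index with known
    divisions. A fixed-size offset shifts the divisions; an anchored offset
    such as ``'M'`` clears them:
    >>> maybe_shift_divisions(ddf, 2, freq='1H')  # doctest: +SKIP
    >>> maybe_shift_divisions(ddf, 1, freq='M')  # doctest: +SKIP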
"""
if isinstance(freq, str):
freq = pd.tseries.frequencies.to_offset(freq)
if (isinstance(freq, pd.DateOffset) and
(freq.isAnchored() or not hasattr(freq, 'delta'))):
# Can't infer divisions on relative or anchored offsets, as
# divisions may now split identical index value.
# (e.g. index_partitions = [[1, 2, 3], [3, 4, 5]])
return df.clear_divisions()
if df.known_divisions:
divs = pd.Series(range(len(df.divisions)), index=df.divisions)
divisions = divs.shift(periods, freq=freq).index
return type(df)(df.dask, df._name, df._meta, divisions)
return df
def to_delayed(df):
""" Create Dask Delayed objects from a Dask Dataframe
Returns a list of delayed values, one value per partition.
Examples
--------
>>> partitions = df.to_delayed() # doctest: +SKIP
"""
from ..delayed import Delayed
return [Delayed(k, [df.dask]) for k in df._keys()]
def _escape_html_tag(s):
    # escape only the first '<' and '>' as HTML entities
    return s.replace('<', r'&lt;', 1).replace('>', r'&gt;', 1)
if PY3:
_Frame.to_delayed.__doc__ = to_delayed.__doc__
| {
"repo_name": "chrisbarber/dask",
"path": "dask/dataframe/core.py",
"copies": "1",
"size": "140501",
"license": "bsd-3-clause",
"hash": -7848993123937994000,
"line_mean": 36.3474215843,
"line_max": 102,
"alpha_frac": 0.5711133729,
"autogenerated": false,
"ratio": 3.9394644609561196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5010577833856119,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from flask import Flask, request, jsonify, json
from functools import partial, wraps
from .index import parse_index
class Server(object):
__slots__ = 'app', 'datasets'
def __init__(self, name='Blaze-Server', datasets=None):
app = self.app = Flask(name)
self.datasets = datasets or dict()
for args, kwargs, func in routes:
func2 = wraps(func)(partial(func, self.datasets))
app.route(*args, **kwargs)(func2)
def __getitem__(self, key):
return self.datasets[key]
def __setitem__(self, key, value):
self.datasets[key] = value
return value
routes = list()
def route(*args, **kwargs):
def f(func):
routes.append((args, kwargs, func))
return func
return f
@route('/datasets.json')
def dataset(datasets):
return jsonify(dict((k, str(v.dshape)) for k, v in datasets.items()))
@route('/data/<name>.json', methods=['POST', 'PUT', 'GET'])
def data(datasets, name):
""" Basic indexing API
    Allows remote indexing of datasets. Takes the indexing data as JSON and
    handles requests like the following.
Example
-------
For the following array:
[['Alice', 100],
['Bob', 200],
['Charlie', 300]]
schema = '{name: string, amount: int32}'
    and the following request
url: /data/table-name.json
POST-data: {'index': [{'start': 0, 'step': 3}, 'name']}
and returns responses like
{"name": "table-name",
"index": [0, "name"],
"datashape": "3 * string",
"data": ["Alice", "Bob", "Charlie"]}
"""
if request.headers['content-type'] != 'application/json':
return ("Expected JSON data", 404)
try:
data = json.loads(request.data)
except ValueError:
return ("Bad JSON. Got %s " % request.data, 404)
try:
dset = datasets[name]
except KeyError:
return ("Dataset %s not found" % name, 404)
try:
index = parse_index(data['index'])
except ValueError:
return ("Bad index", 404)
try:
rv = dset.py[index]
except RuntimeError:
return ("Bad index: %s" % (str(index)), 404)
if isinstance(rv, Iterator):
rv = list(rv)
return jsonify({'name': name,
'index': data['index'],
'datashape': str(dset.dshape.subshape[index]),
'data': rv})
| {
"repo_name": "aterrel/blaze",
"path": "blaze/serve/server.py",
"copies": "1",
"size": "2462",
"license": "bsd-3-clause",
"hash": -1180216000817895000,
"line_mean": 23.3762376238,
"line_max": 73,
"alpha_frac": 0.5759545085,
"autogenerated": false,
"ratio": 3.799382716049383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9871318965580629,
"avg_score": 0.0008036517937508036,
"num_lines": 101
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from functools import wraps
from itertools import chain, count
import operator
import uuid
from toolz import merge, unique, curry
from .optimize import cull, fuse
from .utils import concrete, funcname
from . import base
from .compatibility import apply
from . import threaded
__all__ = ['compute', 'do', 'value', 'Value']
def flat_unique(ls):
"""Flatten ``ls``, filter by unique id, and return a list"""
return list(unique(chain.from_iterable(ls), key=id))
def unzip(ls, nout):
"""Unzip a list of lists into ``nout`` outputs."""
out = list(zip(*ls))
if not out:
out = [()] * nout
return out
def to_task_dasks(expr):
"""Normalize a python object and extract all sub-dasks.
- Replace ``Values`` with their keys
- Convert literals to things the schedulers can handle
- Extract dasks from all enclosed values
Parameters
----------
expr : object
The object to be normalized. This function knows how to handle
``Value``s, as well as most builtin python types.
Returns
-------
task : normalized task to be run
dasks : list of dasks that form the dag for this task
Examples
--------
>>> a = value(1, 'a')
>>> b = value(2, 'b')
>>> task, dasks = to_task_dasks([a, b, 3])
>>> task # doctest: +SKIP
['a', 'b', 3]
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
>>> task, dasks = to_task_dasks({a: 1, b: 2})
>>> task # doctest: +SKIP
(dict, [['a', 1], ['b', 2]])
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
"""
if isinstance(expr, Value):
return expr.key, expr._dasks
if isinstance(expr, base.Base):
name = tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(expr.dask, keys)
dsk[name] = (expr._finalize, expr, (concrete, keys))
return name, [dsk]
if isinstance(expr, tuple) and type(expr) != tuple:
return expr, []
if isinstance(expr, (Iterator, list, tuple, set)):
args, dasks = unzip(map(to_task_dasks, expr), 2)
args = list(args)
dasks = flat_unique(dasks)
# Ensure output type matches input type
if isinstance(expr, (tuple, set)):
return (type(expr), args), dasks
else:
return args, dasks
if isinstance(expr, dict):
args, dasks = to_task_dasks([[k, v] for k, v in expr.items()])
return (dict, args), dasks
return expr, []
def tokenize(*args, **kwargs):
"""Mapping function from task -> consistent name.
Parameters
----------
args : object
Python objects that summarize the task.
pure : boolean, optional
If True, a consistent hash function is tried on the input. If this
fails, then a unique identifier is used. If False (default), then a
unique identifier is always used.
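    Examples
    --------
    With ``pure=True`` the name is deterministic; otherwise a fresh uuid is
    generated on every call:
    >>> tokenize(1, 2, 3, pure=True) == tokenize(1, 2, 3, pure=True)
    True
    >>> tokenize(1, 2, 3) == tokenize(1, 2, 3)
    False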
"""
if kwargs.pop('pure', False):
return base.tokenize(*args)
else:
return str(uuid.uuid4())
def applyfunc(func, args, kwargs, pure=False):
"""Create a Value by applying a function to args.
Given a function and arguments, return a Value that represents the result
of that computation."""
args, dasks = unzip(map(to_task_dasks, args), 2)
if kwargs:
dask_kwargs, dasks2 = to_task_dasks(kwargs)
dasks = dasks + (dasks2,)
task = (apply, func, list(args), dask_kwargs)
else:
task = (func,) + args
name = funcname(func) + '-' + tokenize(*task, pure=pure)
dasks = flat_unique(dasks)
dasks.append({name: task})
return Value(name, dasks)
@curry
def do(func, pure=False):
"""Wraps a function so that it outputs a ``Value``.
Examples
--------
Can be used as a decorator:
>>> @do
... def add(a, b):
... return a + b
>>> res = add(1, 2)
>>> type(res) == Value
True
>>> res.compute()
3
For other cases, it may be cleaner to call ``do`` on a function at call
time:
>>> res2 = do(sum)([res, 2, 3])
>>> res2.compute()
8
``do`` also accepts an optional keyword ``pure``. If False (default), then
subsequent calls will always produce a different ``Value``. This is useful
for non-pure functions (such as ``time`` or ``random``).
>>> from random import random
>>> out1 = do(random)()
>>> out2 = do(random)()
>>> out1.key == out2.key
False
If you know a function is pure (output only depends on the input, with no
global state), then you can set ``pure=True``. This will attempt to apply a
consistent name to the output, but will fallback on the same behavior of
``pure=False`` if this fails.
>>> @do(pure=True)
... def add(a, b):
... return a + b
>>> out1 = add(1, 2)
>>> out2 = add(1, 2)
>>> out1.key == out2.key
True
"""
@wraps(func)
def _dfunc(*args, **kwargs):
return applyfunc(func, args, kwargs, pure=pure)
return _dfunc
def compute(*args, **kwargs):
"""Evaluate more than one ``Value`` at once.
Note that the only difference between this function and
``dask.base.compute`` is that this implicitly wraps python objects in
``Value``, allowing for collections of dask objects to be computed.
Examples
--------
>>> a = value(1)
>>> b = a + 2
>>> c = a + 3
>>> compute(b, c) # Compute both simultaneously
(3, 4)
>>> compute(a, [b, c]) # Works for lists of Values
(1, [3, 4])
"""
args = [value(a) for a in args]
return base.compute(*args, **kwargs)
def right(method):
"""Wrapper to create 'right' version of operator given left version"""
def _inner(self, other):
return method(other, self)
return _inner
class Value(base.Base):
"""Represents a value to be computed by dask.
Equivalent to the output from a single key in a dask graph.
"""
__slots__ = ('_key', '_dasks')
_optimize = staticmethod(lambda dsk, keys, **kwargs: dsk)
_finalize = staticmethod(lambda a, r: r[0])
_default_get = staticmethod(threaded.get)
def __init__(self, name, dasks):
object.__setattr__(self, '_key', name)
object.__setattr__(self, '_dasks', dasks)
def __setstate__(self, state):
self.__init__(*state)
return self
def __getstate__(self):
return (self._key, self._dasks)
@property
def dask(self):
return merge(*self._dasks)
@property
def key(self):
return self._key
def _keys(self):
return [self.key]
def __repr__(self):
return "Value({0})".format(repr(self.key))
def __hash__(self):
return hash(self.key)
def __dir__(self):
return dir(type(self))
def __getattr__(self, attr):
if not attr.startswith('_'):
return do(getattr, pure=True)(self, attr)
else:
raise AttributeError("Attribute {0} not found".format(attr))
def __setattr__(self, attr, val):
raise TypeError("Value objects are immutable")
def __setitem__(self, index, val):
raise TypeError("Value objects are immutable")
def __iter__(self):
raise TypeError("Value objects are not iterable")
def __call__(self, *args, **kwargs):
return do(apply, kwargs.pop('pure', False))(self, args, kwargs)
def __bool__(self):
raise TypeError("Truth of Value objects is not supported")
__nonzero__ = __bool__
__abs__ = do(operator.abs, True)
__add__ = do(operator.add, True)
__and__ = do(operator.and_, True)
__div__ = do(operator.floordiv, True)
__eq__ = do(operator.eq, True)
__floordiv__ = do(operator.floordiv, True)
__ge__ = do(operator.ge, True)
__getitem__ = do(operator.getitem, True)
__gt__ = do(operator.gt, True)
__index__ = do(operator.index, True)
__invert__ = do(operator.invert, True)
__le__ = do(operator.le, True)
__lshift__ = do(operator.lshift, True)
__lt__ = do(operator.lt, True)
__mod__ = do(operator.mod, True)
__mul__ = do(operator.mul, True)
__ne__ = do(operator.ne, True)
__neg__ = do(operator.neg, True)
__or__ = do(operator.or_, True)
__pos__ = do(operator.pos, True)
__pow__ = do(operator.pow, True)
__radd__ = do(right(operator.add), True)
__rand__ = do(right(operator.and_), True)
__rdiv__ = do(right(operator.floordiv), True)
__rfloordiv__ = do(right(operator.floordiv), True)
__rlshift__ = do(right(operator.lshift), True)
__rmod__ = do(right(operator.mod), True)
__rmul__ = do(right(operator.mul), True)
__ror__ = do(right(operator.or_), True)
__rpow__ = do(right(operator.pow), True)
__rrshift__ = do(right(operator.rshift), True)
__rshift__ = do(operator.rshift, True)
__rsub__ = do(right(operator.sub), True)
__rtruediv__ = do(right(operator.truediv), True)
__rxor__ = do(right(operator.xor), True)
__sub__ = do(operator.sub, True)
__truediv__ = do(operator.truediv, True)
__xor__ = do(operator.xor, True)
base.normalize_token.register(Value, lambda a: a.key)
def value(val, name=None):
"""Create a ``Value`` from a python object.
Parameters
----------
val : object
Object to be wrapped.
name : string, optional
Name to be used in the resulting dask.
Examples
--------
>>> a = value([1, 2, 3])
>>> a.compute()
[1, 2, 3]
Values can act as a proxy to the underlying object. Many operators are
supported:
>>> (a + [1, 2]).compute()
[1, 2, 3, 1, 2]
>>> a[1].compute()
2
Method and attribute access also works:
>>> a.count(2).compute()
1
Note that if a method doesn't exist, no error will be thrown until runtime:
>>> res = a.not_a_real_method()
>>> res.compute() # doctest: +SKIP
AttributeError("'list' object has no attribute 'not_a_real_method'")
Methods are assumed to be impure by default, meaning that subsequent calls
may return different results. To assume purity, set `pure=True`. This
allows sharing of any intermediate values.
>>> a.count(2, pure=True).key == a.count(2, pure=True).key
True
"""
if isinstance(val, Value):
return val
task, dasks = to_task_dasks(val)
name = name or (type(val).__name__ + '-' + tokenize(task, pure=True))
dasks.append({name: task})
return Value(name, dasks)
| {
"repo_name": "vikhyat/dask",
"path": "dask/imperative.py",
"copies": "1",
"size": "10504",
"license": "bsd-3-clause",
"hash": 2676777250751107600,
"line_mean": 27.7780821918,
"line_max": 79,
"alpha_frac": 0.5850152323,
"autogenerated": false,
"ratio": 3.51892797319933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.460394320549933,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from functools import wraps, partial
import operator
from operator import getitem
from pprint import pformat
import warnings
from toolz import merge, first, unique, partition_all, remove
import pandas as pd
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import apply, operator_div, bind_method, PY3
from ..utils import (random_state_data,
pseudorandom, derived_from, funcname, memory_repr,
put_lines, M, key_split)
from ..base import Base, tokenize, normalize_token
from . import methods
from .accessor import DatetimeAccessor, StringAccessor
from .categorical import CategoricalAccessor, categorize
from .hashing import hash_pandas_object
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error, clear_known_categories,
is_categorical_dtype, has_known_categories, PANDAS_VERSION)
no_default = '__no_default__'
if PANDAS_VERSION >= '0.20.0':
from pandas.util import cache_readonly
pd.core.computation.expressions.set_use_numexpr(False)
else:
from pandas.util.decorators import cache_readonly
pd.computation.expressions.set_use_numexpr(False)
def _concat(args):
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if not isinstance(args[0], (pd.DataFrame, pd.Series, pd.Index)):
try:
return pd.Series(args)
except:
return args
# We filter out empty partitions here because pandas frequently has
# inconsistent dtypes in results between empty and non-empty frames.
# Ideally this would be handled locally for each operation, but in practice
# this seems easier. TODO: don't do this.
args2 = [i for i in args if len(i)]
return args[0] if not args2 else methods.concat(args2, uniform=True)
def _get_return_type(meta):
if isinstance(meta, _Frame):
meta = meta._meta
if isinstance(meta, pd.Series):
return Series
elif isinstance(meta, pd.DataFrame):
return DataFrame
elif isinstance(meta, pd.Index):
return Index
return Scalar
def new_dd_object(dsk, _name, meta, divisions):
"""Generic constructor for dask.dataframe objects.
Decides the appropriate output class based on the type of `meta` provided.
"""
return _get_return_type(meta)(dsk, _name, meta, divisions)
def optimize(dsk, keys, **kwargs):
from .optimize import optimize
return optimize(dsk, keys, **kwargs)
def finalize(results):
return _concat(results)
class Scalar(Base):
""" A Dask object to represent a pandas scalar"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(first)
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
self.dask = dsk
self._name = name
meta = make_meta(meta)
if isinstance(meta, (pd.DataFrame, pd.Series, pd.Index)):
raise TypeError("Expected meta to specify scalar, got "
"{0}".format(type(meta).__name__))
self._meta = meta
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
def _keys(self):
return [self.key]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
return Scalar(merge(dsk, self.dask), name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dsk = self.dask
return_type = _get_return_type(other)
if isinstance(other, Scalar):
dsk = merge(dsk, other.dask)
other_key = (other._name, 0)
elif isinstance(other, Base):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
if return_type is not Scalar:
return return_type(dsk, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(dsk, name, meta)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, meta, divisions):
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not isinstance(meta, self._partition_type):
raise TypeError("Expected meta to specify type {0}, got type "
"{1}".format(self._partition_type.__name__,
type(meta).__name__))
self._meta = meta
self.divisions = tuple(divisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def size(self):
return self.reduction(methods.size, np.sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def copy(self):
""" Make a copy of the dataframe
This is strictly a shallow copy of the underlying computational graph.
It does not affect the underlying data
"""
return new_dd_object(self.dask, self._name,
self._meta, self.divisions)
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def __array__(self, dtype=None, **kwargs):
self._computed = self.compute()
x = np.array(self._computed)
return x
def __array_wrap__(self, array, context=None):
raise NotImplementedError
@property
def _elemwise(self):
return elemwise
@property
def _repr_data(self):
raise NotImplementedError
@property
def _repr_divisions(self):
name = "npartitions={0}".format(self.npartitions)
if self.known_divisions:
divisions = pd.Index(self.divisions, name=name)
else:
            # avoid being converted to NaN
divisions = pd.Index(['None'] * (self.npartitions + 1),
name=name)
return divisions
def __repr__(self):
data = self._repr_data.to_string(max_rows=5, show_dimensions=False)
return """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=data, name=key_split(self._name),
task=len(self.dask))
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name,
self._meta.index, self.divisions)
def reset_index(self, drop=False):
"""Reset the index to the default index.
Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will
not be monotonically increasing from 0. Instead, it will restart at 0
for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
This is due to the inability to statically know the full length of the
index.
For DataFrame with multi-level index, returns a new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
drop : boolean, default False
Do not try to insert index into dataframe columns.
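        Examples
        --------
        Illustration of the per-partition restart described above:
        >>> import dask.dataframe as dd  # doctest: +SKIP
        >>> ddf = dd.from_pandas(pd.DataFrame({'x': range(4)}), npartitions=2)  # doctest: +SKIP
        >>> ddf.reset_index(drop=True).compute().index.tolist()  # doctest: +SKIP
        [0, 1, 0, 1]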
"""
return self.map_partitions(M.reset_index, drop=drop).clear_divisions()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n + 2]
return new_dd_object(merge(self.dask, dsk), name,
self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
@derived_from(pd.DataFrame)
def drop_duplicates(self, split_every=None, split_out=1, **kwargs):
# Let pandas error on bad inputs
self._meta_nonempty.drop_duplicates(**kwargs)
if 'subset' in kwargs and kwargs['subset'] is not None:
split_out_setup = split_out_on_cols
split_out_setup_kwargs = {'cols': kwargs['subset']}
else:
split_out_setup = split_out_setup_kwargs = None
if kwargs.get('keep', True) is False:
raise NotImplementedError("drop_duplicates with keep=False")
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', split_every=split_every,
split_out=split_out, split_out_setup=split_out_setup,
split_out_setup_kwargs=split_out_setup_kwargs, **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int,
split_every=False).compute()
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Note that the index and divisions are assumed to remain unchanged.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
Also note that the index and divisions are assumed to remain unchanged.
If the function you're mapping changes the index/divisions, you'll need
to clear them afterwards:
>>> ddf.map_partitions(func).clear_divisions() # doctest: +SKIP
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def map_overlap(self, func, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
This can be useful for implementing windowing functions such as
``df.rolling(...).mean()`` or ``df.diff()``.
Parameters
----------
func : function
Function applied to each partition.
before : int
The number of rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int
The number of rows to append to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Notes
-----
Given positive integers ``before`` and ``after``, and a function
``func``, ``map_overlap`` does the following:
1. Prepend ``before`` rows to each partition ``i`` from the end of
partition ``i - 1``. The first partition has no rows prepended.
2. Append ``after`` rows to each partition ``i`` from the beginning of
partition ``i + 1``. The last partition has no rows appended.
3. Apply ``func`` to each partition, passing in any extra ``args`` and
``kwargs`` if provided.
4. Trim ``before`` rows from the beginning of all but the first
partition.
5. Trim ``after`` rows from the end of all but the last partition.
Note that the index and divisions are assumed to remain unchanged.
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
A rolling sum with a trailing moving window of size 2 can be computed by
overlapping 2 rows before each partition, and then mapping calls to
``df.rolling(2).sum()``:
>>> ddf.compute()
x y
0 1 1.0
1 2 2.0
2 4 3.0
3 7 4.0
4 11 5.0
>>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
x y
0 NaN NaN
1 3.0 3.0
2 6.0 5.0
3 11.0 7.0
4 18.0 9.0
The pandas ``diff`` method computes a discrete difference shifted by a
number of periods (can be positive or negative). This can be
implemented by mapping calls to ``df.diff`` to each partition after
prepending/appending that many rows, depending on sign:
>>> def diff(df, periods=1):
... before, after = (periods, 0) if periods > 0 else (0, -periods)
... return df.map_overlap(lambda df, periods=1: df.diff(periods),
... before, after, periods=periods)
>>> diff(ddf, 1).compute()
x y
0 NaN NaN
1 1.0 1.0
2 2.0 1.0
3 3.0 1.0
4 4.0 1.0
If you have a ``DatetimeIndex``, you can use a `timedelta` for time-
based windows.
>>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))
>>> dts = dd.from_pandas(ts, npartitions=2)
>>> dts.map_overlap(lambda df: df.rolling('2D').sum(),
... pd.Timedelta('2D'), 0).compute()
2017-01-01 0.0
2017-01-02 1.0
2017-01-03 3.0
2017-01-04 5.0
2017-01-05 7.0
2017-01-06 9.0
2017-01-07 11.0
2017-01-08 13.0
2017-01-09 15.0
2017-01-10 17.0
dtype: float64
"""
from .rolling import map_overlap
return map_overlap(func, self, before, after, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def random_split(self, frac, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should sum to one.
random_state: int or np.random.RandomState
If int, create a new RandomState with this as the seed;
otherwise draw from the passed RandomState.
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
if not np.allclose(sum(frac), 1):
raise ValueError("frac should sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
dsk = {(name, i): (pd_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(len(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (getitem, (name, j), i)
for j in range(self.npartitions)}
out.append(type(self)(merge(self.dask, dsk, dsk2), name2,
self._meta, self.divisions))
return out
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
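Examples
--------
Illustrative only, assuming ``ddf`` is an existing dask DataFrame:
>>> ddf.head(3)  # doctest: +SKIP
>>> ddf.head(10, npartitions=-1)  # doctest: +SKIP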
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, head received {}"
raise ValueError(msg.format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (safe_head, concat, n)
else:
dsk = {(name, 0): (safe_head, (self._name, 0), n)}
result = new_dd_object(merge(self.dask, dsk), name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the last n rows of the last partition.
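For example, assuming ``ddf`` is an existing dask DataFrame:
>>> ddf.tail(3)  # doctest: +SKIP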
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
from .indexing import _LocIndexer
return _LocIndexer(self)
# NOTE: `iloc` is not implemented because of performance concerns.
# see https://github.com/dask/dask/pull/507
def repartition(self, divisions=None, npartitions=None, freq=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output, must be less than npartitions of
input. Only used if divisions isn't specified.
freq : str, pd.Timedelta
A period on which to partition timeseries data like ``'7D'`` or
``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions' lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
>>> df = df.repartition(freq='7d') # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
elif freq is not None:
return repartition_freq(self, freq=freq)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.DataFrame)
def fillna(self, value=None, method=None, limit=None, axis=None):
axis = self._validate_axis(axis)
if method is None and limit is not None:
raise NotImplementedError("fillna with set limit and method=None")
if isinstance(value, _Frame):
test_value = value._meta_nonempty.values[0]
else:
test_value = value
meta = self._meta_nonempty.fillna(value=test_value, method=method,
limit=limit, axis=axis)
if axis == 1 or method is None:
return self.map_partitions(M.fillna, value, method=method,
limit=limit, axis=axis, meta=meta)
if method in ('pad', 'ffill'):
method = 'ffill'
skip_check = 0
before, after = 1 if limit is None else limit, 0
else:
method = 'bfill'
skip_check = self.npartitions - 1
before, after = 0, 1 if limit is None else limit
if limit is None:
name = 'fillna-chunk-' + tokenize(self, method)
dsk = {(name, i): (methods.fillna_check, (self._name, i),
method, i != skip_check)
for i in range(self.npartitions)}
parts = new_dd_object(merge(dsk, self.dask), name, meta,
self.divisions)
else:
parts = self
return parts.map_overlap(M.fillna, before, after, method=method,
limit=limit, meta=meta)
@derived_from(pd.DataFrame)
def ffill(self, axis=None, limit=None):
return self.fillna(method='ffill', limit=limit, axis=axis)
@derived_from(pd.DataFrame)
def bfill(self, axis=None, limit=None):
return self.fillna(method='bfill', limit=limit, axis=axis)
def sample(self, frac, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
replace: boolean, optional
Sample with or without replacement. Default = False.
random_state: int or ``np.random.RandomState``
If int, we create a new RandomState with this as the seed;
otherwise we draw from the passed RandomState.
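Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame:
>>> sampled = ddf.sample(frac=0.1, random_state=42)  # doctest: +SKIP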
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
"""
if random_state is None:
random_state = np.random.RandomState()
name = 'sample-' + tokenize(self, frac, replace, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
for i, state in enumerate(state_data)}
return new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions)
def to_hdf(self, path_or_buf, key, mode='a', append=False, get=None, **kwargs):
""" See dd.to_hdf docstring for more information """
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, get=get, **kwargs)
def to_parquet(self, path, *args, **kwargs):
""" See dd.to_parquet docstring for more information """
from .io import to_parquet
return to_parquet(path, self, *args, **kwargs)
def to_csv(self, filename, **kwargs):
""" See dd.to_csv docstring for more information """
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_delayed(self):
""" See dd.to_delayed docstring for more information """
return to_delayed(self)
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int, str, offset
Size of the moving window. This is the number of observations used
for calculating the statistic. The window size must not be so large
as to span more than one adjacent partition. If using an offset
or offset alias like '5D', the data must have a ``DatetimeIndex``
.. versionchanged:: 0.15.0
Now accepts offsets and string offset aliases
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
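Examples
--------
A minimal sketch, assuming ``ddf`` is a dask DataFrame with a numeric column ``x``:
>>> ddf.x.rolling(3).mean().compute()  # doctest: +SKIP
>>> ddf.rolling(3).sum().compute()  # doctest: +SKIP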
"""
from dask.dataframe.rolling import Rolling
if isinstance(window, int):
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, int):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
@derived_from(pd.DataFrame)
def diff(self, periods=1, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, int):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.diff, token='diff', periods=periods,
axis=1)
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.diff, before, after, token='diff',
periods=periods)
@derived_from(pd.DataFrame)
def shift(self, periods=1, freq=None, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, int):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, axis=1)
if freq is None:
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.shift, before, after, token='shift',
periods=periods)
# Let pandas error on invalid arguments
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, meta=meta)
return maybe_shift_divisions(out, periods, freq=freq)
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
return self.map_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
else:
result = self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def abs(self):
meta = self._meta_nonempty.abs()
return self.map_partitions(M.abs, meta=meta)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('all', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('any', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('sum', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def prod(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('prod', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('max', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('min', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
result = self.reduction(M.count, aggregate=M.sum, meta=meta,
token=token, split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
result = map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'var'
result = map_partitions(methods.var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'std'
return map_partitions(np.sqrt, v, meta=meta, token=name)
@derived_from(pd.DataFrame)
def sem(self, axis=None, skipna=None, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
if axis == 1:
return map_partitions(M.sem, self, meta=meta,
token=self._token_prefix + 'sem',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'sem'
result = map_partitions(np.sqrt, v / n, meta=meta, token=name)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
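Examples
--------
Illustrative only, assuming ``ddf`` is a dask DataFrame with numeric columns:
>>> ddf.quantile(0.25).compute()  # doctest: +SKIP
>>> ddf.quantile([0.25, 0.75]).compute()  # doctest: +SKIP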
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return map_partitions(M.quantile, self, q, axis,
token=keyname, meta=(q, 'f8'))
else:
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[_q.dask for _q in quantiles])
qnames = [(_q._name, 0) for _q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(keyname, 0)] = (pd.Series, qnames, num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, keyname, meta, divisions)
else:
dask[(keyname, 0)] = (methods.concat, qnames, 1)
return DataFrame(dask, keyname, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self, split_every=False):
# currently, only numeric describe is supported
num = self._get_numeric_data()
if self.ndim == 2 and len(num.columns) == 0:
raise ValueError("DataFrame contains only non-numeric data.")
elif self.ndim == 1 and self.dtype == 'object':
raise ValueError("Cannot compute ``describe`` on object dtype.")
stats = [num.count(split_every=split_every),
num.mean(split_every=split_every),
num.std(split_every=split_every),
num.min(split_every=split_every),
num.quantile([0.25, 0.5, 0.75]),
num.max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
dsk = merge(num.dask, *(s.dask for s in stats))
dsk[(name, 0)] = (methods.describe_aggregate, stats_names)
return new_dd_object(dsk, name, num._meta, divisions=[None, None])
def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
# aggregate each cumulated partition with the previous partition's last element
dask = {}
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dask[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return new_dd_object(merge(dask, cumpart.dask, cumlast.dask),
name, chunk(self._meta), self.divisions)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instance,
# passing map_partitions via keyword will not be aligned
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def astype(self, dtype):
meta = self._meta.astype(dtype)
meta = clear_known_categories(meta)
return self.map_partitions(M.astype, dtype=dtype, meta=meta)
@derived_from(pd.Series)
def append(self, other):
# because DataFrame.append will override this method, wrap it
# with the pd.Series.append docstring
from .multi import concat
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
return concat([self, other], join='outer', interleave_partitions=False)
@derived_from(pd.DataFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.map_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = dict(((name1, i), (getitem, key, 0))
for i, key in enumerate(aligned._keys()))
dsk1.update(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)
name2 = 'align2-' + token
dsk2 = dict(((name2, i), (getitem, key, 1))
for i, key in enumerate(aligned._keys()))
dsk2.update(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
return result1, result2
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value, overwrite=overwrite)
@derived_from(pd.DataFrame)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, how=None, closed=None, label=None):
from .tseries.resample import _resample
return _resample(self, rule, how=how, closed=closed, label=label)
@derived_from(pd.DataFrame)
def first(self, offset):
# Let pandas error on bad args
self._meta_nonempty.first(offset)
if not self.known_divisions:
raise ValueError("`first` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[0] + offset
end = self.loc._get_partitions(date)
include_right = offset.isAnchored() or not hasattr(offset, '_inc')
if end == self.npartitions - 1:
divs = self.divisions
else:
divs = self.divisions[:end + 1] + (date,)
name = 'first-' + tokenize(self, offset)
dsk = {(name, i): (self._name, i) for i in range(end)}
dsk[(name, end)] = (methods.boundary_slice, (self._name, end),
None, date, include_right, True, 'ix')
return new_dd_object(merge(self.dask, dsk), name, self, divs)
@derived_from(pd.DataFrame)
def last(self, offset):
# Let pandas error on bad args
self._meta_nonempty.last(offset)
if not self.known_divisions:
raise ValueError("`last` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[-1] - offset
start = self.loc._get_partitions(date)
if start == 0:
divs = self.divisions
else:
divs = (date,) + self.divisions[start + 1:]
name = 'last-' + tokenize(self, offset)
dsk = {(name, i + 1): (self._name, j + 1)
for i, j in enumerate(range(start, self.npartitions))}
dsk[(name, 0)] = (methods.boundary_slice, (self._name, start),
date, None, True, False, 'ix')
return new_dd_object(merge(self.dask, dsk), name, self, divs)
def nunique_approx(self, split_every=None):
"""Approximate number of unique rows.
This method uses the HyperLogLog algorithm for cardinality
estimation to compute the approximate number of unique rows.
The approximate error is 0.406%.
Parameters
----------
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is 8.
Returns
-------
a float representing the approximate number of elements
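Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame:
>>> ddf.nunique_approx().compute()  # doctest: +SKIP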
"""
from . import hyperloglog # here to avoid circular import issues
return aca([self], chunk=hyperloglog.compute_hll_array,
combine=hyperloglog.reduce_state,
aggregate=hyperloglog.estimate_count,
split_every=split_every, b=16, meta=float)
@property
def values(self):
""" Return a dask.array of the values of this dataframe
Warning: This creates a dask.array without precise shape information.
Operations that depend on shape information, like slicing or reshaping,
will not work.
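For example (illustrative only, assuming ``ddf`` is an existing dask DataFrame):
>>> arr = ddf.values  # doctest: +SKIP
>>> arr.sum().compute()  # doctest: +SKIP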
"""
from ..array.core import Array
name = 'values-' + tokenize(self)
chunks = ((np.nan,) * self.npartitions,)
x = self._meta.values
if isinstance(self, DataFrame):
chunks = chunks + ((x.shape[1],),)
suffix = (0,)
else:
suffix = ()
dsk = {(name, i) + suffix: (getattr, key, 'values')
for (i, key) in enumerate(self._keys())}
return Array(merge(self.dask, dsk), name, chunks, x.dtype)
normalize_token.register((Scalar, _Frame), lambda a: a._name)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_token_prefix = 'series-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.Series(array, index=index, name=self.name)
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
return DatetimeAccessor(self)
@cache_readonly
def cat(self):
return CategoricalAccessor(self)
@cache_readonly
def str(self):
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
# Remove the `cat` and `str` accessors if not available. We can't
# decide this statically for the `dt` accessor, as it works on
# datetime-like things as well.
for accessor in ['cat', 'str']:
if not hasattr(self._meta, accessor):
o.remove(accessor)
return list(o)
@property
def nbytes(self):
return self.reduction(methods.nbytes, np.sum, token='nbytes',
meta=int, split_every=False)
@property
def _repr_data(self):
return _repr_data_series(self._meta, self._repr_divisions)
def __repr__(self):
""" have to overwrite footer """
if self.name is not None:
footer = "Name: {name}, dtype: {dtype}".format(name=self.name,
dtype=self.dtype)
else:
footer = "dtype: {dtype}".format(dtype=self.dtype)
return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=self.to_string(),
footer=footer,
name=key_split(self._name),
task=len(self.dask))
@derived_from(pd.Series)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def quantile(self, q=0.5):
""" Approximate quantiles of Series
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
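For example, assuming ``s`` is an existing dask Series:
>>> s.quantile(0.5).compute()  # doctest: +SKIP
>>> s.quantile([0.25, 0.75]).compute()  # doctest: +SKIP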
"""
return quantile(self, q)
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self._meta, self.divisions)
raise NotImplementedError()
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, by=None, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, by=by, **kwargs)
@derived_from(pd.Series)
def count(self, split_every=False):
return super(Series, self).count(split_every=split_every)
def unique(self, split_every=None, split_out=1):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
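Examples
--------
A minimal sketch, assuming ``s`` is an existing dask Series:
>>> s.unique().compute()  # doctest: +SKIP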
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', split_every=split_every,
series_name=self.name, split_out=split_out)
@derived_from(pd.Series)
def nunique(self, split_every=None):
return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(self, split_every=None, split_out=1):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
combine=methods.value_counts_combine,
meta=self._meta.value_counts(), token='value-counts',
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def nsmallest(self, n=5, split_every=None):
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token='series-nsmallest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(M.isin, self, list(other))
@insert_meta_param_description(pad=12)
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if not (isinstance(arg, (pd.Series, dict)) or callable(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = dict(((name, i), (M.map, k, arg, na_action)) for i, k in
enumerate(self._keys()))
dsk.update(self.dask)
if meta is no_default:
meta = _emulate(M.map, self, arg, na_action=na_action)
else:
meta = make_meta(meta)
return Series(dsk, name, meta, self.divisions)
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
# `out` is only accepted because np.clip may pass it; it must be None
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.Series)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.Series)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Series, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(pd.Series)
def combine(self, other, func, fill_value=None):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value)
@derived_from(pd.Series)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@derived_from(pd.Series)
def to_string(self, max_rows=5):
# display options set via pd.option_context do not affect this output
return self._repr_data.to_string(max_rows=max_rows)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, level=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Series or DataFrame if func returns a Series.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, **kwds)
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None, split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True, split_every=split_every)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None,
split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True,
split_every=split_every)
@derived_from(pd.Series)
def autocorr(self, lag=1, split_every=False):
if not isinstance(lag, int):
raise TypeError("lag must be an integer")
return self.corr(self if lag == 0 else self.shift(lag),
split_every=split_every)
@derived_from(pd.Series)
def memory_usage(self, index=True, deep=False):
from ..delayed import delayed
result = self.map_partitions(M.memory_usage, index=index, deep=deep)
return delayed(sum)(result.to_delayed())
class Index(Series):
_partition_type = pd.Index
_token_prefix = 'index-'
_dt_attributes = {'nanosecond', 'microsecond', 'millisecond', 'dayofyear',
'minute', 'hour', 'day', 'dayofweek', 'second', 'week',
'weekday', 'weekofyear', 'month', 'quarter', 'year'}
_cat_attributes = {'known', 'as_known', 'as_unknown', 'add_categories',
'categories', 'remove_categories', 'reorder_categories',
'as_ordered', 'codes', 'remove_unused_categories',
'set_categories', 'as_unordered', 'ordered',
'rename_categories'}
def __getattr__(self, key):
if is_categorical_dtype(self.dtype) and key in self._cat_attributes:
return getattr(self.cat, key)
elif key in self._dt_attributes:
return getattr(self.dt, key)
raise AttributeError("'Index' object has no attribute %r" % key)
def __dir__(self):
out = super(Index, self).__dir__()
out.extend(self._dt_attributes)
if is_categorical_dtype(self.dtype):
out.extend(self._cat_attributes)
return out
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
return pd.Index(array, name=self.name)
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat, this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
@derived_from(pd.Index)
def max(self, split_every=False):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max',
split_every=split_every)
@derived_from(pd.Index)
def min(self, split_every=False):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int,
split_every=split_every)
@derived_from(pd.Index)
def shift(self, periods=1, freq=None):
if isinstance(self._meta, pd.PeriodIndex):
if freq is not None:
raise ValueError("PeriodIndex doesn't accept `freq` argument")
meta = self._meta_nonempty.shift(periods)
out = self.map_partitions(M.shift, periods, meta=meta,
token='shift')
else:
# Pandas will raise for other index types that don't implement shift
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, periods, token='shift',
meta=meta, freq=freq)
if freq is None:
freq = meta.freq
return maybe_shift_divisions(out, periods, freq=freq)
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.DataFrame(array, index=index, columns=self.columns)
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
renamed = _rename_dask(self, columns)
self._meta = renamed._meta
self._name = renamed._name
self.dask.update(renamed.dask)
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key) or isinstance(key, tuple):
if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
return self.loc[key]
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, list):
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = {(name, i): (M._getitem_array, (self._name, i), (key._name, i))
for i in range(self.npartitions)}
return new_dd_object(merge(self.dask, key.dask, dsk), name,
self, self.divisions)
raise NotImplementedError(key)
def __setitem__(self, key, value):
if isinstance(key, (tuple, list)):
df = self.assign(**{k: value[c]
for k, c in zip(key, value.columns)})
else:
df = self.assign(**{key: value})
self.dask = df.dask
self._name = df._name
self._meta = df._meta
def __delitem__(self, key):
result = self.drop([key], axis=1)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def __setattr__(self, key, value):
try:
columns = object.__getattribute__(self, '_meta').columns
except AttributeError:
columns = ()
if key in columns:
self[key] = value
else:
object.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self.columns:
meta = self._meta[key]
name = 'getitem-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, pd.compat.string_types) and
pd.compat.isidentifier(c)))
return list(o)
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(pd.DataFrame)
def get_dtype_counts(self):
return self._meta.get_dtype_counts()
@derived_from(pd.DataFrame)
def get_ftype_counts(self):
return self._meta.get_ftype_counts()
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, drop=True, sorted=False, npartitions=None,
divisions=None, **kwargs):
"""Set the DataFrame index (row labels) using an existing column
This realigns the dataset to be sorted by a new column. This can have a
significant impact on performance, because joins, groupbys, lookups, etc.
are all much faster on that column. However, this performance increase
comes at a cost: sorting a parallel dataset requires expensive shuffles.
Often we ``set_index`` once directly after data ingest and filtering and
then perform many cheap computations off of the sorted dataset.
This function operates exactly like ``pandas.set_index`` except with
different performance costs (it is much more expensive). Under normal
operation this function does an initial pass over the index column to
compute approximate quantiles to serve as future divisions. It then passes
over the data a second time, splitting up each input partition into several
pieces and sharing those pieces to all of the output partitions now in
sorted order.
In some cases we can alleviate those costs, for example if your dataset is
sorted already then we can avoid making many small pieces or if you know
good values to split the new index column then we can avoid the initial
pass over the data. For example if your new index is a datetime index and
your data is already sorted by day then this entire operation can be done
for free. You can control these options with the following parameters.
Parameters
----------
df: Dask DataFrame
index: string or Dask Series
npartitions: int, None, or 'auto'
The ideal number of output partitions. If None use the same as
the input. If 'auto' then decide by memory use.
shuffle: string, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` for
distributed operation. Will be inferred by your current scheduler.
sorted: bool, optional
If the index column is already sorted in increasing order.
Defaults to False
divisions: list, optional
Known values on which to separate index values of the partitions.
See http://dask.pydata.org/en/latest/dataframe-design.html#partitions
Defaults to computing this with a single pass over the data. Note
that if ``sorted=True``, specified divisions are assumed to match
the existing partitions in the data. If this is untrue, you should
leave divisions empty and call ``repartition`` after ``set_index``.
compute: bool
Whether or not to trigger an immediate computation. Defaults to False.
Examples
--------
>>> df2 = df.set_index('x') # doctest: +SKIP
>>> df2 = df.set_index(d.x) # doctest: +SKIP
>>> df2 = df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
A common case is when we have a datetime column that we know to be
sorted and is cleanly divided by day. We can set this index for free
by specifying both that the column is pre-sorted and the particular
divisions along which it is separated:
>>> import pandas as pd
>>> divisions = pd.date_range('2000', '2010', freq='1D')
>>> df2 = df.set_index('timestamp', sorted=True, divisions=divisions) # doctest: +SKIP
"""
pre_sorted = sorted
del sorted
if divisions is not None:
check_divisions(divisions)
if pre_sorted:
from .shuffle import set_sorted_index
return set_sorted_index(self, other, drop=drop, divisions=divisions,
**kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, npartitions=npartitions,
divisions=divisions, **kwargs)
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nlargest'
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def nsmallest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nsmallest'
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def groupby(self, by=None, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, by=by, **kwargs)
@wraps(categorize)
def categorize(self, columns=None, index=None, split_every=None, **kwargs):
return categorize(self, columns=columns, index=index,
split_every=split_every, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
for k, v in kwargs.items():
if not (isinstance(v, (Series, Scalar, pd.Series)) or
np.isscalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._meta.assign(**_extract_meta(kwargs))
return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return self.map_partitions(M.rename, None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that it is applied to each
partition, potentially in many threads. This may conflict with ``numexpr``,
which will use multiple threads itself. We recommend that you set numexpr
to use a single thread:
import numexpr
numexpr.set_nthreads(1)
The original docstring follows below:\n
""" + (pd.DataFrame.query.__doc__
if pd.DataFrame.query.__doc__ is not None else '')
name = 'query-%s' % tokenize(self, expr)
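# One pandas ``query`` task is built per partition; when keyword arguments are
# present the task uses ``apply`` so the keywords can be forwarded to
# pd.DataFrame.query.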
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, M.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (M.query, (self._name, i), expr))
for i in range(self.npartitions))
meta = self._meta.query(expr, **kwargs)
return new_dd_object(merge(dsk, self.dask), name,
meta, self.divisions)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
return self.map_partitions(M.dropna, how=how, subset=subset)
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.DataFrame)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.DataFrame)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
"""
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.DataFrame)
def to_string(self, max_rows=5):
# pandas option_context doesn't affect this, so display options are passed explicitly
return self._repr_data.to_string(max_rows=max_rows,
show_dimensions=False)
def _get_numeric_data(self, how='any', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return self.map_partitions(M._get_numeric_data,
meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, errors='raise'):
axis = self._validate_axis(axis)
if axis == 1:
return self.map_partitions(M.drop, labels, axis=axis, errors=errors)
raise NotImplementedError("Drop currently only works for axis=1")
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
npartitions=npartitions, indicator=indicator,
shuffle=shuffle)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
msg = ('Unable to append dd.Series to dd.DataFrame. '
'Use pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.itertuples():
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis in (1, 'columns'):
# When axis=1 and other is a series, `other` is transposed
# and the operator is applied broadcast across rows. This
# isn't supported with dd.Series.
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
elif isinstance(other, pd.Series):
# Special case for pd.Series to avoid unwanted partitioning
# of other. We pass it in as a kwarg to prevent this.
meta = _emulate(op, self, other=other, axis=axis,
fill_value=fill_value)
return map_partitions(op, self, other=other, meta=meta,
axis=axis, fill_value=fill_value)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, axis='columns', level=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, axis=0, args=(), meta=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function row-wise, passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
if axis == 0:
msg = ("dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
raise NotImplementedError(msg)
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
axis=axis, args=args, **kwds)
return map_partitions(M.apply, self, func, axis,
False, False, None, args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta='__no_default__'):
return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None, split_every=False):
return cov_corr(self, min_periods, split_every=split_every)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None, split_every=False):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True, split_every=split_every)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame.
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self))]
if len(self.columns) == 0:
lines.append('Index: 0 entries')
lines.append('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({'index': self.index, 'count': self.count()})
if memory_usage:
computations.update({'memory_usage': self.map_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
if verbose:
index = computations['index']
counts = computations['count']
lines.append(index.summary())
lines.append('Data columns (total {} columns):'.format(len(self.columns)))
if PANDAS_VERSION >= '0.20.0':
from pandas.io.formats.printing import pprint_thing
else:
from pandas.formats.printing import pprint_thing
space = max([len(pprint_thing(k)) for k in self.columns]) + 3
column_template = '{!s:<%d} {} non-null {}' % space
column_info = [column_template.format(pprint_thing(x[0]), x[1], x[2])
for x in zip(self.columns, counts, self.dtypes)]
else:
column_info = [self.columns.summary(name='Columns')]
lines.extend(column_info)
dtype_counts = ['%s(%d)' % k for k in sorted(self.dtypes.value_counts().iteritems(), key=str)]
lines.append('dtypes: {}'.format(', '.join(dtype_counts)))
if memory_usage:
memory_int = computations['memory_usage'].sum()
lines.append('memory usage: {}\n'.format(memory_repr(memory_int)))
put_lines(buf, lines)
@derived_from(pd.DataFrame)
def memory_usage(self, index=True, deep=False):
result = self.map_partitions(M.memory_usage, index=index, deep=deep)
result = result.groupby(result.index).sum()
return result
def pivot_table(self, index=None, columns=None,
values=None, aggfunc='mean'):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, ``values`` and ``aggfunc`` must all be scalar.
Parameters
----------
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
"""
from .reshape import pivot_table
return pivot_table(self, index=index, columns=columns, values=values,
aggfunc=aggfunc)
def to_records(self, index=False):
from .io import to_records
return to_records(self)
@derived_from(pd.DataFrame)
def to_html(self, max_rows=5):
# pd.Series doesn't have html repr
data = self._repr_data.to_html(max_rows=max_rows,
show_dimensions=False)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
@property
def _repr_data(self):
meta = self._meta
index = self._repr_divisions
values = {c: _repr_data_series(meta[c], index) for c in meta.columns}
return pd.DataFrame(values, columns=meta.columns)
_HTML_FMT = """<div><strong>Dask DataFrame Structure:</strong></div>
{data}
<div>Dask Name: {name}, {task} tasks</div>"""
def _repr_html_(self):
data = self._repr_data.to_html(max_rows=5,
show_dimensions=False, notebook=True)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
for name in ['lt', 'gt', 'le', 'ge', 'ne', 'eq']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_comparison_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_comparison_method(name, meth)
def is_broadcastable(dfs, s):
"""
This Series is broadcastable against another dataframe in the sequence
"""
return (isinstance(s, Series) and
s.npartitions == 1 and
s.known_divisions and
any(s.divisions == (min(df.columns), max(df.columns))
for df in dfs if isinstance(df, DataFrame)))
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
meta = kwargs.pop('meta', no_default)
_name = funcname(op) + '-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
_is_broadcastable = partial(is_broadcastable, dfs)
dfs = list(remove(_is_broadcastable, dfs))
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
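# `other` holds (position, value) pairs for the non-dask arguments so that
# partial_by_order can splice them back into the call around the per-partition
# dask values.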
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar) or _is_broadcastable(d)
else d._keys() for d in dasks]
if other:
dsk = {(_name, i):
(apply, partial_by_order, list(frs),
{'function': op, 'other': other})
for i, frs in enumerate(zip(*keys))}
else:
dsk = {(_name, i): (op,) + frs for i, frs in enumerate(zip(*keys))}
dsk = merge(dsk, *[d.dask for d in dasks])
if meta is no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
meta = _emulate(op, *args, **kwargs)
return new_dd_object(dsk, _name, meta, divisions)
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
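# hash_shard splits one pandas chunk into `nparts` groups by hashing either the
# raw rows or the object returned by `split_out_setup` (e.g. the index or a
# column subset) and taking the hash modulo `nparts`. It returns a dict mapping
# shard id to the matching rows, e.g. hash_shard(df, 2) -> {0: <rows>, 1: <rows>}.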
def hash_shard(df, nparts, split_out_setup=None, split_out_setup_kwargs=None):
if split_out_setup:
h = split_out_setup(df, **(split_out_setup_kwargs or {}))
else:
h = df
h = hash_pandas_object(h, index=False)
if isinstance(h, pd.Series):
h = h._values
h %= nparts
return {i: df.iloc[h == i] for i in range(nparts)}
def split_evenly(df, k):
""" Split dataframe into k roughly equal parts """
divisions = np.linspace(0, len(df), k + 1).astype(int)
return {i: df.iloc[divisions[i]: divisions[i + 1]] for i in range(k)}
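# The two helpers below are ready-made `split_out_setup` callables for
# hash_shard: one hashes on the index (flattening a MultiIndex into columns
# first), the other hashes on a chosen subset of columns.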
def split_out_on_index(df):
h = df.index
if isinstance(h, pd.MultiIndex):
h = pd.DataFrame([], index=h).reset_index()
return h
def split_out_on_cols(df, cols=None):
return df[cols]
@insert_meta_param_description
def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
meta=no_default, token=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None,
split_every=None, split_out=None, split_out_setup=None,
split_out_setup_kwargs=None, **kwargs):
"""Apply a function to blocks, then concat, then apply again
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function concatenated-block -> block
Function to operate on the concatenated result of chunk
combine : function concatenated-block -> block, optional
Function to operate on intermediate concatenated results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
split_out : int, optional
Number of output partitions. Split occurs after first chunk reduction.
split_out_setup : callable, optional
If provided, this function is called on each chunk before performing
the hash-split. It should return a pandas object, where each row
(excluding the index) is hashed. If not provided, the chunk is hashed
as is.
split_out_setup_kwargs : dict, optional
Keywords for the `split_out_setup` function only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
npartitions = set(arg.npartitions for arg in args
if isinstance(arg, _Frame))
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(token or (chunk, aggregate), meta, args,
chunk_kwargs, aggregate_kwargs, combine_kwargs,
split_every, split_out, split_out_setup,
split_out_setup_kwargs)
# Chunk
a = '{0}-chunk-{1}'.format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {(a, 0, i, 0): (chunk, key) for i, key in enumerate(args[0]._keys())}
else:
dsk = {(a, 0, i, 0): (apply, chunk,
[(x._name, i) if isinstance(x, _Frame)
else x for x in args], chunk_kwargs)
for i in range(args[0].npartitions)}
# Split
if split_out and split_out > 1:
split_prefix = 'split-%s' % token_key
shard_prefix = 'shard-%s' % token_key
for i in range(args[0].npartitions):
dsk[(split_prefix, i)] = (hash_shard, (a, 0, i, 0), split_out,
split_out_setup, split_out_setup_kwargs)
for j in range(split_out):
dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)
a = shard_prefix
else:
split_out = 1
# Combine
b = '{0}-combine-{1}'.format(token or funcname(combine), token_key)
k = npartitions
depth = 0
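# Tree reduction: while more than `split_every` chunks remain, group them in
# batches of `split_every`, concatenate each batch, and apply `combine`.
# Intermediate keys have the form (name, depth, group_index, shard_index).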
while k > split_every:
for part_i, inds in enumerate(partition_all(split_every, range(k))):
for j in range(split_out):
conc = (_concat, [(a, depth, i, j) for i in inds])
if combine_kwargs:
dsk[(b, depth + 1, part_i, j)] = (apply, combine, [conc], combine_kwargs)
else:
dsk[(b, depth + 1, part_i, j)] = (combine, conc)
k = part_i + 1
a = b
depth += 1
# Aggregate
for j in range(split_out):
b = '{0}-agg-{1}'.format(token or funcname(aggregate), token_key)
conc = (_concat, [(a, depth, i, j) for i in range(k)])
if aggregate_kwargs:
dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, j)] = (aggregate, conc)
if meta is no_default:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [_concat([meta_chunk])],
aggregate_kwargs)
meta = make_meta(meta)
for arg in args:
if isinstance(arg, _Frame):
dsk.update(arg.dask)
divisions = [None] * (split_out + 1)
return new_dd_object(dsk, b, meta, divisions)
aca = apply_concat_apply
def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
if isinstance(x, Scalar):
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, _Frame):
if (isinstance(x, Series) and
x.npartitions == 1 and
x.known_divisions): # may be broadcastable
return x._meta
else:
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x, nonempty) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_meta(x[k], nonempty)
return res
else:
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. At least one of the
args should be a Dask.dataframe.
$META
"""
meta = kwargs.pop('meta', no_default)
if meta is not no_default:
meta = make_meta(meta)
assert callable(func)
if 'token' in kwargs:
name = kwargs.pop('token')
token = tokenize(meta, *args, **kwargs)
else:
name = funcname(func)
token = tokenize(func, meta, *args, **kwargs)
name = '{0}-{1}'.format(name, token)
from .multi import _maybe_align_partitions
args = _maybe_from_pandas(args)
args = _maybe_align_partitions(args)
if meta is no_default:
meta = _emulate(func, *args, **kwargs)
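# If every dask argument is a Scalar, the result is a single task that applies
# `func` to the scalar values and is itself wrapped as a Scalar.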
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name, meta)
elif not isinstance(meta, (pd.Series, pd.DataFrame, pd.Index)):
# If `meta` is not a pandas object, the concatenated results will be a
# different type
meta = _concat([meta])
meta = make_meta(meta)
dfs = [df for df in args if isinstance(df, _Frame)]
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
dsk[(name, i)] = (apply_and_enforce, func, values, kwargs, meta)
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return new_dd_object(merge(dsk, *dasks), name, meta, args[0].divisions)
def apply_and_enforce(func, args, kwargs, meta):
"""Apply a function, and enforce the output to match meta
Ensures the output has the same columns, even if empty."""
df = func(*args, **kwargs)
if isinstance(df, (pd.DataFrame, pd.Series, pd.Index)):
if len(df) == 0:
return meta
c = meta.columns if isinstance(df, pd.DataFrame) else meta.name
return _rename(c, df)
return df
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
"""
assert not isinstance(df, _Frame)
if columns is no_default:
return df
if isinstance(columns, Iterator):
columns = list(columns)
if isinstance(df, pd.DataFrame):
if isinstance(columns, pd.DataFrame):
columns = columns.columns
if not isinstance(columns, pd.Index):
columns = pd.Index(columns)
if (len(columns) == len(df.columns) and
type(columns) is type(df.columns) and
columns.equals(df.columns)):
# if target is identical, rename is not necessary
return df
# deep=False doesn't copy any data/indices, so this is cheap
df = df.copy(deep=False)
df.columns = columns
return df
elif isinstance(df, (pd.Series, pd.Index)):
if isinstance(columns, (pd.Series, pd.Index)):
columns = columns.name
if df.name == columns:
return df
return df.rename(columns)
# map_partition may pass other types
return df
def _rename_dask(df, names):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
Internally used to overwrite dd.DataFrame.columns and dd.Series.name.
We can't use map_partitions because it applies the function and then renames.
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
names : tuple, string
Column names/Series name
"""
assert isinstance(df, _Frame)
metadata = _rename(names, df._meta)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = {}
for i in range(df.npartitions):
dsk[name, i] = (_rename, metadata, (df._name, i))
return new_dd_object(merge(dsk, df.dask), name, metadata, df.divisions)
def quantile(df, q):
"""Approximate quantiles of Series.
Parameters
----------
q : list/array of floats
Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
assert isinstance(df, Series)
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(df, Index):
meta = pd.Series(df._meta_nonempty).quantile(q)
else:
meta = df._meta_nonempty.quantile(q)
if isinstance(meta, pd.Series):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
return_type = Series
else:
finalize_tsk = lambda tsk: (getitem, tsk, 0)
return_type = Scalar
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df._meta, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
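# Three stages: compute the requested percentiles within each partition,
# record each partition's length (used as a weight), then merge everything
# with merge_percentiles into a single approximate result.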
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): finalize_tsk((merge_percentiles, qs,
[qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, meta, new_divisions)
def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is False.
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
if split_every is False:
split_every = df.npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
df = df._get_numeric_data()
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
token = tokenize(df, min_periods, scalar, split_every)
funcname = 'corr' if corr else 'cov'
a = '{0}-chunk-{1}'.format(funcname, df._name)
dsk = {(a, i): (cov_corr_chunk, f, corr)
for (i, f) in enumerate(df._keys())}
prefix = '{0}-combine-{1}-'.format(funcname, df._name)
k = df.npartitions
b = a
depth = 0
while k > split_every:
b = prefix + str(depth)
for part_i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)
k = part_i + 1
a = b
depth += 1
name = '{0}-{1}'.format(funcname, token)
dsk[(name, 0)] = (cov_corr_agg, [(a, i) for i in range(k)],
df.columns, min_periods, corr, scalar)
dsk.update(df.dask)
if scalar:
return Scalar(dsk, name, 'f8')
meta = make_meta([(c, 'f8') for c in df.columns], index=df.columns)
return DataFrame(dsk, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation"""
mat = df.values
mask = np.isfinite(mat)
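# keep[r, i, j] is True when row r is finite in both column i and column j;
# x[r, i, j] holds column j's value for row r only where that pair is valid,
# so the sums/counts below are computed per column pair, matching pandas'
# NA handling in cov/corr.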
keep = np.bitwise_and(mask[:, None, :], mask[:, :, None])
x = np.where(keep, mat[:, None, :], np.nan)
sums = np.nansum(x, 0)
counts = keep.astype('int').sum(0)
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
m = np.nansum((x - sums / np.where(counts, counts, np.nan)) ** 2, 0)
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_combine(data, corr=False):
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
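# Standard pairwise merge of partial (co)variance results: d is the difference
# between each block's mean and the running mean of the preceding blocks, and
# the cross term n1*n2/(n1+n2) * d*d^T corrects the concatenated covariance
# sums for that shift in means.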
d = (s2 / n2) - (s1 / n1)
C = (np.nansum((n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
out = np.empty(C.shape, dtype=data.dtype)
out['sum'] = cum_sums[-1]
out['count'] = cum_counts[-1]
out['cov'] = C
if corr:
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m = np.nansum(data['m'] + counts * (sums / counts_na - mu) ** 2,
axis=0)
out['m'] = m
return out
def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):
out = cov_corr_combine(data, corr)
counts = out['count']
C = out['cov']
C[counts < min_periods] = np.nan
if corr:
m2 = out['m']
den = np.sqrt(m2 * m2.T)
else:
den = np.where(counts, counts, np.nan) - 1
mat = C / den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=cols, index=cols)
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
"""
if skipna is False:
return a.iloc[-1]
else:
# take last valid value excluding NaN; the NaN location may differ
# in each column
group_dummy = np.ones(len(a.index))
last_row = a.groupby(group_dummy).last()
if isinstance(a, pd.DataFrame):
return pd.Series(last_row.values[0], index=a.columns)
else:
return last_row.values[0]
def check_divisions(divisions):
if not isinstance(divisions, (list, tuple)):
raise ValueError('New division must be list or tuple')
divisions = list(divisions)
if divisions != sorted(divisions):
raise ValueError('New division must be sorted')
if len(divisions[:-1]) != len(list(unique(divisions[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions' lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
check_divisions(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
raise ValueError('New division must have at least 2 elements')
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal to '
'or smaller than the old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal to '
'or larger than the old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
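# Second pass: walk the temporary splits (out1) and group those falling between
# consecutive new divisions, concatenating each group into one output (out2)
# partition; an empty group gets a dummy zero-length slice so the original
# columns / name metadata is preserved.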
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, tmp)
j += 1
return d
def repartition_freq(df, freq=None):
""" Repartition a timeseries dataframe by a new frequency """
freq = pd.Timedelta(freq)
if not isinstance(df.divisions[0], pd.Timestamp):
raise TypeError("Can only repartition on frequency for timeseries")
divisions = pd.DatetimeIndex(start=df.divisions[0].ceil(freq),
end=df.divisions[-1],
freq=freq).tolist()
if divisions[-1] != df.divisions[-1]:
divisions.append(df.divisions[-1])
if divisions[0] != df.divisions[0]:
divisions = [df.divisions[0]] + divisions
return df.repartition(divisions=divisions)
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
new_name = 'repartition-%d-%s' % (npartitions, tokenize(df))
if df.npartitions == npartitions:
return df
elif df.npartitions > npartitions:
npartitions_ratio = df.npartitions / npartitions
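# Fewer output partitions: choose evenly spaced boundaries over the old
# partition indices and concatenate each contiguous run of old partitions
# into one new partition.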
new_partitions_boundaries = [int(new_partition_index * npartitions_ratio)
for new_partition_index in range(npartitions + 1)]
dsk = {}
for new_partition_index in range(npartitions):
value = (pd.concat, [(df._name, old_partition_index)
for old_partition_index in
range(new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])])
dsk[new_name, new_partition_index] = value
divisions = [df.divisions[new_partition_index]
for new_partition_index in new_partitions_boundaries]
return new_dd_object(merge(df.dask, dsk), new_name, df._meta, divisions)
else:
original_divisions = divisions = pd.Series(df.divisions)
if (df.known_divisions and (np.issubdtype(divisions.dtype, np.datetime64) or
np.issubdtype(divisions.dtype, np.number))):
if np.issubdtype(divisions.dtype, np.datetime64):
divisions = divisions.values.astype('float64')
if isinstance(divisions, pd.Series):
divisions = divisions.values
n = len(divisions)
divisions = np.interp(x=np.linspace(0, n, npartitions + 1),
xp=np.linspace(0, n, n),
fp=divisions)
if np.issubdtype(original_divisions.dtype, np.datetime64):
divisions = pd.Series(divisions).astype(original_divisions.dtype).tolist()
elif np.issubdtype(original_divisions.dtype, np.integer):
divisions = divisions.astype(original_divisions.dtype)
if isinstance(divisions, np.ndarray):
divisions = tuple(divisions.tolist())
return df.repartition(divisions=divisions)
else:
ratio = npartitions / df.npartitions
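# More output partitions without usable numeric divisions: split each input
# partition into roughly `ratio` even pieces (split_evenly), giving the last
# input partition whatever count is needed to reach exactly `npartitions`.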
split_name = 'split-%s' % tokenize(df, npartitions)
dsk = {}
last = 0
j = 0
for i in range(df.npartitions):
new = last + ratio
if i == df.npartitions - 1:
k = npartitions - j
else:
k = int(new - last)
dsk[(split_name, i)] = (split_evenly, (df._name, i), k)
for jj in range(k):
dsk[(new_name, j)] = (getitem, (split_name, i), jj)
j += 1
last = new
divisions = [None] * (npartitions + 1)
return new_dd_object(merge(df.dask, dsk), new_name, df._meta, divisions)
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions' lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return new_dd_object(merge(df.dask, dsk), out,
df._meta, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return new_dd_object(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
def _reduction_chunk(x, aca_chunk=None, **kwargs):
o = aca_chunk(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_combine(x, aca_combine=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
o = aca_combine(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
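# idxmax/idxmin helpers: each chunk reduces to a small frame of per-column
# candidate ('idx', 'value') pairs; combine regroups the candidates by column
# and keeps the best one, and agg finally returns just the winning index labels.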
def idxmaxmin_chunk(x, fn=None, skipna=True):
idx = getattr(x, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x, minmax)(skipna=skipna)
if isinstance(x, pd.DataFrame):
return pd.DataFrame({'idx': idx, 'value': value})
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_row(x, fn=None, skipna=True):
x = x.set_index('idx')
idx = getattr(x.value, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x.value, minmax)(skipna=skipna)
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_combine(x, fn=None, skipna=True):
return (x.groupby(level=0)
.apply(idxmaxmin_row, fn=fn, skipna=skipna)
.reset_index(level=1, drop=True))
def idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):
res = idxmaxmin_combine(x, fn, skipna=skipna)['idx']
if scalar:
return res[0]
res.name = None
return res
def safe_head(df, n):
r = df.head(n=n)
if len(r) != n:
msg = ("Insufficient elements for `head`. {0} elements "
"requested, only {1} elements available. Try passing larger "
"`npartitions` to `head`.")
warnings.warn(msg.format(n, len(r)))
return r
def maybe_shift_divisions(df, periods, freq):
"""Maybe shift divisions by periods of size freq
Used to shift the divisions for the `shift` method. If freq isn't a fixed
size (not anchored or relative), then the divisions are shifted
appropriately. Otherwise the divisions are cleared.
Parameters
----------
df : dd.DataFrame, dd.Series, or dd.Index
periods : int
The number of periods to shift.
freq : DateOffset, timedelta, or time rule string
The frequency to shift by.
"""
if isinstance(freq, str):
freq = pd.tseries.frequencies.to_offset(freq)
if (isinstance(freq, pd.DateOffset) and
(freq.isAnchored() or not hasattr(freq, 'delta'))):
# Can't infer divisions on relative or anchored offsets, as
# divisions may now split identical index value.
# (e.g. index_partitions = [[1, 2, 3], [3, 4, 5]])
return df.clear_divisions()
if df.known_divisions:
divs = pd.Series(range(len(df.divisions)), index=df.divisions)
divisions = divs.shift(periods, freq=freq).index
return type(df)(df.dask, df._name, df._meta, divisions)
return df
def to_delayed(df):
""" Create Dask Delayed objects from a Dask Dataframe
Returns a list of delayed values, one value per partition.
Examples
--------
>>> partitions = df.to_delayed() # doctest: +SKIP
"""
from ..delayed import Delayed
return [Delayed(k, df.dask) for k in df._keys()]
@wraps(pd.to_datetime)
def to_datetime(arg, **kwargs):
meta = pd.Series([pd.Timestamp('2000')])
return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)
def _repr_data_series(s, index):
"""A helper for creating the ``_repr_data`` property"""
npartitions = len(index) - 1
if is_categorical_dtype(s):
if has_known_categories(s):
dtype = 'category[known]'
else:
dtype = 'category[unknown]'
else:
dtype = str(s.dtype)
return pd.Series([dtype] + ['...'] * npartitions, index=index, name=s.name)
if PY3:
_Frame.to_delayed.__doc__ = to_delayed.__doc__
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/core.py",
"copies": "1",
"size": "143916",
"license": "bsd-3-clause",
"hash": -8130388364455913000,
"line_mean": 36.6349372385,
"line_max": 102,
"alpha_frac": 0.5709858529,
"autogenerated": false,
"ratio": 3.927731230042848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4998717082942848,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from functools import wraps, partial
import operator
from operator import getitem, setitem
from pprint import pformat
import uuid
import warnings
from toolz import merge, first, unique, partition_all, remove
import pandas as pd
from pandas.util.decorators import cache_readonly
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import apply, operator_div, bind_method, PY3
from ..utils import (random_state_data,
pseudorandom, derived_from, funcname, memory_repr,
put_lines, M, key_split)
from ..base import Base, tokenize, normalize_token
from . import methods
from .accessor import DatetimeAccessor, StringAccessor
from .categorical import CategoricalAccessor, categorize
from .hashing import hash_pandas_object
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error, clear_known_categories,
is_categorical_dtype, has_known_categories)
no_default = '__no_default__'
pd.computation.expressions.set_use_numexpr(False)
def _concat(args):
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if not isinstance(args[0], (pd.DataFrame, pd.Series, pd.Index)):
try:
return pd.Series(args)
except:
return args
# We filter out empty partitions here because pandas frequently has
# inconsistent dtypes in results between empty and non-empty frames.
# Ideally this would be handled locally for each operation, but in practice
# this seems easier. TODO: don't do this.
args2 = [i for i in args if len(i)]
return args[0] if not args2 else methods.concat(args2, uniform=True)
def _get_return_type(meta):
if isinstance(meta, _Frame):
meta = meta._meta
if isinstance(meta, pd.Series):
return Series
elif isinstance(meta, pd.DataFrame):
return DataFrame
elif isinstance(meta, pd.Index):
return Index
return Scalar
def new_dd_object(dsk, _name, meta, divisions):
"""Generic constructor for dask.dataframe objects.
Decides the appropriate output class based on the type of `meta` provided.
"""
return _get_return_type(meta)(dsk, _name, meta, divisions)
def optimize(dsk, keys, **kwargs):
from .optimize import optimize
return optimize(dsk, keys, **kwargs)
def finalize(results):
return _concat(results)
class Scalar(Base):
""" A Dask object to represent a pandas scalar"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(first)
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
self.dask = dsk
self._name = name
meta = make_meta(meta)
if isinstance(meta, (pd.DataFrame, pd.Series, pd.Index)):
raise TypeError("Expected meta to specify scalar, got "
"{0}".format(type(meta).__name__))
self._meta = meta
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
def _keys(self):
return [self.key]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
return Scalar(merge(dsk, self.dask), name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
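# _scalar_binary builds a one-task graph applying `op` between this Scalar's key
# and `other` (another Scalar's key, a literal, or a pandas object); meta is
# inferred by running `op` on non-empty meta stand-ins, and the return type
# follows the type of `other`.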
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dsk = self.dask
return_type = _get_return_type(other)
if isinstance(other, Scalar):
dsk = merge(dsk, other.dask)
other_key = (other._name, 0)
elif isinstance(other, Base):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
if return_type is not Scalar:
return return_type(dsk, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(dsk, name, meta)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, meta, divisions):
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not isinstance(meta, self._partition_type):
raise TypeError("Expected meta to specify type {0}, got type "
"{1}".format(self._partition_type.__name__,
type(meta).__name__))
self._meta = meta
self.divisions = tuple(divisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def size(self):
return self.reduction(methods.size, np.sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def copy(self):
""" Make a copy of the dataframe
This is strictly a shallow copy of the underlying computational graph.
It does not affect the underlying data.
"""
return new_dd_object(self.dask, self._name,
self._meta, self.divisions)
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def __array__(self, dtype=None, **kwargs):
self._computed = self.compute()
x = np.array(self._computed)
return x
def __array_wrap__(self, array, context=None):
raise NotImplementedError
@property
def _elemwise(self):
return elemwise
@property
def _repr_data(self):
raise NotImplementedError
@property
def _repr_divisions(self):
name = "npartitions={0}".format(self.npartitions)
if self.known_divisions:
divisions = pd.Index(self.divisions, name=name)
else:
# avoid being converted to NaN
divisions = pd.Index(['None'] * (self.npartitions + 1),
name=name)
return divisions
def __repr__(self):
data = self._repr_data.to_string(max_rows=5, show_dimensions=False)
return """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=data, name=key_split(self._name),
task=len(self.dask))
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name,
self._meta.index, self.divisions)
def reset_index(self, drop=False):
"""Reset the index to the default index.
Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will
not be monotonically increasing from 0. Instead, it will restart at 0
for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
This is due to the inability to statically know the full length of the
index.
For DataFrame with multi-level index, returns a new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
drop : boolean, default False
Do not try to insert index into dataframe columns.
"""
return self.map_partitions(M.reset_index, drop=drop).clear_divisions()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n + 2]
return new_dd_object(merge(self.dask, dsk), name,
self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
warnings.warn("Deprecation Warning: The `cache` method is deprecated, "
"and will be removed in the next release. To achieve "
"the same behavior, either write to disk or use "
"`Client.persist`, from `dask.distributed`.")
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + uuid.uuid1().hex
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
self._get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
name = 'from-cache-' + self._name
dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))
for i, key in enumerate(self._keys()))
return new_dd_object(dsk2, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def drop_duplicates(self, split_every=None, split_out=1, **kwargs):
# Let pandas error on bad inputs
self._meta_nonempty.drop_duplicates(**kwargs)
if 'subset' in kwargs and kwargs['subset'] is not None:
split_out_setup = split_out_on_cols
split_out_setup_kwargs = {'cols': kwargs['subset']}
else:
split_out_setup = split_out_setup_kwargs = None
if kwargs.get('keep', True) is False:
raise NotImplementedError("drop_duplicates with keep=False")
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', split_every=split_every,
split_out=split_out, split_out_setup=split_out_setup,
split_out_setup_kwargs=split_out_setup_kwargs, **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int,
split_every=False).compute()
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def map_overlap(self, func, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
This can be useful for implementing windowing functions such as
``df.rolling(...).mean()`` or ``df.diff()``.
Parameters
----------
func : function
Function applied to each partition.
before : int
The number of rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int
The number of rows to append to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Notes
-----
Given positive integers ``before`` and ``after``, and a function
``func``, ``map_overlap`` does the following:
1. Prepend ``before`` rows to each partition ``i`` from the end of
partition ``i - 1``. The first partition has no rows prepended.
2. Append ``after`` rows to each partition ``i`` from the beginning of
partition ``i + 1``. The last partition has no rows appended.
3. Apply ``func`` to each partition, passing in any extra ``args`` and
``kwargs`` if provided.
4. Trim ``before`` rows from the beginning of all but the first
partition.
5. Trim ``after`` rows from the end of all but the last partition.
Note that the index and divisions are assumed to remain unchanged.
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
A rolling sum with a trailing moving window of size 2 can be computed by
overlapping 2 rows before each partition, and then mapping calls to
``df.rolling(2).sum()``:
>>> ddf.compute()
x y
0 1 1.0
1 2 2.0
2 4 3.0
3 7 4.0
4 11 5.0
>>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
x y
0 NaN NaN
1 3.0 3.0
2 6.0 5.0
3 11.0 7.0
4 18.0 9.0
The pandas ``diff`` method computes a discrete difference shifted by a
number of periods (can be positive or negative). This can be
implemented by mapping calls to ``df.diff`` to each partition after
prepending/appending that many rows, depending on sign:
>>> def diff(df, periods=1):
... before, after = (periods, 0) if periods > 0 else (0, -periods)
... return df.map_overlap(lambda df, periods=1: df.diff(periods),
...                           before, after, periods=periods)
>>> diff(ddf, 1).compute()
x y
0 NaN NaN
1 1.0 1.0
2 2.0 1.0
3 3.0 1.0
4 4.0 1.0
"""
from .rolling import map_overlap
return map_overlap(func, self, before, after, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def random_split(self, frac, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should sum to one.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
if not np.allclose(sum(frac), 1):
raise ValueError("frac should sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
dsk = {(name, i): (pd_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(len(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (getitem, (name, j), i)
for j in range(self.npartitions)}
out.append(type(self)(merge(self.dask, dsk, dsk2), name2,
self._meta, self.divisions))
return out
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
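Examples
--------
Illustrative sketch, assuming ``ddf`` is a dask DataFrame:
>>> ddf.head(3)  # doctest: +SKIP
If the first partition may hold fewer than ``n`` rows, search all
partitions:
>>> ddf.head(10, npartitions=-1)  # doctest: +SKIP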
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, head received {}"
raise ValueError(msg.format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (safe_head, concat, n)
else:
dsk = {(name, 0): (safe_head, (self._name, 0), n)}
result = new_dd_object(merge(self.dask, dsk), name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the last n rows of the last partition.
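Illustrative sketch, assuming ``ddf`` is a dask DataFrame:
>>> ddf.tail(3)  # doctest: +SKIP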
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
from .indexing import _LocIndexer
return _LocIndexer(self)
# NOTE: `iloc` is not implemented because of performance concerns.
# see https://github.com/dask/dask/pull/507
def repartition(self, divisions=None, npartitions=None, freq=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output, must be less than npartitions of
input. Only used if divisions isn't specified.
freq : str, pd.Timedelta
A period on which to partition timeseries data like ``'7D'`` or
``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
>>> df = df.repartition(freq='7d') # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
elif freq is not None:
return repartition_freq(self, freq=freq)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.DataFrame)
def fillna(self, value=None, method=None, limit=None, axis=None):
axis = self._validate_axis(axis)
if method is None and limit is not None:
raise NotImplementedError("fillna with set limit and method=None")
if isinstance(value, _Frame):
test_value = value._meta_nonempty.values[0]
else:
test_value = value
meta = self._meta_nonempty.fillna(value=test_value, method=method,
limit=limit, axis=axis)
if axis == 1 or method is None:
return self.map_partitions(M.fillna, value, method=method,
limit=limit, axis=axis, meta=meta)
if method in ('pad', 'ffill'):
method = 'ffill'
skip_check = 0
before, after = 1 if limit is None else limit, 0
else:
method = 'bfill'
skip_check = self.npartitions - 1
before, after = 0, 1 if limit is None else limit
if limit is None:
name = 'fillna-chunk-' + tokenize(self, method)
dsk = {(name, i): (methods.fillna_check, (self._name, i),
method, i != skip_check)
for i in range(self.npartitions)}
parts = new_dd_object(merge(dsk, self.dask), name, meta,
self.divisions)
else:
parts = self
return parts.map_overlap(M.fillna, before, after, method=method,
limit=limit, meta=meta)
@derived_from(pd.DataFrame)
def ffill(self, axis=None, limit=None):
return self.fillna(method='ffill', limit=limit, axis=axis)
@derived_from(pd.DataFrame)
def bfill(self, axis=None, limit=None):
return self.fillna(method='bfill', limit=limit, axis=axis)
def sample(self, frac, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
replace: boolean, optional
Sample with or without replacement. Default = False.
random_state: int or ``np.random.RandomState``
If int we create a new RandomState with this as the seed
Otherwise we draw from the passed RandomState
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
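Examples
--------
Illustrative sketch, assuming ``ddf`` is a dask DataFrame; the sampling
is applied independently within each partition:
>>> sampled = ddf.sample(frac=0.1, random_state=123)  # doctest: +SKIP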
"""
if random_state is None:
random_state = np.random.RandomState()
name = 'sample-' + tokenize(self, frac, replace, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
for i, state in enumerate(state_data)}
return new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions)
def to_hdf(self, path_or_buf, key, mode='a', append=False, get=None, **kwargs):
""" See dd.to_hdf docstring for more information """
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, get=get, **kwargs)
def to_parquet(self, path, *args, **kwargs):
""" See dd.to_parquet docstring for more information """
from .io import to_parquet
return to_parquet(path, self, *args, **kwargs)
def to_csv(self, filename, **kwargs):
""" See dd.to_csv docstring for more information """
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_delayed(self):
""" See dd.to_delayed docstring for more information """
return to_delayed(self)
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int
Size of the moving window. This is the number of observations used
for calculating the statistic. The window size must not be so large
as to span more than one adjacent partition.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
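Examples
--------
Illustrative sketch, assuming ``ddf`` is a dask DataFrame with a numeric
column ``x``:
>>> ddf.x.rolling(window=3).mean().compute()  # doctest: +SKIP
>>> ddf.rolling(window=5, min_periods=2).sum().compute()  # doctest: +SKIP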
"""
from dask.dataframe.rolling import Rolling
if not isinstance(window, int):
raise ValueError('window must be an integer')
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, int):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
@derived_from(pd.DataFrame)
def diff(self, periods=1, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, int):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.diff, token='diff', periods=periods,
axis=1)
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.diff, before, after, token='diff',
periods=periods)
@derived_from(pd.DataFrame)
def shift(self, periods=1, freq=None, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, int):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, axis=1)
if freq is None:
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.shift, before, after, token='shift',
periods=periods)
# Let pandas error on invalid arguments
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, meta=meta)
return maybe_shift_divisions(out, periods, freq=freq)
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
return self.map_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
else:
result = self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def abs(self):
meta = self._meta_nonempty.abs()
return self.map_partitions(M.abs, meta=meta)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('all', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('any', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('sum', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def prod(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('prod', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('max', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('min', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
result = self.reduction(M.count, aggregate=M.sum, meta=meta,
token=token, split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
result = map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'var'
result = map_partitions(methods.var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'std'
return map_partitions(np.sqrt, v, meta=meta, token=name)
@derived_from(pd.DataFrame)
def sem(self, axis=None, skipna=None, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
if axis == 1:
return map_partitions(M.sem, self, meta=meta,
token=self._token_prefix + 'sem',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'sem'
result = map_partitions(np.sqrt, v / n, meta=meta, token=name)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
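Examples
--------
Illustrative sketch, assuming ``ddf`` is a dask DataFrame with numeric
columns:
>>> ddf.quantile(0.25).compute()  # doctest: +SKIP
>>> ddf.quantile([0.25, 0.75]).compute()  # doctest: +SKIP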
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return map_partitions(M.quantile, self, q, axis,
token=keyname, meta=(q, 'f8'))
else:
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[_q.dask for _q in quantiles])
qnames = [(_q._name, 0) for _q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(keyname, 0)] = (pd.Series, qnames, num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, keyname, meta, divisions)
else:
dask[(keyname, 0)] = (methods.concat, qnames, 1)
return DataFrame(dask, keyname, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self, split_every=False):
# currently, only numeric describe is supported
num = self._get_numeric_data()
stats = [num.count(split_every=split_every),
num.mean(split_every=split_every),
num.std(split_every=split_every),
num.min(split_every=split_every),
num.quantile([0.25, 0.5, 0.75]),
num.max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
dsk = merge(num.dask, *(s.dask for s in stats))
dsk[(name, 0)] = (methods.describe_aggregate, stats_names)
return new_dd_object(dsk, name, num._meta, divisions=[None, None])
def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
# aggregate each cumulated partition with the previous partition's last element
dask = {}
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step in the graph to avoid recomputation
if i == 1:
dask[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return new_dd_object(merge(dask, cumpart.dask, cumlast.dask),
name, chunk(self._meta), self.divisions)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instance,
# passing map_partitions via keyword will not be aligned
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def astype(self, dtype):
meta = self._meta.astype(dtype)
meta = clear_known_categories(meta)
return self.map_partitions(M.astype, dtype=dtype, meta=meta)
@derived_from(pd.Series)
def append(self, other):
# because DataFrame.append will override the method,
# wrap by pd.Series.append docstring
from .multi import concat
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
return concat([self, other], join='outer', interleave_partitions=False)
@derived_from(pd.DataFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.map_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = dict(((name1, i), (getitem, key, 0))
for i, key in enumerate(aligned._keys()))
dsk1.update(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)
name2 = 'align2-' + token
dsk2 = dict(((name2, i), (getitem, key, 1))
for i, key in enumerate(aligned._keys()))
dsk2.update(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
return result1, result2
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value, overwrite=overwrite)
@derived_from(pd.DataFrame)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, how=None, closed=None, label=None):
from .tseries.resample import _resample
return _resample(self, rule, how=how, closed=closed, label=label)
@derived_from(pd.DataFrame)
def first(self, offset):
# Let pandas error on bad args
self._meta_nonempty.first(offset)
if not self.known_divisions:
raise ValueError("`first` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[0] + offset
end = self.loc._get_partitions(date)
include_right = offset.isAnchored() or not hasattr(offset, '_inc')
if end == self.npartitions - 1:
divs = self.divisions
else:
divs = self.divisions[:end + 1] + (date,)
name = 'first-' + tokenize(self, offset)
dsk = {(name, i): (self._name, i) for i in range(end)}
dsk[(name, end)] = (methods.boundary_slice, (self._name, end),
None, date, include_right, True, 'ix')
return new_dd_object(merge(self.dask, dsk), name, self, divs)
@derived_from(pd.DataFrame)
def last(self, offset):
# Let pandas error on bad args
self._meta_nonempty.last(offset)
if not self.known_divisions:
raise ValueError("`last` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[-1] - offset
start = self.loc._get_partitions(date)
if start == 0:
divs = self.divisions
else:
divs = (date,) + self.divisions[start + 1:]
name = 'last-' + tokenize(self, offset)
dsk = {(name, i + 1): (self._name, j + 1)
for i, j in enumerate(range(start, self.npartitions))}
dsk[(name, 0)] = (methods.boundary_slice, (self._name, start),
date, None, True, False, 'ix')
return new_dd_object(merge(self.dask, dsk), name, self, divs)
def nunique_approx(self, split_every=None):
"""Approximate number of unique rows.
This method uses the HyperLogLog algorithm for cardinality
estimation to compute the approximate number of unique rows.
The approximate error is 0.406%.
Parameters
----------
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is 8.
Returns
-------
a float representing the approximate number of elements
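Examples
--------
Illustrative sketch, assuming ``ddf`` is a dask DataFrame:
>>> ddf.nunique_approx().compute()  # doctest: +SKIP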
"""
from . import hyperloglog # here to avoid circular import issues
return aca([self], chunk=hyperloglog.compute_hll_array,
combine=hyperloglog.reduce_state,
aggregate=hyperloglog.estimate_count,
split_every=split_every, b=16, meta=float)
@property
def values(self):
""" Return a dask.array of the values of this dataframe
Warning: This creates a dask.array without precise shape information.
Operations that depend on shape information, like slicing or reshaping,
will not work.
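Illustrative sketch, assuming ``ddf`` is a dask DataFrame; operations that
do not need the row counts (such as elementwise math or a full ``sum``)
typically still work:
>>> arr = ddf.values  # doctest: +SKIP
>>> arr.sum().compute()  # doctest: +SKIP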
"""
from ..array.core import Array
name = 'values-' + tokenize(self)
chunks = ((np.nan,) * self.npartitions,)
x = self._meta.values
if isinstance(self, DataFrame):
chunks = chunks + ((x.shape[1],),)
suffix = (0,)
else:
suffix = ()
dsk = {(name, i) + suffix: (getattr, key, 'values')
for (i, key) in enumerate(self._keys())}
return Array(merge(self.dask, dsk), name, chunks, x.dtype)
normalize_token.register((Scalar, _Frame), lambda a: a._name)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_token_prefix = 'series-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.Series(array, index=index, name=self.name)
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
return DatetimeAccessor(self)
@cache_readonly
def cat(self):
return CategoricalAccessor(self)
@cache_readonly
def str(self):
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
# Remove the `cat` and `str` accessors if not available. We can't
# decide this statically for the `dt` accessor, as it works on
# datetime-like things as well.
for accessor in ['cat', 'str']:
if not hasattr(self._meta, accessor):
o.remove(accessor)
return list(o)
@property
def nbytes(self):
return self.reduction(methods.nbytes, np.sum, token='nbytes',
meta=int, split_every=False)
@property
def _repr_data(self):
return _repr_data_series(self._meta, self._repr_divisions)
def __repr__(self):
""" have to overwrite footer """
if self.name is not None:
footer = "Name: {name}, dtype: {dtype}".format(name=self.name,
dtype=self.dtype)
else:
footer = "dtype: {dtype}".format(dtype=self.dtype)
return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=self.to_string(),
footer=footer,
name=key_split(self._name),
task=len(self.dask))
@derived_from(pd.Series)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def quantile(self, q=0.5):
""" Approximate quantiles of Series
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
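Examples
--------
Illustrative sketch, assuming ``s`` is a dask Series:
>>> s.quantile(0.5).compute()  # doctest: +SKIP
>>> s.quantile([0.1, 0.9]).compute()  # doctest: +SKIP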
"""
return quantile(self, q)
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self._meta, self.divisions)
raise NotImplementedError()
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, by=None, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, by=by, **kwargs)
@derived_from(pd.Series)
def count(self, split_every=False):
return super(Series, self).count(split_every=split_every)
def unique(self, split_every=None, split_out=1):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
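Examples
--------
Illustrative sketch, assuming ``s`` is a dask Series:
>>> s.unique().compute()  # doctest: +SKIP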
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', split_every=split_every,
series_name=self.name, split_out=split_out)
@derived_from(pd.Series)
def nunique(self, split_every=None):
return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(self, split_every=None, split_out=1):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
combine=methods.value_counts_combine,
meta=self._meta.value_counts(), token='value-counts',
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def nsmallest(self, n=5, split_every=None):
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token='series-nsmallest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(M.isin, self, list(other))
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if not (isinstance(arg, (pd.Series, dict)) or callable(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = dict(((name, i), (M.map, k, arg, na_action)) for i, k in
enumerate(self._keys()))
dsk.update(self.dask)
if meta is no_default:
meta = _emulate(M.map, self, arg, na_action=na_action)
else:
meta = make_meta(meta)
return Series(dsk, name, meta, self.divisions)
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
# np.clip may pass out
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.Series)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.Series)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Series, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(pd.Series)
def combine(self, other, func, fill_value=None):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value)
@derived_from(pd.Series)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@derived_from(pd.Series)
def to_string(self, max_rows=5):
# option_context doesn't affect
return self._repr_data.to_string(max_rows=max_rows)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, level=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default,
name=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
name : list, scalar or None, optional
Deprecated, use `meta` instead. If a list is given, the result is a
DataFrame whose columns are the specified list. Otherwise, the result is
a Series whose name is the given scalar or None (no name). If the name
keyword is not given, dask tries to infer the result type from the
beginning of the data. This inference may take some time and lead to
unexpected results.
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Series or DataFrame if func returns a Series.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if name is not no_default:
warnings.warn("`name` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(name, (pd.DataFrame, pd.Series)):
meta = name
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, **kwds)
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None, split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True, split_every=split_every)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None,
split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True,
split_every=split_every)
@derived_from(pd.Series)
def autocorr(self, lag=1, split_every=False):
if not isinstance(lag, int):
raise TypeError("lag must be an integer")
return self.corr(self if lag == 0 else self.shift(lag),
split_every=split_every)
@derived_from(pd.Series)
def memory_usage(self, index=True, deep=False):
from ..delayed import delayed
result = self.map_partitions(M.memory_usage, index=index, deep=deep)
return delayed(sum)(result.to_delayed())
class Index(Series):
_partition_type = pd.Index
_token_prefix = 'index-'
_dt_attributes = {'nanosecond', 'microsecond', 'millisecond', 'dayofyear',
'minute', 'hour', 'day', 'dayofweek', 'second', 'week',
'weekday', 'weekofyear', 'month', 'quarter', 'year'}
_cat_attributes = {'known', 'as_known', 'as_unknown', 'add_categories',
'categories', 'remove_categories', 'reorder_categories',
'as_ordered', 'codes', 'remove_unused_categories',
'set_categories', 'as_unordered', 'ordered',
'rename_categories'}
def __getattr__(self, key):
if is_categorical_dtype(self.dtype) and key in self._cat_attributes:
return getattr(self.cat, key)
elif key in self._dt_attributes:
return getattr(self.dt, key)
raise AttributeError("'Index' object has no attribute %r" % key)
def __dir__(self):
out = super(Index, self).__dir__()
out.extend(self._dt_attributes)
if is_categorical_dtype(self.dtype):
out.extend(self._cat_attributes)
return out
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
return pd.Index(array, name=self.name)
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat: this only checks the first partition.
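Illustrative sketch, assuming ``ddf`` is a dask DataFrame:
>>> ddf.index.head(3)  # doctest: +SKIP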
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
@derived_from(pd.Index)
def max(self, split_every=False):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max',
split_every=split_every)
@derived_from(pd.Index)
def min(self, split_every=False):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int,
split_every=split_every)
@derived_from(pd.Index)
def shift(self, periods=1, freq=None):
if isinstance(self._meta, pd.PeriodIndex):
if freq is not None:
raise ValueError("PeriodIndex doesn't accept `freq` argument")
meta = self._meta_nonempty.shift(periods)
out = self.map_partitions(M.shift, periods, meta=meta,
token='shift')
else:
# Pandas will raise for other index types that don't implement shift
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, periods, token='shift',
meta=meta, freq=freq)
if freq is None:
freq = meta.freq
return maybe_shift_divisions(out, periods, freq=freq)
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
index = context[1][0].index
return pd.DataFrame(array, index=index, columns=self.columns)
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
renamed = _rename_dask(self, columns)
self._meta = renamed._meta
self._name = renamed._name
self.dask.update(renamed.dask)
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key) or isinstance(key, tuple):
if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
return self.loc[key]
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, list):
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = {(name, i): (M._getitem_array, (self._name, i), (key._name, i))
for i in range(self.npartitions)}
return new_dd_object(merge(self.dask, key.dask, dsk), name,
self, self.divisions)
raise NotImplementedError(key)
def __setitem__(self, key, value):
if isinstance(key, (tuple, list)):
df = self.assign(**{k: value[c]
for k, c in zip(key, value.columns)})
else:
df = self.assign(**{key: value})
self.dask = df.dask
self._name = df._name
self._meta = df._meta
def __delitem__(self, key):
result = self.drop([key], axis=1)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def __setattr__(self, key, value):
try:
columns = object.__getattribute__(self, '_meta').columns
except AttributeError:
columns = ()
if key in columns:
self[key] = value
else:
object.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self.columns:
meta = self._meta[key]
name = 'getitem-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, pd.compat.string_types) and
pd.compat.isidentifier(c)))
return list(o)
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(pd.DataFrame)
def get_dtype_counts(self):
return self._meta.get_dtype_counts()
@derived_from(pd.DataFrame)
def get_ftype_counts(self):
return self._meta.get_ftype_counts()
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, drop=True, sorted=False, npartitions=None, **kwargs):
"""
Set the DataFrame index (row labels) using an existing column
This realigns the dataset to be sorted by a new column. This can have a
significant impact on performance, because joins, groupbys, lookups, etc.
are all much faster on that column. However, this performance increase
comes with a cost: sorting a parallel dataset requires expensive shuffles.
Often we ``set_index`` once directly after data ingest and filtering and
then perform many cheap computations off of the sorted dataset.
This function operates exactly like ``pandas.set_index`` except with
different performance costs (it is much more expensive). Under normal
operation this function does an initial pass over the index column to
compute approximate quantiles to serve as future divisions. It then passes
over the data a second time, splitting up each input partition into several
pieces and sending those pieces to all of the output partitions, now in
sorted order.
In some cases we can alleviate those costs, for example if your dataset is
sorted already then we can avoid making many small pieces or if you know
good values to split the new index column then we can avoid the initial
pass over the data. For example if your new index is a datetime index and
your data is already sorted by day then this entire operation can be done
for free. You can control these options with the following parameters.
Parameters
----------
df: Dask DataFrame
index: string or Dask Series
npartitions: int, None, or 'auto'
The ideal number of output partitions. If None use the same as
the input. If 'auto' then decide by memory use.
shuffle: string, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` for
distributed operation. Will be inferred by your current scheduler.
sorted: bool, optional
If the index column is already sorted in increasing order.
Defaults to False
divisions: list, optional
Known values on which to separate index values of the partitions.
See http://dask.pydata.org/en/latest/dataframe-design.html#partitions
Defaults to computing this with a single pass over the data
compute: bool
Whether or not to trigger an immediate computation. Defaults to True.
Examples
--------
>>> df2 = df.set_index('x') # doctest: +SKIP
>>> df2 = df.set_index(d.x) # doctest: +SKIP
>>> df2 = df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
A common case is when we have a datetime column that we know to be
sorted and is cleanly divided by day. We can set this index for free
by specifying both that the column is pre-sorted and the particular
        divisions along which it is separated
>>> import pandas as pd
>>> divisions = pd.date_range('2000', '2010', freq='1D')
>>> df2 = df.set_index('timestamp', sorted=True, divisions=divisions) # doctest: +SKIP
"""
if sorted:
from .shuffle import set_sorted_index
return set_sorted_index(self, other, drop=drop, **kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, npartitions=npartitions,
**kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See Also
--------
set_index
"""
raise Exception("Deprecated, use set_index(..., divisions=...) instead")
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nlargest'
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def nsmallest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nsmallest'
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def groupby(self, by=None, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, by=by, **kwargs)
@wraps(categorize)
def categorize(self, columns=None, index=None, split_every=None, **kwargs):
return categorize(self, columns=columns, index=index,
split_every=split_every, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
for k, v in kwargs.items():
if not (isinstance(v, (Series, Scalar, pd.Series)) or
np.isscalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._meta.assign(**_extract_meta(kwargs))
return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return self.map_partitions(M.rename, None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
The original docstring follows below:\n
""" + (pd.DataFrame.query.__doc__
if pd.DataFrame.query.__doc__ is not None else '')
name = 'query-%s' % tokenize(self, expr)
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, M.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (M.query, (self._name, i), expr))
for i in range(self.npartitions))
meta = self._meta.query(expr, **kwargs)
return new_dd_object(merge(dsk, self.dask), name,
meta, self.divisions)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
return self.map_partitions(M.dropna, how=how, subset=subset)
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.DataFrame)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.DataFrame)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
"""
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.DataFrame)
def to_string(self, max_rows=5):
# option_context doesn't affect
return self._repr_data.to_string(max_rows=max_rows,
show_dimensions=False)
def _get_numeric_data(self, how='any', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return self.map_partitions(M._get_numeric_data,
meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, errors='raise'):
axis = self._validate_axis(axis)
if axis == 1:
return self.map_partitions(M.drop, labels, axis=axis, errors=errors)
raise NotImplementedError("Drop currently only works for axis=1")
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
npartitions=npartitions, indicator=indicator,
shuffle=shuffle)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
            msg = ('Unable to append dd.Series to dd.DataFrame. '
                   'Use pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.itertuples():
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis in (1, 'columns'):
# When axis=1 and other is a series, `other` is transposed
# and the operator is applied broadcast across rows. This
# isn't supported with dd.Series.
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
elif isinstance(other, pd.Series):
# Special case for pd.Series to avoid unwanted partitioning
# of other. We pass it in as a kwarg to prevent this.
meta = _emulate(op, self, other=other, axis=axis,
fill_value=fill_value)
return map_partitions(op, self, other=other, meta=meta,
axis=axis, fill_value=fill_value)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, axis='columns', level=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, axis=0, args=(), meta=no_default,
columns=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
columns : list, scalar or None
            Deprecated, please use `meta` instead. If a list is given, the
            result is a DataFrame whose columns are the specified list.
            Otherwise, the result is a Series whose name is the given scalar or
            None (no name). If neither is given, dask tries to infer the result
            type from a small sample of the data. This inference may take some
            time and lead to unexpected results.
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
        Apply a function row-wise, passing in extra arguments in ``args`` and
        ``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
if axis == 0:
msg = ("dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
raise NotImplementedError(msg)
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
axis=axis, args=args, **kwds)
return map_partitions(M.apply, self, func, axis,
False, False, None, args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta='__no_default__'):
return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None, split_every=False):
return cov_corr(self, min_periods, split_every=split_every)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None, split_every=False):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True, split_every=split_every)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame.
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self))]
if len(self.columns) == 0:
lines.append('Index: 0 entries')
lines.append('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({'index': self.index, 'count': self.count()})
if memory_usage:
computations.update({'memory_usage': self.map_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
if verbose:
index = computations['index']
counts = computations['count']
lines.append(index.summary())
lines.append('Data columns (total {} columns):'.format(len(self.columns)))
from pandas.formats.printing import pprint_thing
space = max([len(pprint_thing(k)) for k in self.columns]) + 3
column_template = '{!s:<%d} {} non-null {}' % space
column_info = [column_template.format(pprint_thing(x[0]), x[1], x[2])
for x in zip(self.columns, counts, self.dtypes)]
else:
column_info = [self.columns.summary(name='Columns')]
lines.extend(column_info)
dtype_counts = ['%s(%d)' % k for k in sorted(self.dtypes.value_counts().iteritems(), key=str)]
lines.append('dtypes: {}'.format(', '.join(dtype_counts)))
if memory_usage:
memory_int = computations['memory_usage'].sum()
lines.append('memory usage: {}\n'.format(memory_repr(memory_int)))
put_lines(buf, lines)
@derived_from(pd.DataFrame)
def memory_usage(self, index=True, deep=False):
result = self.map_partitions(M.memory_usage, index=index, deep=deep)
result = result.groupby(result.index).sum()
return result
def pivot_table(self, index=None, columns=None,
values=None, aggfunc='mean'):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
        ``index``, ``columns``, ``values`` and ``aggfunc`` must all be scalar.
Parameters
----------
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
"""
from .reshape import pivot_table
return pivot_table(self, index=index, columns=columns, values=values,
aggfunc=aggfunc)
def to_records(self, index=False):
from .io import to_records
return to_records(self)
@derived_from(pd.DataFrame)
def to_html(self, max_rows=5):
# pd.Series doesn't have html repr
data = self._repr_data.to_html(max_rows=max_rows,
show_dimensions=False)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
@property
def _repr_data(self):
meta = self._meta
index = self._repr_divisions
values = {c: _repr_data_series(meta[c], index) for c in meta.columns}
return pd.DataFrame(values, columns=meta.columns)
_HTML_FMT = """<div><strong>Dask DataFrame Structure:</strong></div>
{data}
<div>Dask Name: {name}, {task} tasks</div>"""
def _repr_html_(self):
data = self._repr_data.to_html(max_rows=5,
show_dimensions=False, notebook=True)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
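# A minimal, hedged sketch of two patterns documented in the class above:
# ``set_index`` on a pre-sorted column with known ``divisions`` (which skips the
# expensive two-pass shuffle) and row-wise ``apply`` with an explicit ``meta``
# (which skips sample-based inference). The data, divisions and function below
# are illustrative assumptions only.
def _example_set_index_and_apply():
    import pandas as pd
    import dask.dataframe as dd
    pdf = pd.DataFrame({'value': range(10),
                        'timestamp': pd.date_range('2000-01-01', periods=10, freq='D')})
    ddf = dd.from_pandas(pdf, npartitions=2)
    # Cheap set_index: the column is already sorted and the divisions are known.
    divisions = [pd.Timestamp('2000-01-01'),
                 pd.Timestamp('2000-01-06'),
                 pd.Timestamp('2000-01-10')]
    ddf2 = ddf.set_index('timestamp', sorted=True, divisions=divisions)
    # Row-wise apply with explicit meta, so no inference on sample data is needed.
    res = ddf2.apply(lambda row: row['value'] + 1, axis=1, meta=('value', 'i8'))
    return ddf2, res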
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
for name in ['lt', 'gt', 'le', 'ge', 'ne', 'eq']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_comparison_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_comparison_method(name, meth)
def is_broadcastable(dfs, s):
"""
    Whether ``s`` is a Series that can be broadcast against the DataFrames in ``dfs``
"""
return (isinstance(s, Series) and
s.npartitions == 1 and
s.known_divisions and
any(s.divisions == (min(df.columns), max(df.columns))
for df in dfs if isinstance(df, DataFrame)))
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
meta = kwargs.pop('meta', no_default)
_name = funcname(op) + '-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
_is_broadcastable = partial(is_broadcastable, dfs)
dfs = list(remove(_is_broadcastable, dfs))
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar) or _is_broadcastable(d)
else d._keys() for d in dasks]
if other:
dsk = {(_name, i):
(apply, partial_by_order, list(frs),
{'function': op, 'other': other})
for i, frs in enumerate(zip(*keys))}
else:
dsk = {(_name, i): (op,) + frs for i, frs in enumerate(zip(*keys))}
dsk = merge(dsk, *[d.dask for d in dasks])
if meta is no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
meta = _emulate(op, *args, **kwargs)
return new_dd_object(dsk, _name, meta, divisions)
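# A small sketch of how ``elemwise`` above is typically invoked: the operation is
# applied partition-wise, with plain scalars passed through and pandas objects
# wrapped as documented. The frame and the ``operator.add`` call are illustrative.
def _example_elemwise():
    import pandas as pd
    import dask.dataframe as dd
    ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4]}), npartitions=2)
    shifted = elemwise(operator.add, ddf.x, 10)  # equivalent to ``ddf.x + 10``
    return shifted.compute()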
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
def hash_shard(df, nparts, split_out_setup=None, split_out_setup_kwargs=None):
if split_out_setup:
h = split_out_setup(df, **(split_out_setup_kwargs or {}))
else:
h = df
h = hash_pandas_object(h, index=False)
if isinstance(h, pd.Series):
h = h._values
h %= nparts
return {i: df.iloc[h == i] for i in range(nparts)}
def split_evenly(df, k):
""" Split dataframe into k roughly equal parts """
divisions = np.linspace(0, len(df), k + 1).astype(int)
return {i: df.iloc[divisions[i]: divisions[i + 1]] for i in range(k)}
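# A brief sketch of the two shard helpers above on plain pandas data:
# ``split_evenly`` slices a frame into k contiguous, roughly equal pieces, while
# ``hash_shard`` routes each row to one of ``nparts`` outputs by hashing its values.
# The toy frame is illustrative only.
def _example_sharding():
    import pandas as pd
    pdf = pd.DataFrame({'x': range(6)})
    even = split_evenly(pdf, 3)   # {0: rows 0-1, 1: rows 2-3, 2: rows 4-5}
    hashed = hash_shard(pdf, 2)   # {0: ..., 1: ...}, membership decided by row hashes
    return even, hashed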
def split_out_on_index(df):
h = df.index
if isinstance(h, pd.MultiIndex):
h = pd.DataFrame([], index=h).reset_index()
return h
def split_out_on_cols(df, cols=None):
return df[cols]
@insert_meta_param_description
def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
meta=no_default, token=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None,
split_every=None, split_out=None, split_out_setup=None,
split_out_setup_kwargs=None, **kwargs):
"""Apply a function to blocks, then concat, then apply again
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function concatenated-block -> block
Function to operate on the concatenated result of chunk
combine : function concatenated-block -> block, optional
Function to operate on intermediate concatenated results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
split_out : int, optional
Number of output partitions. Split occurs after first chunk reduction.
split_out_setup : callable, optional
If provided, this function is called on each chunk before performing
the hash-split. It should return a pandas object, where each row
(excluding the index) is hashed. If not provided, the chunk is hashed
as is.
split_out_setup_kwargs : dict, optional
Keywords for the `split_out_setup` function only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
npartitions = set(arg.npartitions for arg in args
if isinstance(arg, _Frame))
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(token or (chunk, aggregate), meta, args,
chunk_kwargs, aggregate_kwargs, combine_kwargs,
split_every, split_out, split_out_setup,
split_out_setup_kwargs)
# Chunk
a = '{0}-chunk-{1}'.format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {(a, 0, i, 0): (chunk, key) for i, key in enumerate(args[0]._keys())}
else:
dsk = {(a, 0, i, 0): (apply, chunk,
[(x._name, i) if isinstance(x, _Frame)
else x for x in args], chunk_kwargs)
for i in range(args[0].npartitions)}
# Split
if split_out and split_out > 1:
split_prefix = 'split-%s' % token_key
shard_prefix = 'shard-%s' % token_key
for i in range(args[0].npartitions):
dsk[(split_prefix, i)] = (hash_shard, (a, 0, i, 0), split_out,
split_out_setup, split_out_setup_kwargs)
for j in range(split_out):
dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)
a = shard_prefix
else:
split_out = 1
# Combine
b = '{0}-combine-{1}'.format(token or funcname(combine), token_key)
k = npartitions
depth = 0
while k > split_every:
for part_i, inds in enumerate(partition_all(split_every, range(k))):
for j in range(split_out):
conc = (_concat, [(a, depth, i, j) for i in inds])
if combine_kwargs:
dsk[(b, depth + 1, part_i, j)] = (apply, combine, [conc], combine_kwargs)
else:
dsk[(b, depth + 1, part_i, j)] = (combine, conc)
k = part_i + 1
a = b
depth += 1
# Aggregate
for j in range(split_out):
b = '{0}-agg-{1}'.format(token or funcname(aggregate), token_key)
conc = (_concat, [(a, depth, i, j) for i in range(k)])
if aggregate_kwargs:
dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, j)] = (aggregate, conc)
if meta is no_default:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [_concat([meta_chunk])],
aggregate_kwargs)
meta = make_meta(meta)
for arg in args:
if isinstance(arg, _Frame):
dsk.update(arg.dask)
divisions = [None] * (split_out + 1)
return new_dd_object(dsk, b, meta, divisions)
aca = apply_concat_apply
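# A hedged sketch of ``apply_concat_apply`` computing a per-column sum: each chunk
# reduces its partition to a Series of partial sums, and the aggregate step groups
# the concatenated partials by column label. The lambdas, data and ``meta`` below
# are illustrative choices, not the library's own reduction implementation.
def _example_apply_concat_apply():
    import pandas as pd
    import dask.dataframe as dd
    pdf = pd.DataFrame({'x': range(8), 'y': range(8)})
    ddf = dd.from_pandas(pdf, npartitions=4)
    total = apply_concat_apply(
        [ddf],
        chunk=lambda df: df.sum(),                     # one partial-sum Series per partition
        aggregate=lambda s: s.groupby(level=0).sum(),  # combine partials per column label
        meta=pdf.sum(),
    )
    return total.compute()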
def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
if isinstance(x, Scalar):
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, _Frame):
if (isinstance(x, Series) and
x.npartitions == 1 and
x.known_divisions): # may be broadcastable
return x._meta
else:
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x, nonempty) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_meta(x[k], nonempty)
return res
else:
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. At least one of the
args should be a Dask.dataframe.
$META
"""
meta = kwargs.pop('meta', no_default)
if meta is not no_default:
meta = make_meta(meta)
assert callable(func)
if 'token' in kwargs:
name = kwargs.pop('token')
token = tokenize(meta, *args, **kwargs)
else:
name = funcname(func)
token = tokenize(func, meta, *args, **kwargs)
name = '{0}-{1}'.format(name, token)
from .multi import _maybe_align_partitions
args = _maybe_from_pandas(args)
args = _maybe_align_partitions(args)
if meta is no_default:
meta = _emulate(func, *args, **kwargs)
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name, meta)
elif not isinstance(meta, (pd.Series, pd.DataFrame, pd.Index)):
# If `meta` is not a pandas object, the concatenated results will be a
# different type
meta = _concat([meta])
meta = make_meta(meta)
dfs = [df for df in args if isinstance(df, _Frame)]
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
dsk[(name, i)] = (apply_and_enforce, func, values, kwargs, meta)
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return new_dd_object(merge(dsk, *dasks), name, meta, args[0].divisions)
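# A short sketch of ``map_partitions`` with an explicit ``meta``: the function runs
# once per partition and the supplied meta describes the output, so no sample-based
# inference is needed. Data and function are illustrative.
def _example_map_partitions():
    import pandas as pd
    import dask.dataframe as dd
    ddf = dd.from_pandas(pd.DataFrame({'x': [1.0, 2.0, 3.0, 4.0]}), npartitions=2)
    doubled = map_partitions(lambda df: df * 2, ddf, meta=ddf._meta)
    return doubled.compute()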
def apply_and_enforce(func, args, kwargs, meta):
"""Apply a function, and enforce the output to match meta
Ensures the output has the same columns, even if empty."""
df = func(*args, **kwargs)
if isinstance(df, (pd.DataFrame, pd.Series, pd.Index)):
if len(df) == 0:
return meta
c = meta.columns if isinstance(df, pd.DataFrame) else meta.name
return _rename(c, df)
return df
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
"""
assert not isinstance(df, _Frame)
if columns is no_default:
return df
if isinstance(columns, Iterator):
columns = list(columns)
if isinstance(df, pd.DataFrame):
if isinstance(columns, pd.DataFrame):
columns = columns.columns
if not isinstance(columns, pd.Index):
columns = pd.Index(columns)
if (len(columns) == len(df.columns) and
type(columns) is type(df.columns) and
columns.equals(df.columns)):
# if target is identical, rename is not necessary
return df
        # deep=False doesn't copy any data/indices, so this is cheap
df = df.copy(deep=False)
df.columns = columns
return df
elif isinstance(df, (pd.Series, pd.Index)):
if isinstance(columns, (pd.Series, pd.Index)):
columns = columns.name
if df.name == columns:
return df
return df.rename(columns)
# map_partition may pass other types
return df
def _rename_dask(df, names):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
    Internally used to overwrite dd.DataFrame.columns and dd.Series.name.
    We can't use map_partitions because it applies the function and then renames.
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
names : tuple, string
Column names/Series name
"""
assert isinstance(df, _Frame)
metadata = _rename(names, df._meta)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = {}
for i in range(df.npartitions):
dsk[name, i] = (_rename, metadata, (df._name, i))
return new_dd_object(merge(dsk, df.dask), name, metadata, df.divisions)
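# A tiny sketch of the pandas-level ``_rename`` helper above: it swaps in new column
# names (or a new Series name) without copying the underlying data. The names used
# here are arbitrary.
def _example_rename():
    import pandas as pd
    renamed = _rename(['x', 'y'], pd.DataFrame({'a': [1, 2], 'b': [3, 4]}))
    s = _rename('value', pd.Series([1, 2], name='old'))
    return renamed, s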
def quantile(df, q):
"""Approximate quantiles of Series.
Parameters
----------
q : list/array of floats
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
assert isinstance(df, Series)
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(df, Index):
meta = pd.Series(df._meta_nonempty).quantile(q)
else:
meta = df._meta_nonempty.quantile(q)
if isinstance(meta, pd.Series):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
return_type = Series
else:
finalize_tsk = lambda tsk: (getitem, tsk, 0)
return_type = Scalar
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df._meta, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): finalize_tsk((merge_percentiles, qs,
[qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, meta, new_divisions)
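# A brief sketch of the approximate quantile machinery above via the public API:
# ``Series.quantile`` builds its graph with this function, computing per-partition
# percentiles and then merging them. q is given pandas-style in [0, 1]; the data
# is illustrative.
def _example_quantile():
    import pandas as pd
    import dask.dataframe as dd
    s = dd.from_pandas(pd.Series(range(100), name='x'), npartitions=4)
    return s.quantile([0.25, 0.5, 0.75]).compute()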
def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is False.
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
if split_every is False:
split_every = df.npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
df = df._get_numeric_data()
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
token = tokenize(df, min_periods, scalar, split_every)
funcname = 'corr' if corr else 'cov'
a = '{0}-chunk-{1}'.format(funcname, df._name)
dsk = {(a, i): (cov_corr_chunk, f, corr)
for (i, f) in enumerate(df._keys())}
prefix = '{0}-combine-{1}-'.format(funcname, df._name)
k = df.npartitions
b = a
depth = 0
while k > split_every:
b = prefix + str(depth)
for part_i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)
k = part_i + 1
a = b
depth += 1
name = '{0}-{1}'.format(funcname, token)
dsk[(name, 0)] = (cov_corr_agg, [(a, i) for i in range(k)],
df.columns, min_periods, corr, scalar)
dsk.update(df.dask)
if scalar:
return Scalar(dsk, name, 'f8')
meta = make_meta([(c, 'f8') for c in df.columns], index=df.columns)
return DataFrame(dsk, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation"""
mat = df.values
mask = np.isfinite(mat)
keep = np.bitwise_and(mask[:, None, :], mask[:, :, None])
x = np.where(keep, mat[:, None, :], np.nan)
sums = np.nansum(x, 0)
counts = keep.astype('int').sum(0)
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
m = np.nansum((x - sums / np.where(counts, counts, np.nan)) ** 2, 0)
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_combine(data, corr=False):
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
d = (s2 / n2) - (s1 / n1)
C = (np.nansum((n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
out = np.empty(C.shape, dtype=data.dtype)
out['sum'] = cum_sums[-1]
out['count'] = cum_counts[-1]
out['cov'] = C
if corr:
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m = np.nansum(data['m'] + counts * (sums / counts_na - mu) ** 2,
axis=0)
out['m'] = m
return out
def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):
out = cov_corr_combine(data, corr)
counts = out['count']
C = out['cov']
C[counts < min_periods] = np.nan
if corr:
m2 = out['m']
den = np.sqrt(m2 * m2.T)
else:
den = np.where(counts, counts, np.nan) - 1
mat = C / den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=cols, index=cols)
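# A compact sketch of the chunk/combine/aggregate pipeline above through the public
# API: ``DataFrame.cov`` and ``DataFrame.corr`` build their graphs with ``cov_corr``.
# The small numeric frame is illustrative only.
def _example_cov_corr():
    import pandas as pd
    import dask.dataframe as dd
    pdf = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0, 5.0],
                        'b': [2.0, 1.0, 4.0, 3.0, 6.0]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    return ddf.cov().compute(), ddf.corr().compute()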
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
"""
if skipna is False:
return a.iloc[-1]
else:
# take last valid value excluding NaN, NaN location may be different
# in each columns
group_dummy = np.ones(len(a.index))
last_row = a.groupby(group_dummy).last()
if isinstance(a, pd.DataFrame):
return pd.Series(last_row.values[0], index=a.columns)
else:
return last_row.values[0]
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
if not isinstance(b, (list, tuple)):
raise ValueError('New division must be list or tuple')
b = list(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
        raise ValueError('New division must have at least 2 elements')
if b != sorted(b):
raise ValueError('New division must be sorted')
if len(b[:-1]) != len(list(unique(b[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal or smaller '
'than old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal or larger '
'than old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, tmp)
j += 1
return d
def repartition_freq(df, freq=None):
""" Repartition a timeseries dataframe by a new frequency """
freq = pd.Timedelta(freq)
if not isinstance(df.divisions[0], pd.Timestamp):
raise TypeError("Can only repartition on frequency for timeseries")
divisions = pd.DatetimeIndex(start=df.divisions[0].ceil(freq),
end=df.divisions[-1],
freq=freq).tolist()
if divisions[-1] != df.divisions[-1]:
divisions.append(df.divisions[-1])
if divisions[0] != df.divisions[0]:
divisions = [df.divisions[0]] + divisions
return df.repartition(divisions=divisions)
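# A small sketch of frequency-based repartitioning using the helper above directly:
# with a DatetimeIndex and known divisions, partitions are regrouped onto calendar
# boundaries. The index and the '7D' frequency are illustrative assumptions.
def _example_repartition_freq():
    import pandas as pd
    import dask.dataframe as dd
    idx = pd.date_range('2000-01-01', periods=14, freq='D')
    ddf = dd.from_pandas(pd.DataFrame({'x': range(14)}, index=idx), npartitions=7)
    return repartition_freq(ddf, freq='7D').npartitions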
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
new_name = 'repartition-%d-%s' % (npartitions, tokenize(df))
if df.npartitions == npartitions:
return df
elif df.npartitions > npartitions:
npartitions_ratio = df.npartitions / npartitions
new_partitions_boundaries = [int(new_partition_index * npartitions_ratio)
for new_partition_index in range(npartitions + 1)]
dsk = {}
for new_partition_index in range(npartitions):
value = (pd.concat, [(df._name, old_partition_index)
for old_partition_index in
range(new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])])
dsk[new_name, new_partition_index] = value
divisions = [df.divisions[new_partition_index]
for new_partition_index in new_partitions_boundaries]
return new_dd_object(merge(df.dask, dsk), new_name, df._meta, divisions)
else:
original_divisions = divisions = pd.Series(df.divisions)
if (df.known_divisions and (np.issubdtype(divisions.dtype, np.datetime64) or
np.issubdtype(divisions.dtype, np.number))):
if np.issubdtype(divisions.dtype, np.datetime64):
divisions = divisions.values.astype('float64')
if isinstance(divisions, pd.Series):
divisions = divisions.values
n = len(divisions)
divisions = np.interp(x=np.linspace(0, n, npartitions + 1),
xp=np.linspace(0, n, n),
fp=divisions)
if np.issubdtype(original_divisions.dtype, np.datetime64):
divisions = pd.Series(divisions).astype(original_divisions.dtype).tolist()
elif np.issubdtype(original_divisions.dtype, np.integer):
divisions = divisions.astype(original_divisions.dtype)
if isinstance(divisions, np.ndarray):
divisions = tuple(divisions.tolist())
return df.repartition(divisions=divisions)
else:
ratio = npartitions / df.npartitions
split_name = 'split-%s' % tokenize(df, npartitions)
dsk = {}
last = 0
j = 0
for i in range(df.npartitions):
new = last + ratio
if i == df.npartitions - 1:
k = npartitions - j
else:
k = int(new - last)
dsk[(split_name, i)] = (split_evenly, (df._name, i), k)
for jj in range(k):
dsk[(new_name, j)] = (getitem, (split_name, i), jj)
j += 1
last = new
divisions = [None] * (npartitions + 1)
return new_dd_object(merge(df.dask, dsk), new_name, df._meta, divisions)
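# A short sketch of ``repartition(npartitions=...)``: lowering the count concatenates
# neighbouring partitions, while raising it splits partitions evenly, as implemented
# above. The frame and the partition counts are illustrative.
def _example_repartition_npartitions():
    import pandas as pd
    import dask.dataframe as dd
    ddf = dd.from_pandas(pd.DataFrame({'x': range(12)}), npartitions=6)
    fewer = ddf.repartition(npartitions=2)   # concatenates neighbours
    more = ddf.repartition(npartitions=9)    # splits partitions evenly
    return fewer.npartitions, more.npartitions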
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return new_dd_object(merge(df.dask, dsk), out,
df._meta, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return new_dd_object(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
def _reduction_chunk(x, aca_chunk=None, **kwargs):
o = aca_chunk(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_combine(x, aca_combine=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
o = aca_combine(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def idxmaxmin_chunk(x, fn=None, skipna=True):
idx = getattr(x, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x, minmax)(skipna=skipna)
if isinstance(x, pd.DataFrame):
return pd.DataFrame({'idx': idx, 'value': value})
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_row(x, fn=None, skipna=True):
x = x.set_index('idx')
idx = getattr(x.value, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x.value, minmax)(skipna=skipna)
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_combine(x, fn=None, skipna=True):
return (x.groupby(level=0)
.apply(idxmaxmin_row, fn=fn, skipna=skipna)
.reset_index(level=1, drop=True))
def idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):
res = idxmaxmin_combine(x, fn, skipna=skipna)['idx']
if scalar:
return res[0]
res.name = None
return res
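# A minimal sketch of the idxmax/idxmin helpers above exercised through the public
# reductions, assuming ``DataFrame.idxmax``/``idxmin`` are wired to these
# chunk/combine/aggregate functions elsewhere in this module. Data is illustrative.
def _example_idxmaxmin():
    import pandas as pd
    import dask.dataframe as dd
    ddf = dd.from_pandas(pd.DataFrame({'a': [1, 5, 3], 'b': [9, 2, 7]}),
                         npartitions=2)
    return ddf.idxmax().compute(), ddf.idxmin().compute()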
def safe_head(df, n):
r = df.head(n=n)
if len(r) != n:
msg = ("Insufficient elements for `head`. {0} elements "
"requested, only {1} elements available. Try passing larger "
"`npartitions` to `head`.")
warnings.warn(msg.format(n, len(r)))
return r
def maybe_shift_divisions(df, periods, freq):
"""Maybe shift divisions by periods of size freq
Used to shift the divisions for the `shift` method. If freq isn't a fixed
size (not anchored or relative), then the divisions are shifted
appropriately. Otherwise the divisions are cleared.
Parameters
----------
df : dd.DataFrame, dd.Series, or dd.Index
periods : int
The number of periods to shift.
freq : DateOffset, timedelta, or time rule string
The frequency to shift by.
"""
if isinstance(freq, str):
freq = pd.tseries.frequencies.to_offset(freq)
if (isinstance(freq, pd.DateOffset) and
(freq.isAnchored() or not hasattr(freq, 'delta'))):
# Can't infer divisions on relative or anchored offsets, as
# divisions may now split identical index value.
# (e.g. index_partitions = [[1, 2, 3], [3, 4, 5]])
return df.clear_divisions()
if df.known_divisions:
divs = pd.Series(range(len(df.divisions)), index=df.divisions)
divisions = divs.shift(periods, freq=freq).index
return type(df)(df.dask, df._name, df._meta, divisions)
return df
def to_delayed(df):
""" Create Dask Delayed objects from a Dask Dataframe
Returns a list of delayed values, one value per partition.
Examples
--------
>>> partitions = df.to_delayed() # doctest: +SKIP
"""
from ..delayed import Delayed
return [Delayed(k, df.dask) for k in df._keys()]
@wraps(pd.to_datetime)
def to_datetime(arg, **kwargs):
meta = pd.Series([pd.Timestamp('2000')])
return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)
def _repr_data_series(s, index):
"""A helper for creating the ``_repr_data`` property"""
npartitions = len(index) - 1
if is_categorical_dtype(s):
if has_known_categories(s):
dtype = 'category[known]'
else:
dtype = 'category[unknown]'
else:
dtype = str(s.dtype)
return pd.Series([dtype] + ['...'] * npartitions, index=index, name=s.name)
if PY3:
_Frame.to_delayed.__doc__ = to_delayed.__doc__
| {
"repo_name": "cpcloud/dask",
"path": "dask/dataframe/core.py",
"copies": "1",
"size": "144496",
"license": "bsd-3-clause",
"hash": -3113892694012104000,
"line_mean": 36.7274151436,
"line_max": 102,
"alpha_frac": 0.5710054258,
"autogenerated": false,
"ratio": 3.9370061577025774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5008011583502577,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from itertools import chain
import operator
import uuid
from toolz import merge, unique, curry, first
from .core import quote
from .utils import concrete, funcname, methodcaller
from . import base
from .compatibility import apply
from . import threaded
__all__ = ['compute', 'do', 'Delayed', 'delayed']
def flat_unique(ls):
"""Flatten ``ls``, filter by unique id, and return a list"""
return list(unique(chain.from_iterable(ls), key=id))
def unzip(ls, nout):
"""Unzip a list of lists into ``nout`` outputs."""
out = list(zip(*ls))
if not out:
out = [()] * nout
return out
def to_task_dasks(expr):
"""Normalize a python object and extract all sub-dasks.
- Replace ``Delayed`` with their keys
- Convert literals to things the schedulers can handle
- Extract dasks from all enclosed values
Parameters
----------
expr : object
The object to be normalized. This function knows how to handle
``Delayed``s, as well as most builtin python types.
Returns
-------
task : normalized task to be run
dasks : list of dasks that form the dag for this task
Examples
--------
>>> a = delayed(1, 'a')
>>> b = delayed(2, 'b')
>>> task, dasks = to_task_dasks([a, b, 3])
>>> task # doctest: +SKIP
['a', 'b', 3]
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
>>> task, dasks = to_task_dasks({a: 1, b: 2})
>>> task # doctest: +SKIP
(dict, [['a', 1], ['b', 2]])
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
"""
if isinstance(expr, Delayed):
return expr.key, expr._dasks
if isinstance(expr, base.Base):
name = 'finalize-' + tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(expr.dask, keys)
dsk[name] = (expr._finalize, (concrete, keys))
return name, [dsk]
if isinstance(expr, tuple) and type(expr) != tuple:
return expr, []
if isinstance(expr, (Iterator, list, tuple, set)):
args, dasks = unzip((to_task_dasks(e) for e in expr), 2)
args = list(args)
dasks = flat_unique(dasks)
# Ensure output type matches input type
if isinstance(expr, (tuple, set)):
return (type(expr), args), dasks
else:
return args, dasks
if isinstance(expr, dict):
args, dasks = to_task_dasks([[k, v] for k, v in expr.items()])
return (dict, args), dasks
return expr, []
def tokenize(*args, **kwargs):
"""Mapping function from task -> consistent name.
Parameters
----------
args : object
Python objects that summarize the task.
pure : boolean, optional
If True, a consistent hash function is tried on the input. If this
fails, then a unique identifier is used. If False (default), then a
unique identifier is always used.
"""
if kwargs.pop('pure', False):
return base.tokenize(*args, **kwargs)
else:
return str(uuid.uuid4())
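# A quick sketch of the naming rule above: with ``pure=True`` the same inputs hash to
# the same name, while the default produces a fresh uuid-based name on every call.
def _example_tokenize():
    deterministic = tokenize('add', 1, 2, pure=True) == tokenize('add', 1, 2, pure=True)
    fresh = tokenize('add', 1, 2) != tokenize('add', 1, 2)
    return deterministic, fresh   # (True, True)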
@curry
def delayed(obj, name=None, pure=False, nout=None, traverse=True):
"""Wraps a function or object to produce a ``Delayed``.
``Delayed`` objects act as proxies for the object they wrap, but all
operations on them are done lazily by building up a dask graph internally.
Parameters
----------
obj : object
The function or object to wrap
name : string or hashable, optional
The key to use in the underlying graph for the wrapped object. Defaults
to hashing content.
pure : bool, optional
Indicates whether calling the resulting ``Delayed`` object is a pure
operation. If True, arguments to the call are hashed to produce
deterministic keys. Default is False.
nout : int, optional
The number of outputs returned from calling the resulting ``Delayed``
object. If provided, the ``Delayed`` output of the call can be iterated
into ``nout`` objects, allowing for unpacking of results. By default
        iteration over ``Delayed`` objects will error. Note that ``nout=1``
        expects ``obj`` to return a tuple of length 1, and consequently for
        ``nout=0``, ``obj`` should return an empty tuple.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``delayed``. For large collections this can be
expensive. If ``obj`` doesn't contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
Examples
--------
Apply to functions to delay execution:
>>> def inc(x):
... return x + 1
>>> inc(10)
11
>>> x = delayed(inc, pure=True)(10)
>>> type(x) == Delayed
True
>>> x.compute()
11
Can be used as a decorator:
>>> @delayed(pure=True)
... def add(a, b):
... return a + b
>>> add(1, 2).compute()
3
``delayed`` also accepts an optional keyword ``pure``. If False (default),
then subsequent calls will always produce a different ``Delayed``. This is
useful for non-pure functions (such as ``time`` or ``random``).
>>> from random import random
>>> out1 = delayed(random, pure=False)()
>>> out2 = delayed(random, pure=False)()
>>> out1.key == out2.key
False
If you know a function is pure (output only depends on the input, with no
global state), then you can set ``pure=True``. This will attempt to apply a
    consistent name to the output, but will fall back on the same behavior of
``pure=False`` if this fails.
>>> @delayed(pure=True)
... def add(a, b):
... return a + b
>>> out1 = add(1, 2)
>>> out2 = add(1, 2)
>>> out1.key == out2.key
True
The key name of the result of calling a delayed object is determined by
hashing the arguments by default. To explicitly set the name, you can use
the ``dask_key_name`` keyword when calling the function:
>>> add(1, 2) # doctest: +SKIP
Delayed('add-3dce7c56edd1ac2614add714086e950f')
>>> add(1, 2, dask_key_name='three')
Delayed('three')
Note that objects with the same key name are assumed to have the same
result. If you set the names explicitly you should make sure your key names
are different for different results.
>>> add(1, 2, dask_key_name='three')
>>> add(2, 1, dask_key_name='three')
>>> add(2, 2, dask_key_name='four')
``delayed`` can also be applied to objects to make operations on them lazy:
>>> a = delayed([1, 2, 3])
>>> isinstance(a, Delayed)
True
>>> a.compute()
[1, 2, 3]
The key name of a delayed object is hashed by default if ``pure=True`` or
is generated randomly if ``pure=False`` (default). To explicitly set the
name, you can use the ``name`` keyword:
>>> a = delayed([1, 2, 3], name='mylist')
>>> a
Delayed('mylist')
Delayed results act as a proxy to the underlying object. Many operators
are supported:
>>> (a + [1, 2]).compute()
[1, 2, 3, 1, 2]
>>> a[1].compute()
2
Method and attribute access also works:
>>> a.count(2).compute()
1
Note that if a method doesn't exist, no error will be thrown until runtime:
>>> res = a.not_a_real_method()
>>> res.compute() # doctest: +SKIP
AttributeError("'list' object has no attribute 'not_a_real_method'")
Methods are assumed to be impure by default, meaning that subsequent calls
may return different results. To assume purity, set `pure=True`. This
allows sharing of any intermediate values.
>>> a.count(2, pure=True).key == a.count(2, pure=True).key
True
As with function calls, method calls also support the ``dask_key_name``
keyword:
>>> a.count(2, dask_key_name="count_2")
Delayed("count_2")
"""
if isinstance(obj, Delayed):
return obj
if isinstance(obj, base.Base) or traverse:
task, dasks = to_task_dasks(obj)
else:
task = quote(obj)
dasks = []
if task is obj:
if not (nout is None or (type(nout) is int and nout >= 0)):
raise ValueError("nout must be None or a positive integer,"
" got %s" % nout)
if not name:
try:
prefix = obj.__name__
except AttributeError:
prefix = type(obj).__name__
token = tokenize(obj, nout, pure=pure)
name = '%s-%s' % (prefix, token)
return DelayedLeaf(obj, name, pure=pure, nout=nout)
else:
if not name:
name = '%s-%s' % (type(obj).__name__, tokenize(task, pure=pure))
dasks.append({name: task})
return Delayed(name, dasks)
do = delayed
def compute(*args, **kwargs):
"""Evaluate more than one ``Delayed`` at once.
Note that the only difference between this function and
``dask.base.compute`` is that this implicitly wraps python objects in
``Delayed``, allowing for collections of dask objects to be computed.
Examples
--------
>>> a = delayed(1)
>>> b = a + 2
>>> c = a + 3
>>> compute(b, c) # Compute both simultaneously
(3, 4)
>>> compute(a, [b, c]) # Works for lists of Delayed
(1, [3, 4])
"""
args = [delayed(a) for a in args]
return base.compute(*args, **kwargs)
def right(method):
"""Wrapper to create 'right' version of operator given left version"""
def _inner(self, other):
return method(other, self)
return _inner
class Delayed(base.Base):
"""Represents a value to be computed by dask.
Equivalent to the output from a single key in a dask graph.
"""
__slots__ = ('_key', '_dasks', '_length')
_finalize = staticmethod(first)
_default_get = staticmethod(threaded.get)
_optimize = staticmethod(lambda d, k, **kwds: d)
def __init__(self, key, dasks, length=None):
self._key = key
self._dasks = dasks
self._length = length
def __getstate__(self):
return tuple(getattr(self, i) for i in self.__slots__)
def __setstate__(self, state):
for k, v in zip(self.__slots__, state):
setattr(self, k, v)
@property
def dask(self):
return merge(*self._dasks)
@property
def key(self):
return self._key
def _keys(self):
return [self.key]
def __repr__(self):
return "Delayed({0})".format(repr(self.key))
def __hash__(self):
return hash(self.key)
def __dir__(self):
return dir(type(self))
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError("Attribute {0} not found".format(attr))
return DelayedAttr(self, attr, 'getattr-%s' % tokenize(self, attr))
def __setattr__(self, attr, val):
if attr in self.__slots__:
object.__setattr__(self, attr, val)
else:
raise TypeError("Delayed objects are immutable")
def __setitem__(self, index, val):
raise TypeError("Delayed objects are immutable")
def __iter__(self):
if getattr(self, '_length', None) is None:
raise TypeError("Delayed objects of unspecified length are "
"not iterable")
for i in range(self._length):
yield self[i]
def __len__(self):
if getattr(self, '_length', None) is None:
raise TypeError("Delayed objects of unspecified length have "
"no len()")
return self._length
def __call__(self, *args, **kwargs):
pure = kwargs.pop('pure', False)
name = kwargs.pop('dask_key_name', None)
func = delayed(apply, pure=pure)
if name is not None:
return func(self, args, kwargs, dask_key_name=name)
return func(self, args, kwargs)
def __bool__(self):
raise TypeError("Truth of Delayed objects is not supported")
__nonzero__ = __bool__
@classmethod
def _get_binary_operator(cls, op, inv=False):
method = delayed(right(op) if inv else op, pure=True)
return lambda *args, **kwargs: method(*args, **kwargs)
_get_unary_operator = _get_binary_operator
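# Illustrative sketch, not part of the original module; the helper name below is
# made up.  Attribute access on a ``Delayed`` yields a ``DelayedAttr``, and
# calling that attribute stays lazy because it is routed through
# ``call_function``.
def _example_delayed_proxy():
    d = delayed([1, 2, 2], pure=True)
    assert isinstance(d.count, DelayedAttr)
    counted = d.count(2)               # still a Delayed; nothing has run yet
    return counted.compute()           # -> 2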
def call_function(func, args, kwargs, pure=False, nout=None):
dask_key_name = kwargs.pop('dask_key_name', None)
pure = kwargs.pop('pure', pure)
if dask_key_name is None:
name = '%s-%s' % (funcname(func), tokenize(func, *args,
pure=pure, **kwargs))
else:
name = dask_key_name
args, dasks = unzip(map(to_task_dasks, args), 2)
if kwargs:
dask_kwargs, dasks2 = to_task_dasks(kwargs)
dasks = dasks + (dasks2,)
task = (apply, func, list(args), dask_kwargs)
else:
task = (func,) + args
dasks = flat_unique(dasks)
dasks.append({name: task})
return Delayed(name, dasks, length=nout)
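# Illustrative sketch, not part of the original module; the helper name below is
# made up.  ``call_function`` emits a single-task graph keyed either by an
# explicit ``dask_key_name`` or by ``funcname(func)`` plus an argument token.
def _example_call_function():
    d = call_function(len, ('abcd',), {'dask_key_name': 'my-len'}, pure=True)
    assert d.key == 'my-len'
    (task,) = d.dask.values()          # roughly (len, 'abcd')
    assert d.compute() == 4
    return task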
class DelayedLeaf(Delayed):
__slots__ = ('_obj', '_key', '_pure', '_nout')
def __init__(self, obj, key, pure=False, nout=None):
self._obj = obj
self._key = key
self._pure = pure
self._nout = nout
@property
def dask(self):
return {self._key: self._obj}
@property
def _dasks(self):
return [self.dask]
def __call__(self, *args, **kwargs):
return call_function(self._obj, args, kwargs,
pure=self._pure, nout=self._nout)
class DelayedAttr(Delayed):
__slots__ = ('_obj', '_attr', '_key')
def __init__(self, obj, attr, key):
self._obj = obj
self._attr = attr
self._key = key
@property
def _dasks(self):
return [{self._key: (getattr, self._obj._key, self._attr)}] + self._obj._dasks
def __call__(self, *args, **kwargs):
return call_function(methodcaller(self._attr), (self._obj,) + args, kwargs)
for op in [operator.abs, operator.neg, operator.pos, operator.invert,
operator.add, operator.sub, operator.mul, operator.floordiv,
operator.truediv, operator.mod, operator.pow, operator.and_,
operator.or_, operator.xor, operator.lshift, operator.rshift,
operator.eq, operator.ge, operator.gt, operator.ne, operator.le,
operator.lt, operator.getitem]:
Delayed._bind_operator(op)
base.normalize_token.register(Delayed, lambda a: a.key)
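# Illustrative sketch, not part of the original module; the helper name below is
# made up.  With the operators bound above, arithmetic on Delayed objects builds
# new lazy tasks instead of evaluating eagerly.
def _example_bound_operators():
    one, two = delayed(1, pure=True), delayed(2, pure=True)
    expr = (one + two) * 3
    assert isinstance(expr, Delayed)
    return expr.compute()              # -> 9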
| {
"repo_name": "chrisbarber/dask",
"path": "dask/delayed.py",
"copies": "2",
"size": "14545",
"license": "bsd-3-clause",
"hash": 6204796247401347000,
"line_mean": 29.6856540084,
"line_max": 86,
"alpha_frac": 0.5909247164,
"autogenerated": false,
"ratio": 3.700076316458916,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5291001032858916,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import datetime
from functools import reduce
import itertools
import operator
import warnings
import datashape
from datashape import discover, Tuple, Record, DataShape, var
from datashape.predicates import iscollection, isscalar, isrecord, istabular
import numpy as np
from odo import resource, odo
from odo.utils import ignoring, copydoc
from odo.compatibility import unicode
from pandas import DataFrame, Series
from .expr import Expr, Symbol, ndim
from .dispatch import dispatch
from .compatibility import _strtypes
__all__ = ['Data', 'Table', 'into', 'to_html']
names = ('_%d' % i for i in itertools.count(1))
not_an_iterator = []
with ignoring(ImportError):
import bcolz
not_an_iterator.append(bcolz.carray)
with ignoring(ImportError):
import pymongo
not_an_iterator.append(pymongo.collection.Collection)
not_an_iterator.append(pymongo.database.Database)
class InteractiveSymbol(Symbol):
"""Interactive data.
The ``Data`` object presents a familiar view onto a variety of forms of
data. This user-level object provides an interactive experience to using
Blaze's abstract expressions.
Parameters
----------
data : object
Any type with ``discover`` and ``compute`` implementations
fields : list, optional
Field or column names, will be inferred from datasource if possible
dshape : str or DataShape, optional
DataShape describing input data
name : str, optional
A name for the data.
Examples
--------
>>> t = Data([(1, 'Alice', 100),
... (2, 'Bob', -200),
... (3, 'Charlie', 300),
... (4, 'Denis', 400),
... (5, 'Edith', -500)],
... fields=['id', 'name', 'balance'])
>>> t[t.balance < 0].name
name
0 Bob
1 Edith
"""
__slots__ = 'data', 'dshape', '_name'
def __init__(self, data, dshape, name=None):
self.data = data
self.dshape = dshape
self._name = name or (next(names)
if isrecord(dshape.measure)
else None)
def _resources(self):
return {self: self.data}
@property
def _args(self):
return id(self.data), self.dshape, self._name
def __setstate__(self, state):
for slot, arg in zip(self.__slots__, state):
setattr(self, slot, arg)
@copydoc(InteractiveSymbol)
def Data(data, dshape=None, name=None, fields=None, columns=None, schema=None,
**kwargs):
if columns:
raise ValueError("columns argument deprecated, use fields instead")
if schema and dshape:
raise ValueError("Please specify one of schema= or dshape= keyword"
" arguments")
if isinstance(data, InteractiveSymbol):
return Data(data.data, dshape, name, fields, columns, schema, **kwargs)
if isinstance(data, _strtypes):
data = resource(data, schema=schema, dshape=dshape, columns=columns,
**kwargs)
if (isinstance(data, Iterator) and
not isinstance(data, tuple(not_an_iterator))):
data = tuple(data)
if schema and not dshape:
dshape = var * schema
if dshape and isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if not dshape:
dshape = discover(data)
types = None
if isinstance(dshape.measure, Tuple) and fields:
types = dshape[1].dshapes
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isscalar(dshape.measure) and fields:
types = (dshape.measure,) * int(dshape[-2])
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape[:-1] + (schema,)))
elif isrecord(dshape.measure) and fields:
ds = discover(data)
assert isrecord(ds.measure)
names = ds.measure.names
if names != fields:
raise ValueError('data column names %s\n'
'\tnot equal to fields parameter %s,\n'
'\tuse Data(data).relabel(%s) to rename '
'fields' % (names,
fields,
', '.join('%s=%r' % (k, v)
for k, v in
zip(names, fields))))
types = dshape.measure.types
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
ds = datashape.dshape(dshape)
return InteractiveSymbol(data, ds, name)
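# Illustrative sketch, not part of the original module; the helper name and data
# are made up.  When ``discover`` infers a Tuple measure, the ``fields``
# argument above relabels it into a Record measure.
def _example_data_fields():
    t = Data([(1, 'Alice'), (2, 'Bob')], fields=['id', 'name'])
    # discover() gives roughly "2 * (int64, string)"; with fields this becomes
    # "2 * {id: int64, name: string}".
    return t.dshape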
def Table(*args, **kwargs):
""" Deprecated, see Data instead """
warnings.warn("Table is deprecated, use Data instead",
DeprecationWarning)
return Data(*args, **kwargs)
@dispatch(InteractiveSymbol, dict)
def _subs(o, d):
return o
@dispatch(Expr)
def compute(expr, **kwargs):
resources = expr._resources()
if not resources:
raise ValueError("No data resources found")
else:
return compute(expr, resources, **kwargs)
def concrete_head(expr, n=10):
""" Return head of computed expression """
if not expr._resources():
raise ValueError("Expression does not contain data resources")
if not iscollection(expr.dshape):
return compute(expr)
head = expr.head(n + 1)
if not iscollection(expr.dshape):
return odo(head, object)
elif isrecord(expr.dshape.measure):
return odo(head, DataFrame)
else:
df = odo(head, DataFrame)
df.columns = [expr._name]
return df
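    # NOTE: every branch of the if/elif/else above returns, so the code below
    # this point is unreachable.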
result = compute(head)
if len(result) == 0:
return DataFrame(columns=expr.fields)
if isrecord(expr.dshape.measure):
return odo(result, DataFrame, dshape=expr.dshape)
else:
df = odo(result, DataFrame, dshape=expr.dshape)
df.columns = [expr._name]
return df
def repr_tables(expr, n=10):
result = concrete_head(expr, n).rename(columns={None: ''})
if isinstance(result, (DataFrame, Series)):
s = repr(result)
        if len(result) > n:
s = '\n'.join(s.split('\n')[:-1]) + '\n...'
return s
else:
return repr(result) # pragma: no cover
def numel(shape):
if var in shape:
return None
if not shape:
return 1
return reduce(operator.mul, shape, 1)
def short_dshape(ds, nlines=5):
s = datashape.coretypes.pprint(ds)
lines = s.split('\n')
    if len(lines) > nlines:
s = '\n'.join(lines[:nlines]) + '\n ...'
return s
def coerce_to(typ, x):
try:
return typ(x)
except TypeError:
return odo(x, typ)
def coerce_scalar(result, dshape):
if 'float' in dshape:
return coerce_to(float, result)
elif 'int' in dshape:
return coerce_to(int, result)
elif 'bool' in dshape:
return coerce_to(bool, result)
elif 'datetime' in dshape:
return coerce_to(datetime.datetime, result)
elif 'date' in dshape:
return coerce_to(datetime.date, result)
else:
return result
def expr_repr(expr, n=10):
# Pure Expressions, not interactive
if not expr._resources():
return str(expr)
# Scalars
if ndim(expr) == 0 and isscalar(expr.dshape):
return repr(coerce_scalar(compute(expr), str(expr.dshape)))
# Tables
if (ndim(expr) == 1 and (istabular(expr.dshape) or
isscalar(expr.dshape.measure))):
        return repr_tables(expr, n)
# Smallish arrays
if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:
return repr(compute(expr))
# Other
dat = expr._resources().values()
if len(dat) == 1:
dat = list(dat)[0] # may be dict_values
s = 'Data: %s' % dat
if not isinstance(expr, Symbol):
s += '\nExpr: %s' % str(expr)
s += '\nDataShape: %s' % short_dshape(expr.dshape, nlines=7)
return s
@dispatch(DataFrame)
def to_html(df):
return df.to_html()
@dispatch(Expr)
def to_html(expr):
# Tables
if not expr._resources() or ndim(expr) != 1:
return to_html(repr(expr))
return to_html(concrete_head(expr))
@dispatch(object)
def to_html(o):
return repr(o)
@dispatch(_strtypes)
def to_html(o):
return o.replace('\n', '<br>')
@dispatch((object, type, str, unicode), Expr)
def into(a, b, **kwargs):
result = compute(b, **kwargs)
kwargs['dshape'] = b.dshape
return into(a, result, **kwargs)
def table_length(expr):
try:
return expr._len()
except ValueError:
return compute(expr.count())
Expr.__repr__ = expr_repr
Expr._repr_html_ = lambda x: to_html(x)
Expr.__len__ = table_length
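# Illustrative sketch, not part of the original module; the helper name and data
# are made up, and it assumes blaze's in-memory (list) backend is usable.  With
# the hooks above installed, merely evaluating an interactive expression
# triggers computation.
def _example_interactive_repr():
    t = Data([1, 2, 3], dshape='3 * int64', name='t')
    # repr(t) routes through expr_repr -> concrete_head -> compute; len(t) reads
    # the fixed length from the dshape and otherwise computes t.count().
    return repr(t), len(t)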
def intonumpy(data, dtype=None, **kwargs):
# TODO: Don't ignore other kwargs like copy
result = odo(data, np.ndarray)
if dtype and result.dtype != dtype:
result = result.astype(dtype)
return result
def convert_base(typ, x):
x = compute(x)
try:
return typ(x)
    except Exception:
return typ(odo(x, typ))
Expr.__array__ = intonumpy
Expr.__int__ = lambda x: convert_base(int, x)
Expr.__float__ = lambda x: convert_base(float, x)
Expr.__complex__ = lambda x: convert_base(complex, x)
Expr.__bool__ = lambda x: convert_base(bool, x)
Expr.__nonzero__ = lambda x: convert_base(bool, x)
Expr.__iter__ = into(Iterator)
| {
"repo_name": "LiaoPan/blaze",
"path": "blaze/interactive.py",
"copies": "9",
"size": "9656",
"license": "bsd-3-clause",
"hash": -1982591515577266000,
"line_mean": 27.1516034985,
"line_max": 79,
"alpha_frac": 0.5873032312,
"autogenerated": false,
"ratio": 3.722436391673092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8809739622873092,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import decimal
import datetime
from functools import reduce, partial
import itertools
import operator
import warnings
import datashape
from datashape import discover, Tuple, Record, DataShape, var, Map
from datashape.predicates import iscollection, isscalar, isrecord, istabular
import numpy as np
from odo import resource, odo
from odo.utils import ignoring, copydoc
from odo.compatibility import unicode
from pandas import DataFrame, Series, Timestamp
from .expr import Expr, Symbol, ndim
from .dispatch import dispatch
from .compatibility import _strtypes
__all__ = ['Data', 'Table', 'into', 'to_html']
names = ('_%d' % i for i in itertools.count(1))
not_an_iterator = []
with ignoring(ImportError):
import bcolz
not_an_iterator.append(bcolz.carray)
with ignoring(ImportError):
import pymongo
not_an_iterator.append(pymongo.collection.Collection)
not_an_iterator.append(pymongo.database.Database)
class InteractiveSymbol(Symbol):
"""Interactive data.
The ``Data`` object presents a familiar view onto a variety of forms of
data. This user-level object provides an interactive experience to using
Blaze's abstract expressions.
Parameters
----------
data : object
Any type with ``discover`` and ``compute`` implementations
fields : list, optional
Field or column names, will be inferred from datasource if possible
dshape : str or DataShape, optional
DataShape describing input data
name : str, optional
A name for the data.
Examples
--------
>>> t = Data([(1, 'Alice', 100),
... (2, 'Bob', -200),
... (3, 'Charlie', 300),
... (4, 'Denis', 400),
... (5, 'Edith', -500)],
... fields=['id', 'name', 'balance'])
>>> t[t.balance < 0].name
name
0 Bob
1 Edith
"""
__slots__ = '_hash', 'data', 'dshape', '_name'
def __init__(self, data, dshape, name=None):
self.data = data
self.dshape = dshape
self._name = name or (next(names)
if isrecord(dshape.measure)
else None)
self._hash = None
def _resources(self):
return {self: self.data}
@property
def _hashargs(self):
data = self.data
try:
# cannot use isinstance(data, Hashable)
# some classes give a false positive
hash(data)
except TypeError:
data = id(data)
return data, self.dshape, self._name
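# Illustrative sketch, not part of the original module; the helper name and data
# are made up.  ``_hashargs`` keeps interactive symbols hashable even when the
# wrapped data is not, by falling back to ``id(data)``.
def _example_hashargs_fallback():
    hashable = Data((1, 2, 3), dshape='3 * int64', name='t')
    unhashable = Data([1, 2, 3], dshape='3 * int64', name='s')
    # The tuple hashes, so it appears directly in the hash arguments; hash() on
    # the list raises TypeError, so id(data) is used instead.
    return hashable._hashargs, unhashable._hashargs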
@copydoc(InteractiveSymbol)
def Data(data, dshape=None, name=None, fields=None, schema=None, **kwargs):
if schema and dshape:
raise ValueError("Please specify one of schema= or dshape= keyword"
" arguments")
if isinstance(data, InteractiveSymbol):
return Data(data.data, dshape, name, fields, schema, **kwargs)
if isinstance(data, _strtypes):
data = resource(data, schema=schema, dshape=dshape, **kwargs)
if (isinstance(data, Iterator) and
not isinstance(data, tuple(not_an_iterator))):
data = tuple(data)
if schema and not dshape:
dshape = var * schema
if dshape and isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if not dshape:
dshape = discover(data)
types = None
if isinstance(dshape.measure, Tuple) and fields:
types = dshape[1].dshapes
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isscalar(dshape.measure) and fields:
types = (dshape.measure,) * int(dshape[-2])
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape[:-1] + (schema,)))
elif isrecord(dshape.measure) and fields:
ds = discover(data)
assert isrecord(ds.measure)
names = ds.measure.names
if names != fields:
raise ValueError('data column names %s\n'
'\tnot equal to fields parameter %s,\n'
'\tuse Data(data).relabel(%s) to rename '
'fields' % (names,
fields,
', '.join('%s=%r' % (k, v)
for k, v in
zip(names, fields))))
types = dshape.measure.types
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
ds = datashape.dshape(dshape)
return InteractiveSymbol(data, ds, name)
def Table(*args, **kwargs):
""" Deprecated, see Data instead """
warnings.warn("Table is deprecated, use Data instead",
DeprecationWarning)
return Data(*args, **kwargs)
@dispatch(InteractiveSymbol, dict)
def _subs(o, d):
return o
@dispatch(Expr)
def compute(expr, **kwargs):
resources = expr._resources()
if not resources:
raise ValueError("No data resources found")
else:
return compute(expr, resources, **kwargs)
def concrete_head(expr, n=10):
""" Return head of computed expression """
if not expr._resources():
raise ValueError("Expression does not contain data resources")
if not iscollection(expr.dshape):
return compute(expr)
head = expr.head(n + 1)
if not iscollection(expr.dshape):
return odo(head, object)
elif isrecord(expr.dshape.measure):
return odo(head, DataFrame)
else:
df = odo(head, DataFrame)
df.columns = [expr._name]
return df
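    # NOTE: every branch of the if/elif/else above returns, so the code below
    # this point is unreachable.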
result = compute(head)
if len(result) == 0:
return DataFrame(columns=expr.fields)
if isrecord(expr.dshape.measure):
return odo(result, DataFrame, dshape=expr.dshape)
else:
df = odo(result, DataFrame, dshape=expr.dshape)
df.columns = [expr._name]
return df
def repr_tables(expr, n=10):
result = concrete_head(expr, n).rename(columns={None: ''})
if isinstance(result, (DataFrame, Series)):
s = repr(result)
        if len(result) > n:
s = '\n'.join(s.split('\n')[:-1]) + '\n...'
return s
else:
return repr(result) # pragma: no cover
def numel(shape):
if var in shape:
return None
if not shape:
return 1
return reduce(operator.mul, shape, 1)
def short_dshape(ds, nlines=5):
s = datashape.coretypes.pprint(ds)
lines = s.split('\n')
    if len(lines) > nlines:
s = '\n'.join(lines[:nlines]) + '\n ...'
return s
def coerce_to(typ, x, odo_kwargs=None):
try:
return typ(x)
except TypeError:
return odo(x, typ, **(odo_kwargs or {}))
def coerce_scalar(result, dshape, odo_kwargs=None):
coerce_ = partial(coerce_to, x=result, odo_kwargs=odo_kwargs)
if 'float' in dshape:
return coerce_(float)
if 'decimal' in dshape:
return coerce_(decimal.Decimal)
elif 'int' in dshape:
return coerce_(int)
elif 'bool' in dshape:
return coerce_(bool)
elif 'datetime' in dshape:
return coerce_(Timestamp)
elif 'date' in dshape:
return coerce_(datetime.date)
else:
return result
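# Illustrative sketch, not part of the original module; the helper name is made
# up.  ``coerce_scalar`` picks a concrete Python/pandas type from the dshape
# string before handing the value back to the user.
def _example_coerce_scalar():
    assert coerce_scalar(1, 'float64') == 1.0
    assert coerce_scalar('7', 'int32') == 7
    assert coerce_scalar('2016-01-01', 'datetime') == Timestamp('2016-01-01')
    return True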
def expr_repr(expr, n=10):
# Pure Expressions, not interactive
if not set(expr._resources().keys()).issuperset(expr._leaves()):
return str(expr)
# Scalars
if ndim(expr) == 0 and isscalar(expr.dshape):
return repr(coerce_scalar(compute(expr), str(expr.dshape)))
# Tables
if (ndim(expr) == 1 and (istabular(expr.dshape) or
isscalar(expr.dshape.measure) or
isinstance(expr.dshape.measure, Map))):
        return repr_tables(expr, n)
# Smallish arrays
if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:
return repr(compute(expr))
# Other
dat = expr._resources().values()
if len(dat) == 1:
dat = list(dat)[0] # may be dict_values
s = 'Data: %s' % dat
if not isinstance(expr, Symbol):
s += '\nExpr: %s' % str(expr)
s += '\nDataShape: %s' % short_dshape(expr.dshape, nlines=7)
return s
@dispatch(DataFrame)
def to_html(df):
return df.to_html()
@dispatch(Expr)
def to_html(expr):
# Tables
if not expr._resources() or ndim(expr) != 1:
return to_html(repr(expr))
return to_html(concrete_head(expr))
@dispatch(object)
def to_html(o):
return repr(o)
@dispatch(_strtypes)
def to_html(o):
return o.replace('\n', '<br>')
@dispatch((object, type, str, unicode), Expr)
def into(a, b, **kwargs):
result = compute(b, **kwargs)
kwargs['dshape'] = b.dshape
return into(a, result, **kwargs)
def table_length(expr):
try:
return expr._len()
except ValueError:
return int(expr.count())
Expr.__repr__ = expr_repr
Expr._repr_html_ = lambda x: to_html(x)
Expr.__len__ = table_length
def intonumpy(data, dtype=None, **kwargs):
# TODO: Don't ignore other kwargs like copy
result = odo(data, np.ndarray)
if dtype and result.dtype != dtype:
result = result.astype(dtype)
return result
def convert_base(typ, x):
x = compute(x)
try:
return typ(x)
    except Exception:
return typ(odo(x, typ))
Expr.__array__ = intonumpy
Expr.__int__ = lambda x: convert_base(int, x)
Expr.__float__ = lambda x: convert_base(float, x)
Expr.__complex__ = lambda x: convert_base(complex, x)
Expr.__bool__ = lambda x: convert_base(bool, x)
Expr.__nonzero__ = lambda x: convert_base(bool, x)
Expr.__iter__ = into(Iterator)
| {
"repo_name": "cowlicks/blaze",
"path": "blaze/interactive.py",
"copies": "2",
"size": "9950",
"license": "bsd-3-clause",
"hash": -8886279359326199000,
"line_mean": 27.3475783476,
"line_max": 77,
"alpha_frac": 0.587839196,
"autogenerated": false,
"ratio": 3.7307836520434945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003429355281207133,
"num_lines": 351
} |