import logging
import os.path
import shutil
import requests
from contextlib import closing
from cattle.type_manager import get_type, MARSHALLER
from cattle.storage import BaseStoragePool
from cattle.agent.handler import KindBasedMixin
from cattle.plugins.volmgr import volmgr
from cattle.plugins.docker.util import is_no_op, remove_container
from cattle.lock import lock
from cattle.progress import Progress
from . import docker_client, get_compute
from docker.errors import APIError
from cattle.utils import is_str_set
log = logging.getLogger('docker')
class DockerPool(KindBasedMixin, BaseStoragePool):
def __init__(self):
KindBasedMixin.__init__(self, kind='docker')
BaseStoragePool.__init__(self)
@staticmethod
def _get_image_by_id(id):
templates = docker_client().images(all=True)
templates = filter(lambda x: x['Id'] == id, templates)
if len(templates) > 0:
return templates[0]
return None
def pull_image(self, image, progress):
if not self._is_image_active(image, None):
self._do_image_activate(image, None, progress)
def _is_image_active(self, image, storage_pool):
if is_no_op(image):
return True
parsed_tag = DockerPool.parse_repo_tag(image.data.dockerImage.fullName)
try:
if len(docker_client().inspect_image(parsed_tag['uuid'])):
return True
except APIError:
pass
return False
def _image_build(self, image, progress):
client = docker_client()
opts = dict(image.data.fields.build)
def do_build():
for key in ['context', 'remote']:
if key in opts:
del opts[key]
opts['stream'] = True
marshaller = get_type(MARSHALLER)
for status in client.build(**opts):
try:
status = marshaller.from_string(status)
progress.update(status['stream'])
except:
pass
if is_str_set(opts, 'context'):
with closing(requests.get(opts['context'], stream=True)) as r:
if r.status_code != 200:
raise Exception('Bad response {} from {}'
.format(r.status_code,
opts['context']))
del opts['context']
opts['fileobj'] = ResponseWrapper(r)
opts['custom_context'] = True
do_build()
else:
remote = opts['remote']
if remote.startswith('git@github.com:'):
remote = remote.replace('git@github.com:', 'git://github.com/')
del opts['remote']
opts['path'] = remote
do_build()
def _is_build(self, image):
try:
if is_str_set(image.data.fields.build, 'context') or \
is_str_set(image.data.fields.build, 'remote'):
return True
except (KeyError, AttributeError):
pass
return False
def _do_image_activate(self, image, storage_pool, progress):
if is_no_op(image):
return
if self._is_build(image):
return self._image_build(image, progress)
auth_config = None
try:
if 'registryCredential' in image:
if image.registryCredential is not None:
auth_config = {
'username': image.registryCredential['publicValue'],
'email': image.registryCredential['data']['fields']
['email'],
'password': image.registryCredential['secretValue'],
'serveraddress': image.registryCredential['registry']
['data']['fields']['serverAddress']
}
if auth_config['serveraddress'] == "https://docker.io":
auth_config['serveraddress'] =\
"https://index.docker.io"
log.debug('Auth_Config: [%s]', auth_config)
else:
log.debug('No Registry credential found. Pulling non-authed')
except (AttributeError, KeyError, TypeError) as e:
raise AuthConfigurationError("Malformed Auth Config. \n\n"
"error: [%s]\nregistryCredential:"
" %s"
% (e, image.registryCredential))
client = docker_client()
data = image.data.dockerImage
marshaller = get_type(MARSHALLER)
temp = data.qualifiedName
if data.qualifiedName.startswith('docker.io/'):
temp = 'index.' + data.qualifiedName
# Always pass insecure_registry=True to prevent docker-py
# from pre-verifying the registry. Let the docker daemon handle
# the verification of and connection to the registry.
if progress is None:
result = client.pull(repository=temp,
tag=data.tag, auth_config=auth_config,
insecure_registry=True)
if 'error' in result:
raise ImageValidationError('Image [%s] failed to pull' %
data.fullName)
else:
for status in client.pull(repository=temp,
tag=data.tag,
auth_config=auth_config,
stream=True,
insecure_registry=True):
status = marshaller.from_string(status)
try:
message = status['status']
except KeyError:
message = status['error']
raise ImageValidationError('Image [%s] failed to pull '
': %s' % (data.fullName,
message))
progress.update(message)
def _get_image_storage_pool_map_data(self, obj):
return {}
def _get_volume_storage_pool_map_data(self, obj):
return {
'volume': {
'format': 'docker'
}
}
def _is_volume_active(self, volume, storage_pool):
return True
def _is_volume_inactive(self, volume, storage_pool):
return True
def _is_volume_removed(self, volume, storage_pool):
if volume.deviceNumber == 0:
container = get_compute().get_container(docker_client(),
volume.instance)
return container is None
else:
path = self._path_to_volume(volume)
# Check for volmgr managed volume, must be done before "isHostPath"
if volmgr.volume_exists(path):
return False
if volume.data.fields['isHostPath']:
# If this is a host path volume, we'll never really remove it
# from disk, so just report it as removed for the purpose of
# handling the event.
return True
return not os.path.exists(path)
def _do_volume_remove(self, volume, storage_pool, progress):
if volume.deviceNumber == 0:
container = get_compute().get_container(docker_client(),
volume.instance)
if container is None:
return
remove_container(docker_client(), container)
else:
path = self._path_to_volume(volume)
# Check for volmgr managed volume, must be done before "isHostPath"
if volmgr.volume_exists(path):
log.info("Deleting volmgr managed volume: %s" % path)
volmgr.remove_volume(path)
return
if not volume.data.fields['isHostPath']:
if os.path.exists(path):
log.info("Deleting volume: %s" % volume.uri)
shutil.rmtree(path)
def _path_to_volume(self, volume):
return volume.uri.replace('file://', '')
@staticmethod
def parse_repo_tag(image_uuid):
if image_uuid.startswith('docker:'):
image_uuid = image_uuid[7:]
n = image_uuid.rfind(":")
if n < 0:
return {'repo': image_uuid,
'tag': 'latest',
'uuid': image_uuid + ':latest'}
tag = image_uuid[n+1:]
if tag.find("/") < 0:
return {'repo': image_uuid[:n], 'tag': tag, 'uuid': image_uuid}
return {'repo': image_uuid,
'tag': 'latest',
'uuid': image_uuid + ':latest'}
def volume_remove(self, req=None, volumeStoragePoolMap=None, **kw):
volume = volumeStoragePoolMap.volume
storage_pool = volumeStoragePoolMap.storagePool
progress = Progress(req)
with lock(volume):
if volume.deviceNumber == 0:
get_compute().purge_state(docker_client(), volume.instance)
if not self._is_volume_removed(volume, storage_pool):
self._do_volume_remove(volume, storage_pool, progress)
data = self._get_response_data(req, volumeStoragePoolMap)
return self._reply(req, data)
class ImageValidationError(Exception):
pass
class AuthConfigurationError(Exception):
pass
class ResponseWrapper(object):
""""
This wrapper is to prevent requests from incorrectly setting the
Content-Length on the request. If you do not use this wrapper requests
finds r.raw.fileno and uses the size of that FD, which is 0
"""
def __init__(self, response):
self.r = response
def __iter__(self):
return self.r.raw.__iter__()
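# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how
# DockerPool.parse_repo_tag() splits an image uuid. The uuids below are
# arbitrary examples; only the static method defined above is exercised, and
# the helper is never called on import.
# ---------------------------------------------------------------------------
def _example_parse_repo_tag():
    # the 'docker:' prefix is stripped and an explicit tag is preserved
    assert DockerPool.parse_repo_tag('docker:ubuntu:14.04') == {
        'repo': 'ubuntu', 'tag': '14.04', 'uuid': 'ubuntu:14.04'}
    # no tag means 'latest' is assumed
    assert DockerPool.parse_repo_tag('ubuntu') == {
        'repo': 'ubuntu', 'tag': 'latest', 'uuid': 'ubuntu:latest'}
    # a ':' inside a registry host:port is not mistaken for a tag
    assert DockerPool.parse_repo_tag('registry.local:5000/ubuntu') == {
        'repo': 'registry.local:5000/ubuntu',
        'tag': 'latest',
        'uuid': 'registry.local:5000/ubuntu:latest'}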
# =============================================================================
import numpy as np
import scipy.misc
import scipy.signal
import math
import draw
import ref
# =============================================================================
# General image processing functions
# =============================================================================
def get_transform(center, scale, res, rot=0):
# Generate transformation matrix
h = 200 * scale
t = np.zeros((3, 3))
t[0, 0] = float(res[1]) / h
t[1, 1] = float(res[0]) / h
t[0, 2] = res[1] * (-float(center[0]) / h + .5)
t[1, 2] = res[0] * (-float(center[1]) / h + .5)
t[2, 2] = 1
if not rot == 0:
rot = -rot # To match direction of rotation from cropping
rot_mat = np.zeros((3,3))
rot_rad = rot * np.pi / 180
sn,cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0,:2] = [cs, -sn]
rot_mat[1,:2] = [sn, cs]
rot_mat[2,2] = 1
# Need to rotate around center
t_mat = np.eye(3)
t_mat[0,2] = -res[1]/2
t_mat[1,2] = -res[0]/2
t_inv = t_mat.copy()
t_inv[:2,2] *= -1
t = np.dot(t_inv,np.dot(rot_mat,np.dot(t_mat,t)))
return t
def transform(pt, center, scale, res, invert=0, rot=0):
# Transform pixel location to different reference
t = get_transform(center, scale, res, rot=rot)
if invert:
t = np.linalg.inv(t)
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2].astype(int)
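# Illustrative sketch (not part of the original code): transform() maps an
# image point into the cropped reference frame, and invert=1 maps it back.
# The center/scale/res values below are arbitrary; with scale=1 a 200-pixel
# box around the center is mapped onto the 256x256 output.
def _example_transform_roundtrip():
    center, scale, res = [320, 240], 1.0, [256, 256]
    pt_crop = transform([320, 240], center, scale, res)         # roughly [128, 128]
    pt_back = transform(pt_crop, center, scale, res, invert=1)  # roughly [320, 240]
    return pt_crop, pt_back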
def crop(img, center, scale, res, rot=0):
# Upper left point
ul = np.array(transform([0, 0], center, scale, res, invert=1))
# Bottom right point
br = np.array(transform(res, center, scale, res, invert=1))
# Padding so that when rotated the proper amount of context is included
pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
if not rot == 0:
ul -= pad
br += pad
new_shape = [br[1] - ul[1], br[0] - ul[0]]
if len(img.shape) > 2:
new_shape += [img.shape[2]]
new_img = np.zeros(new_shape)
# Range to fill new array
new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
# Range to sample from original image
old_x = max(0, ul[0]), min(len(img[0]), br[0])
old_y = max(0, ul[1]), min(len(img), br[1])
new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
if not rot == 0:
# Remove padding
new_img = scipy.misc.imrotate(new_img, rot)
new_img = new_img[pad:-pad, pad:-pad]
return scipy.misc.imresize(new_img, res)
def two_pt_crop(img, scale, pt1, pt2, pad, res, chg=None):
center = (pt1+pt2) / 2
scale = max(20*scale, np.linalg.norm(pt1-pt2)) * .007
scale *= pad
angle = math.atan2(pt2[1]-pt1[1],pt2[0]-pt1[0]) * 180 / math.pi - 90
flip = False
# Handle data augmentation
if chg is not None:
# Flipping
if 'flip' in chg:
if np.random.rand() < .5:
flip = True
# Scaling
if 'scale' in chg:
scale *= min(1+chg['scale'], max(1-chg['scale'], (np.random.randn() * chg['scale']) + 1))
# Rotation
if 'rotate' in chg:
angle += np.random.randint(-chg['rotate'], chg['rotate'] + 1)
# Translation
if 'translate' in chg:
for i in xrange(2):
offset = np.random.randint(-chg['translate'], chg['translate'] + 1) * scale
center[i] += offset
# Create input image
cropped = crop(img, center, scale, res, rot=angle)
inp = np.zeros((3, res[0], res[1]))
for i in xrange(3):
inp[i, :, :] = cropped[:, :, i]
# Create heatmap
hm = np.zeros((2,res[0],res[1]))
draw.gaussian(hm[0],transform(pt1, center, scale, res, rot=angle), 2)
draw.gaussian(hm[1],transform(pt2, center, scale, res, rot=angle), 2)
if flip:
inp = np.array([np.fliplr(inp[i]) for i in xrange(len(inp))])
hm = np.array([np.fliplr(hm[i]) for i in xrange(len(hm))])
return inp, hm
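# Illustrative sketch (not part of the original code): the optional 'chg'
# dict accepted by two_pt_crop() for data augmentation. All keys are optional;
# the values below are arbitrary and only bound the random jitter applied
# above (img, pt1 and pt2 are assumed to be numpy arrays).
def _example_two_pt_crop_augmentation(img, pt1, pt2):
    chg = {
        'flip': True,       # presence enables a 50% chance of horizontal flip
        'scale': 0.25,      # scale jittered within roughly +/- 25%
        'rotate': 15,       # rotation offset drawn from [-15, 15] degrees
        'translate': 5,     # center shifted by up to +/- 5 * scale pixels
    }
    return two_pt_crop(img, 1.0, pt1, pt2, pad=1.5, res=[256, 256], chg=chg)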
def nms(img):
# Do non-maximum suppression on a 2D array
win_size = 3
domain = np.ones((win_size, win_size))
maxes = scipy.signal.order_filter(img, domain, win_size ** 2 - 1)
diff = maxes - img
result = img.copy()
result[diff > 0] = 0
return result
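# Illustrative sketch (not part of the original code): nms() keeps only the
# values that equal the maximum of their 3x3 neighbourhood and zeroes the
# rest, which isolates heatmap peaks.
def _example_nms():
    heat = np.zeros((5, 5))
    heat[1, 1] = 1.0   # local peak, survives
    heat[1, 2] = 0.6   # neighbour of a larger value, suppressed
    heat[3, 3] = 0.8   # second isolated peak, survives
    return nms(heat)   # non-zero only at (1, 1) and (3, 3)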
# =============================================================================
# Helpful display functions
# =============================================================================
def gauss(x, a, b, c, d=0):
return a * np.exp(-(x - b)**2 / (2 * c**2)) + d
def color_heatmap(x):
color = np.zeros((x.shape[0],x.shape[1],3))
color[:,:,0] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
color[:,:,1] = gauss(x, 1, .5, .3)
color[:,:,2] = gauss(x, 1, .2, .3)
color[color > 1] = 1
color = (color * 255).astype(np.uint8)
return color
def sample_with_heatmap(dataset, inp, out, num_rows=2, parts_to_show=None):
img = np.zeros((inp.shape[1], inp.shape[2], inp.shape[0]))
for i in xrange(3):
img[:, :, i] = inp[i, :, :]
if parts_to_show is None:
parts_to_show = np.arange(out.shape[0])
# Generate a single image to display input/output pair
num_cols = np.ceil(float(len(parts_to_show)) / num_rows)
size = img.shape[0] / num_rows
full_img = np.zeros((img.shape[0], size * (num_cols + num_rows), 3), np.uint8)
full_img[:img.shape[0], :img.shape[1]] = img
inp_small = scipy.misc.imresize(img, [size, size])
# Set up heatmap display for each part
for i, part in enumerate(parts_to_show):
if type(part) is str:
part_idx = ref.parts[dataset].index(part)
else:
part_idx = part
out_resized = scipy.misc.imresize(out[part_idx], [size, size])
out_resized = out_resized.astype(float)/255
out_img = inp_small.copy() * .3
color_hm = color_heatmap(out_resized)
out_img += color_hm * .7
col_offset = (i % num_cols + num_rows) * size
row_offset = (i // num_cols) * size
full_img[row_offset:row_offset + size, col_offset:col_offset + size] = out_img
return full_img
def sample_with_skeleton(annot, idx, preds, res=None):
# Load image and basic info
ds = annot.attrs['name']
img = ref.loadimg(annot, idx)
c = annot['center'][idx]
s = annot['scale'][idx]
if res is None:
res = [256, 256]
# Skeleton colors
colors = [(255, 0, 0), # Upper arm (left)
(255, 100, 100), # Lower arm (left)
(0, 0, 255), # Upper arm (right)
(100, 100, 255), # Lower arm (right)
(100, 255, 100), # Head/neck/face
(255, 75, 0), # Upper leg (left)
(255, 175, 100), # Lower leg (left)
(0, 75, 255), # Upper leg (right)
(100, 175, 255) # Lower leg (right)
]
# Draw arms
draw.limb(img, preds[ref.parts[ds].index('lsho')], preds[ref.parts[ds].index('lelb')], colors[0], 5 * s)
draw.limb(img, preds[ref.parts[ds].index('lwri')], preds[ref.parts[ds].index('lelb')], colors[1], 5 * s)
draw.limb(img, preds[ref.parts[ds].index('rsho')], preds[ref.parts[ds].index('relb')], colors[2], 5 * s)
draw.limb(img, preds[ref.parts[ds].index('rwri')], preds[ref.parts[ds].index('relb')], colors[3], 5 * s)
if ds == 'mpii':
# MPII
# Draw head
draw.circle(img, preds[ref.parts[ds].index('head')], colors[4], 5 * s)
draw.circle(img, preds[ref.parts[ds].index('neck')], colors[4], 5 * s)
# Draw legs
draw.limb(img, preds[ref.parts[ds].index('lhip')], preds[ref.parts[ds].index('lkne')], colors[5], 5 * s)
draw.limb(img, preds[ref.parts[ds].index('lank')], preds[ref.parts[ds].index('lkne')], colors[6], 5 * s)
draw.limb(img, preds[ref.parts[ds].index('rhip')], preds[ref.parts[ds].index('rkne')], colors[7], 5 * s)
draw.limb(img, preds[ref.parts[ds].index('rank')], preds[ref.parts[ds].index('rkne')], colors[8], 5 * s)
elif ds == 'flic':
# FLIC
# Draw face
draw.circle(img, preds[ref.parts[ds].index('leye')], colors[4], 3 * s)
draw.circle(img, preds[ref.parts[ds].index('reye')], colors[4], 3 * s)
draw.circle(img, preds[ref.parts[ds].index('nose')], colors[4], 3 * s)
# Draw hips
draw.circle(img, preds[ref.parts[ds].index('lhip')], colors[5], 5 * s)
draw.circle(img, preds[ref.parts[ds].index('rhip')], colors[7], 5 * s)
return crop(img, c, s, res)
# =============================================================================
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import Namespace
import mock
import pytest
from .helper import broker_range
from kafka_utils.kafka_cluster_manager.cluster_info.error import RebalanceError
from kafka_utils.kafka_cluster_manager.cluster_info \
.partition_count_balancer import PartitionCountBalancer
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import calculate_partition_movement
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_broker_leader_counts
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_net_imbalance
from kafka_utils.kafka_cluster_manager.cluster_info \
.stats import get_replication_group_imbalance_stats
class TestPartitionCountBalancer(object):
@pytest.fixture
def create_balancer(self):
def build_balancer(cluster_topology, **kwargs):
args = mock.Mock(spec=Namespace)
args.balancer_args = []
args.configure_mock(**kwargs)
return PartitionCountBalancer(cluster_topology, args)
return build_balancer
def assert_valid(self, new_assignment, orig_assignment, orig_brokers):
"""Assert if new-assignment is valid based on given assignment.
Asserts the results for following parameters:
a) Asserts that keys in both assignments are same
b) Asserts that replication-factor of result remains same
c) Assert that new-replica-brokers are amongst given broker-list
"""
# Verify that partitions remain same
assert set(orig_assignment.keys()) == set(new_assignment.keys())
for t_p, new_replicas in new_assignment.iteritems():
orig_replicas = orig_assignment[t_p]
# Verify that new-replicas are amongst given broker-list
assert all([broker in orig_brokers for broker in new_replicas])
# Verify that replication-factor remains same
assert len(new_replicas) == len(orig_replicas)
def assert_leader_valid(self, orig_assignment, new_assignment):
"""Verify that new-assignment complies with just leader changes.
Following characteristics are verified for just leader-changes.
a) partitions remain same
b) replica set remains same
"""
# Partition-list remains unchanged
assert sorted(orig_assignment.keys()) == sorted(new_assignment.keys())
# Replica-set remains same
for partition, orig_replicas in orig_assignment.iteritems():
assert set(orig_replicas) == set(new_assignment[partition])
def test_rebalance_replication_groups(
self,
create_balancer,
create_cluster_topology,
default_assignment,
):
ct = create_cluster_topology()
cb = create_balancer(ct)
cb.rebalance_replication_groups()
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify that rg-group-balanced
assert net_imbal == 0
# Verify that new-assignment is valid
self.assert_valid(
ct.assignment,
default_assignment,
ct.brokers.keys(),
)
def test_rebalance_replication_groups_balanced(
self,
create_balancer,
create_cluster_topology,
):
# Replication-group is already balanced
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T0', 1), ['0', '3']),
((u'T2', 0), ['2']),
((u'T3', 0), ['0', '1', '2']),
]
)
ct = create_cluster_topology(assignment, broker_range(5))
cb = create_balancer(ct)
cb.rebalance_replication_groups()
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify that rg-group-balanced
assert net_imbal == 0
# Verify that new-assignment same as previous
assert ct.assignment == assignment
def test_rebalance_replication_groups_error(
self,
create_balancer,
create_cluster_topology,
):
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T0', 1), ['0', '3']),
((u'T2', 0), ['2']),
((u'T3', 0), ['0', '1', '9']), # broker 9 is not active
]
)
ct = create_cluster_topology(assignment, broker_range(5))
with pytest.raises(RebalanceError):
cb = create_balancer(ct)
cb.rebalance_replication_groups()
def test__rebalance_groups_partition_cnt_case1(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 6 partitions
# rg2 has 2 partitions
# Both rg's are balanced (based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '1', '2']),
((u'T1', 0), ['1']),
((u'T3', 0), ['1']),
((u'T2', 0), ['0', '1', '3']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Verify both replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains unaltered
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case2(
self,
create_balancer,
create_cluster_topology,
):
# 1 over-balanced, 2 under-balanced replication-groups
# rg1 has 4 partitions
# rg2 has 1 partition
# rg3 has 1 partition
# All rg's are balanced (based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '2']),
((u'T3', 1), ['0']),
((u'T3', 0), ['0']),
((u'T2', 0), ['0', '5']),
]
)
brokers = {
'0': mock.MagicMock(),
'2': mock.MagicMock(),
'5': mock.MagicMock(),
}
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance brokers
cb._rebalance_groups_partition_cnt()
# Verify all replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg3'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains 0
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case3(
self,
create_balancer,
create_cluster_topology,
):
# 1 over-balanced, 1 under-balanced, 1 opt-balanced replication-group
# rg1 has 3 partitions
# rg2 has 2 partitions
# rg3 has 1 partition
# All rg's are balanced (based on replica-count) initially
# Result: rg's will be balanced for partition-count
assignment = dict(
[
((u'T1', 1), ['0', '2']),
((u'T3', 1), ['2']),
((u'T3', 0), ['0']),
((u'T2', 0), ['0', '5']),
]
)
brokers = {
'0': mock.MagicMock(),
'2': mock.MagicMock(),
'5': mock.MagicMock(),
}
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance brokers across replication-groups
cb._rebalance_groups_partition_cnt()
# Verify all replication-groups have same partition-count
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg2'].partitions)
assert len(ct.rgs['rg1'].partitions) == len(ct.rgs['rg3'].partitions)
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements
assert total_movements == 1
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains 0
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case4(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 4 partitions
# rg2 has 2 partitions
# Both rg's are balanced (based on replica-count) initially
# Result: rg's cannot be balanced for partition-count since no broker
# is available to take a partition without duplicating a replica
assignment = dict(
[
((u'T1', 1), ['0', '1', '2']),
((u'T2', 0), ['0', '1', '2']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Verify no change in assignment
assert ct.assignment == assignment
def test__rebalance_groups_partition_cnt_case5(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 4 partitions
# rg2 has 2 partitions
# rg3 has 2 partitions
# Result: rg's will be balanced for partition-count
# All rg's will be balanced with just 1 partition-movement
brokers = {
"0": {"host": "host1"},
"1": {"host": "host2"},
"2": {"host": "host3"},
"3": {"host": "host4"},
"5": {"host": "host5"},
}
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T1', 0), ['1', '3']),
((u'T2', 0), ['0', '5']),
((u'T3', 0), ['1', '5']),
]
)
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Assert partition is moved from rg1 only
print(ct.assignment)
assert len(ct.rgs['rg1'].partitions) == 3
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 1
assert total_movements == 1
net_imbal, _ = get_replication_group_imbalance_stats(
ct.rgs.values(),
ct.partitions.values(),
)
# Verify replica-count imbalance remains unaltered
assert net_imbal == 0
def test__rebalance_groups_partition_cnt_case6(
self,
create_balancer,
create_cluster_topology,
):
# rg1 has 5 partitions
# rg2 has 1 partition
# rg3 has 1 partition
# Result: rg's will be balanced for partition-count
# All rg's will be balanced with 2 partition-movements
# This test case covers the aspect that even if the partition
# count difference b/w the replication-groups is > 1,
# we still move onto next replication-group if either of the
# replication-groups reaches the optimal partition-count.
brokers = {
"0": {"host": "host1"},
"1": {"host": "host2"},
"2": {"host": "host3"},
"3": {"host": "host4"},
"5": {"host": "host5"},
}
assignment = dict(
[
((u'T0', 0), ['0', '2']),
((u'T1', 0), ['1', '0']),
((u'T2', 0), ['0', '5']),
((u'T3', 0), ['1']),
]
)
ct = create_cluster_topology(assignment, brokers)
cb = create_balancer(ct)
# Re-balance replication-groups for partition-count
cb._rebalance_groups_partition_cnt()
# Assert final partition counts in replication-groups
assert len(ct.rgs['rg1'].partitions) == 3
assert len(ct.rgs['rg2'].partitions) == 2
assert len(ct.rgs['rg3'].partitions) == 2
_, total_movements = \
calculate_partition_movement(assignment, ct.assignment)
# Verify minimum partition movements 2
assert total_movements == 2
# Tests for leader-balancing
def test_rebalance_leaders_balanced_case1(
self,
create_balancer,
create_cluster_topology,
):
# Already balanced-assignment with evenly-distributed
# (broker-id: leader-count): {0: 1, 1:1, 2:1}
# opt-count: 3/3 = 1, extra-count: 3%3 = 0
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['2', '0']),
((u'T1', 0), ['0', '2']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
orig_assignment = ct.assignment
cb = create_balancer(ct)
cb.rebalance_leaders()
net_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
# No change in the already-balanced assignment
assert orig_assignment == ct.assignment
# Assert leader-balanced
assert net_imbal == 0
def test_rebalance_leaders_balanced_case2(
self,
create_balancer,
create_cluster_topology,
):
# Already balanced-assignment NOT evenly-distributed
# (broker-id: leader-count): {0: 0, 1: 1, 2: 1}
# opt-count: 2/3 = 0, extra-count: 2%3 = 2
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['2', '0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
orig_assignment = ct.assignment
cb = create_balancer(ct)
cb.rebalance_leaders()
net_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
# No change in the already-balanced assignment
assert orig_assignment == ct.assignment
# Assert leader-balanced
assert net_imbal == 0
def test_rebalance_leaders_unbalanced_case1(
self,
create_balancer,
create_cluster_topology,
):
# Balance leader-imbalance successfully
# (broker-id: leader-count): {0: 0, 1:2, 2:1}
# Net-leader-imbalance: 1
# opt-count: 3/3 = 1, extra-count: 3%3 = 0
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['2', '0']),
((u'T1', 0), ['1', '0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
orig_assignment = ct.assignment
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify if valid-leader assignment
self.assert_leader_valid(orig_assignment, ct.assignment)
# New leader-imbalance count should be less than the previous imbalance count
new_leaders_per_broker = {
broker.id: broker.count_preferred_replica()
for broker in ct.brokers.itervalues()
}
new_leader_imbal = get_net_imbalance(new_leaders_per_broker.values())
# Verify leader-balanced
assert new_leader_imbal == 0
# Verify partitions-changed assignment
assert new_leaders_per_broker['0'] == 1
assert new_leaders_per_broker['1'] == 1
assert new_leaders_per_broker['2'] == 1
def test_rebalance_leaders_unbalanced_case2(
self,
create_balancer,
create_cluster_topology,
):
# (Broker: leader-count): {0: 2, 1: 1, 2:0}
# opt-count: 3/3 = 1, extra-count = 0
# Leader-imbalance-value: 1
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T1', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2a(
self,
create_balancer,
create_cluster_topology,
):
# (Broker: leader-count): {0: 2, 1: 1, 2: 0, 3: 1}
# opt-count: 4/4 = 1, extra-count: 4%4 = 0
# Leader-imbalance-value: 1
# imbalanced-brokers: 0, 2; balanced-brokers: 1, 3
assignment = dict(
[
((u'T0', 0), ['3', '2']),
((u'T0', 1), ['1', '3']),
((u'T1', 1), ['0', '1']),
((u'T1', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
# Verify that (T0, 1) also swapped even if 1 and 3 were balanced
# Rebalancing through non-followers
replica_ids = [b.id for b in ct.partitions[('T0', 1)].replicas]
assert replica_ids == ['3', '1']
def test_rebalance_leaders_unbalanced_case2b(
self,
create_balancer,
create_cluster_topology,
):
assignment = dict(
[
((u'T0', 0), ['3', '2']),
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2c(
self,
create_balancer,
create_cluster_topology,
):
# Broker-2 imbalance value: 2 with different brokers
# Broker-2 requests leadership from multiple brokers (0, 1) once
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
((u'T2', 1), ['0']),
((u'T3', 0), ['3', '2']),
((u'T3', 1), ['1', '3']),
((u'T4', 0), ['1']),
((u'T4', 2), ['3']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2d(
self,
create_balancer,
create_cluster_topology,
):
# Broker-2 imbalanced with same brokers
# Broker-2 requests leadership from same broker-1 twice
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T1', 2), ['0']),
((u'T1', 3), ['1', '2']),
((u'T1', 4), ['0', '1']),
((u'T1', 5), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case2e(
self,
create_balancer,
create_cluster_topology,
):
# Imbalance-val 2
# Multiple imbalanced brokers (2, 5) get balanced through non-followers
# from multiple brokers (1, 4)
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
((u'T3', 0), ['4', '5']),
((u'T3', 1), ['3', '4']),
((u'T4', 0), ['3']),
]
)
ct = create_cluster_topology(assignment, broker_range(6))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case3(
self,
create_balancer,
create_cluster_topology,
):
# Imbalanced 0 and 2. No re-balance possible.
assignment = dict(
[
((u'T1', 0), ['1', '2']),
((u'T1', 1), ['0']),
((u'T2', 0), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify still leader-imbalanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 1
# No change in assignment
assert sorted(ct.assignment) == sorted(assignment)
def test_rebalance_leaders_unbalanced_case4(
self,
create_balancer,
create_cluster_topology,
):
# Imbalanced assignment
# Only a partial reduction of leader-imbalance is possible
# (Broker: leader-count): {0: 4, 1: 1, 2: 0}
# opt-count: 5/3 = 1, extra-count = 2
assignment = dict(
[
((u'T0', 0), ['1', '2']),
((u'T0', 1), ['0', '2']),
((u'T1', 0), ['0']),
((u'T1', 1), ['0']),
((u'T1', 2), ['0']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
net_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
cb = create_balancer(ct)
cb.rebalance_leaders()
new_leaders_per_broker = {
broker.id: broker.count_preferred_replica()
for broker in ct.brokers.itervalues()
}
new_net_imbal = get_net_imbalance(new_leaders_per_broker.values())
# Verify that net-imbalance has reduced but not zero
assert new_net_imbal > 0 and new_net_imbal < net_imbal
# Verify the changes in leaders-per-broker count
assert new_leaders_per_broker['2'] == 1
assert new_leaders_per_broker['1'] == 1
assert new_leaders_per_broker['0'] == 3
def test_rebalance_leaders_unbalanced_case2f(
self,
create_balancer,
create_cluster_topology,
):
assignment = dict(
[
((u'T0', 0), ['2', '0']),
((u'T1', 0), ['2', '0']),
((u'T1', 1), ['0']),
((u'T2', 0), ['1']),
((u'T2', 1), ['2']),
]
)
ct = create_cluster_topology(assignment, broker_range(3))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
def test_rebalance_leaders_unbalanced_case5(
self,
create_balancer,
create_cluster_topology,
):
# Special case wherein no broker is under-balanced
# but broker 0 is over-balanced
assignment = dict(
[
((u'T1', 1), ['0', '1']),
((u'T2', 0), ['0']),
((u'T2', 1), ['0']),
((u'T3', 0), ['2', '3']),
((u'T3', 1), ['3', '1']),
((u'T4', 0), ['1']),
]
)
ct = create_cluster_topology(assignment, broker_range(4))
cb = create_balancer(ct)
cb.rebalance_leaders()
# Verify leader-balanced
leader_imbal = get_net_imbalance(
get_broker_leader_counts(ct.brokers.values()),
)
assert leader_imbal == 0
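# Illustrative sketch (not part of the original tests): the opt-count /
# extra-count arithmetic the comments above refer to. With P leaders spread
# over B brokers, each broker should hold opt-count = P // B leaders, and
# extra-count = P % B brokers may hold one extra. The helper below is a
# plain-Python reading of that rule as exercised by these tests; it is not
# the kafka_utils get_net_imbalance() implementation itself.
def _example_net_leader_imbalance(leaders_per_broker):
    opt_count = sum(leaders_per_broker) // len(leaders_per_broker)
    extra_allowed = sum(leaders_per_broker) % len(leaders_per_broker)
    net = 0
    for count in leaders_per_broker:
        if count > opt_count:
            if extra_allowed > 0:
                # this broker is allowed opt_count + 1 leaders
                extra_allowed -= 1
                net += count - opt_count - 1
            else:
                net += count - opt_count
    return net
# e.g. _example_net_leader_imbalance([0, 2, 1]) == 1, matching the
# "Net-leader-imbalance: 1" comment in test_rebalance_leaders_unbalanced_case1.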
# =============================================================================
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test rest module"""
import json
import mock
import requests
from fiblary.common import exceptions
from fiblary.common import restapi
from fiblary.tests import utils
fake_user_agent = 'test_rapi'
fake_auth = '11223344556677889900'
fake_url = 'http://gopher.com'
fake_key = 'gopher'
fake_keys = 'gophers'
fake_username = 'admin'
fake_password = 'admin'
fake_gopher_mac = {
'id': 'g1',
'name': 'mac',
'actor': 'Mel Blanc',
}
fake_gopher_tosh = {
'id': 'g2',
'name': 'tosh',
'actor': 'Stan Freeberg',
}
fake_gopher_single = {
fake_key: fake_gopher_mac,
}
fake_gopher_list = {
fake_keys: [
fake_gopher_mac,
fake_gopher_tosh,
]
}
fake_headers = {
'User-Agent': fake_user_agent,
}
class FakeResponse(requests.Response):
def __init__(self, headers={}, status_code=None, data=None, encoding=None):
super(FakeResponse, self).__init__()
self.status_code = status_code
self.headers.update(headers)
self._content = json.dumps(data)
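# Illustrative sketch (not part of the original tests): because _content holds
# the JSON-encoded body, a FakeResponse behaves like a real requests.Response
# for the attributes the tests below rely on (.status_code and .json()).
def _example_fake_response():
    resp = FakeResponse(status_code=200, data=fake_gopher_single)
    assert resp.status_code == 200
    assert resp.json() == fake_gopher_single
    return resp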
@mock.patch('fiblary.common.restapi.requests.Session')
class TestRESTApi(utils.TestCase):
def test_request_get(self, session_mock):
resp = FakeResponse(status_code=200, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi(
user_agent=fake_user_agent,
)
gopher = api.request('GET', fake_url)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
headers={},
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher.status_code, 200)
self.assertEqual(gopher.json(), fake_gopher_single)
def test_request_get_return_300(self, session_mock):
resp = FakeResponse(status_code=300, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi(
user_agent=fake_user_agent,
)
gopher = api.request('GET', fake_url)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
headers={},
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher.status_code, 300)
self.assertEqual(gopher.json(), fake_gopher_single)
def test_request_get_fail_404(self, session_mock):
resp = FakeResponse(status_code=404, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi(
user_agent=fake_user_agent,
)
self.assertRaises(
exceptions.HTTPNotFound,
api.request,
'GET',
fake_url)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
headers={},
allow_redirects=True,
timeout=10
)
def test_request_get_auth(self, session_mock):
resp = FakeResponse(status_code=200, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
headers=mock.MagicMock(return_value={}),
)
api = restapi.RESTApi(
username=fake_username,
password=fake_password,
user_agent=fake_user_agent,
)
gopher = api.request('GET', fake_url)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
auth=(fake_username, fake_password),
headers={},
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher.json(), fake_gopher_single)
def test_request_post(self, session_mock):
resp = FakeResponse(status_code=200, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi(
user_agent=fake_user_agent,
)
data = fake_gopher_tosh
gopher = api.request('POST', fake_url, json=data)
session_mock.return_value.request.assert_called_with(
'POST',
fake_url,
headers={
'Content-Type': 'application/json',
},
allow_redirects=True,
data=json.dumps(data),
timeout=10
)
self.assertEqual(gopher.json(), fake_gopher_single)
# Methods
# TODO(dtroyer): add tests for the other HTTP method helpers
def test_delete(self, session_mock):
resp = FakeResponse(status_code=200, data=None)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi()
gopher = api.delete(fake_url)
session_mock.return_value.request.assert_called_with(
'DELETE',
fake_url,
headers=mock.ANY,
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher.status_code, 200)
# Commands
def test_create(self, session_mock):
resp = FakeResponse(status_code=200, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi()
data = fake_gopher_mac
# Test no key
gopher = api.create(fake_url, data=data)
session_mock.return_value.request.assert_called_with(
'POST',
fake_url,
headers=mock.ANY,
allow_redirects=True,
data=json.dumps(data),
timeout=10
)
self.assertEqual(gopher, fake_gopher_single)
# Test with key
gopher = api.create(fake_url, data=data, response_key=fake_key)
session_mock.return_value.request.assert_called_with(
'POST',
fake_url,
headers=mock.ANY,
allow_redirects=True,
data=json.dumps(data),
timeout=10
)
self.assertEqual(gopher, fake_gopher_mac)
def test_list(self, session_mock):
resp = FakeResponse(status_code=200, data=fake_gopher_list)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
# test base
api = restapi.RESTApi()
gopher = api.list(fake_url, response_key=fake_keys)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
headers=mock.ANY,
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher, [fake_gopher_mac, fake_gopher_tosh])
# test body
api = restapi.RESTApi()
data = {'qwerty': 1}
gopher = api.list(fake_url, response_key=fake_keys, data=data)
session_mock.return_value.request.assert_called_with(
'POST',
fake_url,
headers=mock.ANY,
allow_redirects=True,
data=json.dumps(data),
timeout=10
)
self.assertEqual(gopher, [fake_gopher_mac, fake_gopher_tosh])
# test query params
api = restapi.RESTApi()
params = {'qaz': '123'}
gophers = api.list(fake_url, response_key=fake_keys, params=params)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
headers=mock.ANY,
allow_redirects=True,
params=params,
timeout=10
)
self.assertEqual(gophers, [fake_gopher_mac, fake_gopher_tosh])
def test_set(self, session_mock):
new_gopher = fake_gopher_single
new_gopher[fake_key]['name'] = 'Chip'
resp = FakeResponse(status_code=200, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi()
data = fake_gopher_mac
data['name'] = 'Chip'
# Test no data, no key
gopher = api.set(fake_url)
session_mock.return_value.request.assert_called_with(
'PUT',
fake_url,
headers=mock.ANY,
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher, None)
# Test data, no key
gopher = api.set(fake_url, data=data)
session_mock.return_value.request.assert_called_with(
'PUT',
fake_url,
headers=mock.ANY,
allow_redirects=True,
data=json.dumps(data),
timeout=10
)
self.assertEqual(gopher, fake_gopher_single)
# NOTE(dtroyer): Key with no data is not tested since, without data,
# the response_key is moot
# Test data and key
gopher = api.set(fake_url, data=data, response_key=fake_key)
session_mock.return_value.request.assert_called_with(
'PUT',
fake_url,
headers=mock.ANY,
allow_redirects=True,
data=json.dumps(data),
timeout=10
)
self.assertEqual(gopher, fake_gopher_mac)
def test_show(self, session_mock):
resp = FakeResponse(status_code=200, data=fake_gopher_single)
session_mock.return_value = mock.MagicMock(
request=mock.MagicMock(return_value=resp),
)
api = restapi.RESTApi()
# Test no key
gopher = api.show(fake_url)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
headers=mock.ANY,
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher, fake_gopher_single)
# Test with key
gopher = api.show(fake_url, response_key=fake_key)
session_mock.return_value.request.assert_called_with(
'GET',
fake_url,
headers=mock.ANY,
allow_redirects=True,
timeout=10
)
self.assertEqual(gopher, fake_gopher_mac)
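# Illustrative sketch (not part of the original tests): the RESTApi surface
# exercised by the tests above, collected in one place. The calls mirror the
# ones made in the individual tests; nothing beyond those arguments is assumed.
def _example_restapi_usage():
    api = restapi.RESTApi(
        username=fake_username,
        password=fake_password,
        user_agent=fake_user_agent,
    )
    api.create(fake_url, data=fake_gopher_mac, response_key=fake_key)
    api.list(fake_url, response_key=fake_keys, params={'qaz': '123'})
    api.set(fake_url, data=fake_gopher_mac, response_key=fake_key)
    api.show(fake_url, response_key=fake_key)
    api.delete(fake_url)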
# =============================================================================
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'EVS.aa_ac_alt'
db.alter_column('evs', 'aa_ac_alt', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
# Changing field 'EVS.ea_ac_alt'
db.alter_column('evs', 'ea_ac_alt', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
# Changing field 'EVS.aa_ac_ref'
db.alter_column('evs', 'aa_ac_ref', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
# Changing field 'EVS.all_ac_ref'
db.alter_column('evs', 'all_ac_ref', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
# Changing field 'EVS.all_ac_alt'
db.alter_column('evs', 'all_ac_alt', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
def backwards(self, orm):
# Changing field 'EVS.aa_ac_alt'
db.alter_column('evs', 'aa_ac_alt', self.gf('django.db.models.fields.IntegerField')(max_length=20, null=True))
# Changing field 'EVS.ea_ac_alt'
db.alter_column('evs', 'ea_ac_alt', self.gf('django.db.models.fields.IntegerField')(max_length=20, null=True))
# Changing field 'EVS.aa_ac_ref'
db.alter_column('evs', 'aa_ac_ref', self.gf('django.db.models.fields.IntegerField')(max_length=20, null=True))
# Changing field 'EVS.all_ac_ref'
db.alter_column('evs', 'all_ac_ref', self.gf('django.db.models.fields.IntegerField')(max_length=20, null=True))
# Changing field 'EVS.all_ac_alt'
db.alter_column('evs', 'all_ac_alt', self.gf('django.db.models.fields.IntegerField')(max_length=20, null=True))
models = {
'genes.exon': {
'Meta': {'object_name': 'Exon', 'db_table': "'exon'"},
'end': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genes.Gene']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.IntegerField', [], {})
},
'genes.gene': {
'Meta': {'object_name': 'Gene', 'db_table': "'gene'"},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'gene_pubmed'", 'symmetrical': 'False'}),
'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}),
'families': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['genes.GeneFamily']", 'symmetrical': 'False', 'blank': 'True'}),
'hgnc_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['genes.GenePhenotype']", 'symmetrical': 'False'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['genes.Synonym']", 'db_table': "'gene_synonym'", 'symmetrical': 'False'})
},
'genes.genefamily': {
'Meta': {'object_name': 'GeneFamily', 'db_table': "'gene_family'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'})
},
'genes.genephenotype': {
'Meta': {'object_name': 'GenePhenotype', 'db_table': "'gene_phenotype'"},
'gene': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genes.Gene']"}),
'hgmd_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"})
},
'genes.synonym': {
'Meta': {'object_name': 'Synonym', 'db_table': "'synonym'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'genes.transcript': {
'Meta': {'object_name': 'Transcript', 'db_table': "'transcript'"},
'coding_end': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'coding_end_status': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'coding_start': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'coding_start_status': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'exon_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'exons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['genes.Exon']", 'db_table': "'transcript_exon'", 'symmetrical': 'False'}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genes.Gene']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'refseq_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'start': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'strand': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'genome.chromosome': {
'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'})
},
'literature.pubmed': {
'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"},
'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'phenotypes.phenotype': {
'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hpo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'})
},
'variants.effect': {
'Meta': {'ordering': "['order']", 'object_name': 'Effect', 'db_table': "'effect'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.EffectImpact']", 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.EffectRegion']", 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'variants.effectimpact': {
'Meta': {'ordering': "['order']", 'object_name': 'EffectImpact', 'db_table': "'effect_impact'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'variants.effectregion': {
'Meta': {'ordering': "['order']", 'object_name': 'EffectRegion', 'db_table': "'effect_region'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'variants.evs': {
'Meta': {'object_name': 'EVS', 'db_table': "'evs'"},
'aa_ac_alt': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'aa_ac_ref': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'aa_gtc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'aa_maf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'all_ac_alt': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'all_ac_ref': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'all_gtc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'all_maf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'clinical_association': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'ea_ac_alt': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ea_ac_ref': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ea_gtc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ea_maf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'gts': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'read_depth': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'evs'", 'to': "orm['variants.Variant']"})
},
'variants.functionalclass': {
'Meta': {'ordering': "['order']", 'object_name': 'FunctionalClass', 'db_table': "'variant_functional_class'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'variants.polyphen2': {
'Meta': {'object_name': 'PolyPhen2', 'db_table': "'polyphen2'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'refaa': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polyphen2'", 'to': "orm['variants.Variant']"})
},
'variants.sift': {
'Meta': {'object_name': 'Sift', 'db_table': "'sift'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'refaa': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'varaa': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sift'", 'to': "orm['variants.Variant']"})
},
'variants.thousandg': {
'Meta': {'object_name': 'ThousandG', 'db_table': "'1000g'"},
'aa': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'ac': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'afr_af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'amr_af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'an': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asn_af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'eur_af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thousandg'", 'to': "orm['variants.Variant']"})
},
'variants.variant': {
'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"},
'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}),
'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['variants.VariantPhenotype']", 'symmetrical': 'False'}),
'pos': ('django.db.models.fields.IntegerField', [], {}),
'ref': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'rsid': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.VariantType']", 'null': 'True'})
},
'variants.varianteffect': {
'Meta': {'object_name': 'VariantEffect', 'db_table': "'variant_effect'"},
'amino_acid_change': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'codon_change': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Effect']", 'null': 'True', 'blank': 'True'}),
'exon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genes.Exon']", 'null': 'True', 'blank': 'True'}),
'functional_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.FunctionalClass']", 'null': 'True', 'blank': 'True'}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genes.Gene']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'transcript': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genes.Transcript']", 'null': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'effects'", 'null': 'True', 'to': "orm['variants.Variant']"})
},
'variants.variantphenotype': {
'Meta': {'object_name': 'VariantPhenotype', 'db_table': "'variant_phenotype'"},
'hgmd_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'variants.varianttype': {
'Meta': {'ordering': "['order']", 'object_name': 'VariantType', 'db_table': "'variant_type'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['variants']
|
|
from datetime import time, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import Series, TimedeltaIndex, isna, to_timedelta
import pandas._testing as tm
class TestTimedeltas:
@pytest.mark.parametrize("readonly", [True, False])
def test_to_timedelta_readonly(self, readonly):
# GH#34857
arr = np.array([], dtype=object)
if readonly:
arr.setflags(write=False)
result = to_timedelta(arr)
expected = to_timedelta([])
tm.assert_index_equal(result, expected)
def test_to_timedelta(self):
result = to_timedelta(["", ""])
assert isna(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, "s")]))
expected = pd.Index(np.array([np.timedelta64(1, "s")]))
tm.assert_index_equal(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(["1d", "1days 00:00:01"]))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex(
[np.timedelta64(0, "ns"), np.timedelta64(10, "s").astype("m8[ns]")]
)
expected = to_timedelta([0, 10], unit="s")
tm.assert_index_equal(result, expected)
# arrays of various dtypes
arr = np.array([1] * 5, dtype="int64")
result = to_timedelta(arr, unit="s")
expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="int64")
result = to_timedelta(arr, unit="m")
expected = TimedeltaIndex([np.timedelta64(1, "m")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="int64")
result = to_timedelta(arr, unit="h")
expected = TimedeltaIndex([np.timedelta64(1, "h")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="timedelta64[s]")
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="timedelta64[D]")
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, "D")] * 5)
tm.assert_index_equal(result, expected)
def test_to_timedelta_dataframe(self):
# GH 11776
arr = np.arange(10).reshape(2, 5)
df = pd.DataFrame(np.arange(10).reshape(2, 5))
for arg in (arr, df):
with pytest.raises(TypeError, match="1-d array"):
to_timedelta(arg)
for errors in ["ignore", "raise", "coerce"]:
with pytest.raises(TypeError, match="1-d array"):
to_timedelta(arg, errors=errors)
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
with pytest.raises(ValueError, match=msg):
to_timedelta(["foo"], errors="never")
# these will error
msg = "invalid unit abbreviation: foo"
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit="foo")
with pytest.raises(ValueError, match=msg):
to_timedelta(1, unit="foo")
# time not supported ATM
msg = (
"Value must be Timedelta, string, integer, float, timedelta or convertible"
)
with pytest.raises(ValueError, match=msg):
to_timedelta(time(second=1))
assert to_timedelta(time(second=1), errors="coerce") is pd.NaT
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
to_timedelta(["foo", "bar"])
tm.assert_index_equal(
TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(["foo", "bar"], errors="coerce"),
)
tm.assert_index_equal(
TimedeltaIndex(["1 day", pd.NaT, "1 min"]),
to_timedelta(["1 day", "bar", "1 min"], errors="coerce"),
)
# gh-13613: these should not error because errors='ignore'
invalid_data = "apple"
assert invalid_data == to_timedelta(invalid_data, errors="ignore")
invalid_data = ["apple", "1 days"]
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors="ignore"),
)
invalid_data = pd.Index(["apple", "1 days"])
tm.assert_index_equal(invalid_data, to_timedelta(invalid_data, errors="ignore"))
invalid_data = Series(["apple", "1 days"])
tm.assert_series_equal(
invalid_data, to_timedelta(invalid_data, errors="ignore")
)
@pytest.mark.parametrize(
"val, warning",
[
("1M", FutureWarning),
("1 M", FutureWarning),
("1Y", FutureWarning),
("1 Y", FutureWarning),
("1y", FutureWarning),
("1 y", FutureWarning),
("1m", None),
("1 m", None),
("1 day", None),
("2day", None),
],
)
def test_unambiguous_timedelta_values(self, val, warning):
# GH36666 Deprecate use of strings denoting units with 'M', 'Y', 'm' or 'y'
# in pd.to_timedelta
with tm.assert_produces_warning(warning, check_stacklevel=False):
to_timedelta(val)
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, "s")])
result = Series(["00:00:01"]).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta("00:00:01")])
tm.assert_series_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64("NaT")
actual = pd.to_timedelta(Series(["00:00:01", np.nan]))
expected = Series(
[np.timedelta64(1000000000, "ns"), timedelta_NaT], dtype="<m8[ns]"
)
tm.assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(["00:00:01", pd.NaT]))
tm.assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
assert actual.value == timedelta_NaT.astype("int64")
actual = pd.to_timedelta(pd.NaT)
assert actual.value == timedelta_NaT.astype("int64")
def test_to_timedelta_float(self):
# https://github.com/pandas-dev/pandas/issues/25077
arr = np.arange(0, 1, 1e-6)[-10:]
result = pd.to_timedelta(arr, unit="s")
expected_asi8 = np.arange(999990000, 10 ** 9, 1000, dtype="int64")
tm.assert_numpy_array_equal(result.asi8, expected_asi8)
def test_to_timedelta_coerce_strings_unit(self):
arr = np.array([1, 2, "error"], dtype=object)
result = pd.to_timedelta(arr, unit="ns", errors="coerce")
expected = pd.to_timedelta([1, 2, pd.NaT], unit="ns")
tm.assert_index_equal(result, expected)
def test_to_timedelta_ignore_strings_unit(self):
arr = np.array([1, 2, "error"], dtype=object)
result = pd.to_timedelta(arr, unit="ns", errors="ignore")
tm.assert_numpy_array_equal(result, arr)
def test_to_timedelta_nullable_int64_dtype(self):
# GH 35574
expected = Series([timedelta(days=1), timedelta(days=2)])
result = to_timedelta(Series([1, 2], dtype="Int64"), unit="days")
tm.assert_series_equal(result, expected)
# IntegerArray Series with nulls
expected = Series([timedelta(days=1), None])
result = to_timedelta(Series([1, None], dtype="Int64"), unit="days")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
("input", "expected"),
[
("8:53:08.71800000001", "8:53:08.718"),
("8:53:08.718001", "8:53:08.718001"),
("8:53:08.7180000001", "8:53:08.7180000001"),
("-8:53:08.71800000001", "-8:53:08.718"),
("8:53:08.7180000089", "8:53:08.718000008"),
],
)
@pytest.mark.parametrize("func", [pd.Timedelta, pd.to_timedelta])
def test_to_timedelta_precision_over_nanos(self, input, expected, func):
# GH: 36738
expected = pd.Timedelta(expected)
result = func(input)
assert result == expected
def test_to_timedelta_zerodim(self):
# ndarray.item() incorrectly returns int for dt64[ns] and td64[ns]
dt64 = pd.Timestamp.now().to_datetime64()
arg = np.array(dt64)
msg = (
"Value must be Timedelta, string, integer, float, timedelta "
"or convertible, not datetime64"
)
with pytest.raises(ValueError, match=msg):
to_timedelta(arg)
arg2 = arg.view("m8[ns]")
result = to_timedelta(arg2)
assert isinstance(result, pd.Timedelta)
assert result.value == dt64.view("i8")
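# Illustrative summary of behaviour exercised above (a sketch, not part of the
# test suite): to_timedelta(5, unit="s") gives Timedelta("0 days 00:00:05"),
# to_timedelta("bad", errors="coerce") gives NaT, and errors="ignore" returns
# the invalid input unchanged.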
|
|
from statsmodels.compat.python import lzip
import numpy as np
from scipy import stats
from statsmodels.distributions import ECDF
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import add_constant
from . import utils
__all__ = ["qqplot", "qqplot_2samples", "qqline", "ProbPlot"]
class ProbPlot(object):
"""
Q-Q and P-P Probability Plots
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under kwargs.)
Parameters
----------
data : array_like
A 1d data array
dist : callable
Compare x against dist. A scipy.stats or statsmodels distribution. The
default is scipy.stats.distributions.norm (a standard normal). Can be
a SciPy frozen distribution.
fit : bool
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist are fit
automatically using dist.fit. The quantiles are formed from the
standardized data, after subtracting the fitted loc and dividing by
the fitted scale. fit cannot be used if dist is a SciPy frozen
distribution.
distargs : tuple
A tuple of arguments passed to dist to specify it fully so dist.ppf
may be called. distargs must not contain loc or scale. These values
must be passed using the loc or scale inputs. distargs cannot be used
if dist is a SciPy frozen distribution.
a : float
Offset for the plotting position of an expected order statistic, for
example. The plotting positions are given by
(i - a)/(nobs - 2*a + 1) for i in range(0,nobs+1)
loc : float
Location parameter for dist. Cannot be used if dist is a SciPy frozen
distribution.
scale : float
Scale parameter for dist. Cannot be used if dist is a SciPy frozen
distribution.
See Also
--------
scipy.stats.probplot
Notes
-----
1) Depends on matplotlib.
2) If `fit` is True then the parameters are fit using the
distribution's `fit()` method.
3) The call signatures for the `qqplot`, `ppplot`, and `probplot`
methods are similar, so examples 1 through 4 apply to all
three methods.
4) The three plotting methods are summarized below:
ppplot : Probability-Probability plot
Compares the sample and theoretical probabilities (percentiles).
qqplot : Quantile-Quantile plot
Compares the sample and theoretical quantiles
probplot : Probability plot
Same as a Q-Q plot, however probabilities are shown in the scale of
the theoretical distribution (x-axis) and the y-axis contains
unscaled quantiles of the sample data.
Examples
--------
The first example shows a Q-Q plot for regression residuals
>>> # example 1
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> model = sm.OLS(data.endog, data.exog)
>>> mod_fit = model.fit()
>>> res = mod_fit.resid # residuals
>>> pplot = sm.ProbPlot(res)
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 1 - qqplot - residuals of OLS fit")
>>> plt.show()
qqplot of the residuals against quantiles of t-distribution with 4
degrees of freedom:
>>> # example 2
>>> import scipy.stats as stats
>>> pplot = sm.ProbPlot(res, stats.t, distargs=(4,))
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 2 - qqplot - residuals against quantiles of t-dist")
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> # example 3
>>> pplot = sm.ProbPlot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 3 - qqplot - resids vs quantiles of t-dist")
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> # example 4
>>> pplot = sm.ProbPlot(res, stats.t, fit=True)
>>> fig = pplot.qqplot(line="45")
>>> h = plt.title("Ex. 4 - qqplot - resids vs. quantiles of fitted t-dist")
>>> plt.show()
A second `ProbPlot` object can be used to compare two separate sample
sets by using the `other` kwarg in the `qqplot` and `ppplot` methods.
>>> # example 5
>>> import numpy as np
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=37)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> fig = pp_x.qqplot(line="45", other=pp_y)
>>> h = plt.title("Ex. 5 - qqplot - compare two sample sets")
>>> plt.show()
    In qqplot, the sample size of `other` can be equal to or larger than that
    of the first. If it is larger, the `other` sample is reduced to the size
    of the first by interpolation.
>>> # example 6
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=57)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> fig = pp_x.qqplot(line="45", other=pp_y)
>>> title = "Ex. 6 - qqplot - compare different sample sizes"
>>> h = plt.title(title)
>>> plt.show()
In ppplot, sample size of `other` and the first can be different. `other`
will be used to estimate an empirical cumulative distribution function
(ECDF). ECDF(x) will be plotted against p(x)=0.5/n, 1.5/n, ..., (n-0.5)/n
where x are sorted samples from the first.
>>> # example 7
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=57)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> pp_y.ppplot(line="45", other=pp_x)
>>> plt.title("Ex. 7A- ppplot - compare two sample sets, other=pp_x")
>>> pp_x.ppplot(line="45", other=pp_y)
>>> plt.title("Ex. 7B- ppplot - compare two sample sets, other=pp_y")
>>> plt.show()
The following plot displays some options, follow the link to see the
code.
.. plot:: plots/graphics_gofplots_qqplot.py
"""
def __init__(
self,
data,
dist=stats.norm,
fit=False,
distargs=(),
a=0,
loc=0,
scale=1,
):
self.data = data
self.a = a
self.nobs = data.shape[0]
self.distargs = distargs
self.fit = fit
self._is_frozen = isinstance(dist, stats.distributions.rv_frozen)
if self._is_frozen and (
fit or loc != 0 or scale != 1 or distargs != ()
):
raise ValueError(
"Frozen distributions cannot be combined with fit, loc, scale"
" or distargs."
)
        # properties
self._cache = {}
if self._is_frozen:
self.dist = dist
dist_gen = dist.dist
shapes = dist_gen.shapes
if shapes is not None:
shape_args = tuple(map(str.strip, shapes.split(",")))
else:
shape_args = ()
numargs = len(shape_args)
args = dist.args
if len(args) >= numargs + 1:
self.loc = args[numargs]
else:
self.loc = dist.kwds.get("loc", loc)
if len(args) >= numargs + 2:
self.scale = args[numargs + 1]
else:
self.scale = dist.kwds.get("scale", scale)
fit_params = []
for i, arg in enumerate(shape_args):
if arg in dist.kwds:
value = dist.kwds[arg]
else:
value = dist.args[i]
fit_params.append(value)
self.fit_params = np.r_[fit_params, self.loc, self.scale]
elif fit:
self.fit_params = dist.fit(data)
self.loc = self.fit_params[-2]
self.scale = self.fit_params[-1]
if len(self.fit_params) > 2:
self.dist = dist(*self.fit_params[:-2], **dict(loc=0, scale=1))
else:
self.dist = dist(loc=0, scale=1)
elif distargs or loc != 0 or scale != 1:
try:
self.dist = dist(*distargs, **dict(loc=loc, scale=scale))
except Exception:
distargs = ", ".join([str(da) for da in distargs])
cmd = "dist({distargs}, loc={loc}, scale={scale})"
cmd = cmd.format(distargs=distargs, loc=loc, scale=scale)
raise TypeError(
"Initializing the distribution failed. This "
"can occur if distargs contains loc or scale. "
"The distribution initialization command "
"is:\n{cmd}".format(cmd=cmd)
)
self.loc = loc
self.scale = scale
self.fit_params = np.r_[distargs, loc, scale]
else:
self.dist = dist
self.loc = loc
self.scale = scale
self.fit_params = np.r_[loc, scale]
@cache_readonly
def theoretical_percentiles(self):
"""Theoretical percentiles"""
return plotting_pos(self.nobs, self.a)
@cache_readonly
def theoretical_quantiles(self):
"""Theoretical quantiles"""
try:
return self.dist.ppf(self.theoretical_percentiles)
except TypeError:
            msg = "{0} requires more parameters to compute ppf".format(
self.dist.name,
)
raise TypeError(msg)
except Exception as exc:
msg = "failed to compute the ppf of {0}".format(self.dist.name)
raise type(exc)(msg)
@cache_readonly
def sorted_data(self):
"""sorted data"""
sorted_data = np.array(self.data, copy=True)
sorted_data.sort()
return sorted_data
@cache_readonly
def sample_quantiles(self):
"""sample quantiles"""
if self.fit and self.loc != 0 and self.scale != 1:
return (self.sorted_data - self.loc) / self.scale
else:
return self.sorted_data
@cache_readonly
def sample_percentiles(self):
"""Sample percentiles"""
_check_for(self.dist, "cdf")
if self._is_frozen:
return self.dist.cdf(self.sorted_data)
quantiles = (self.sorted_data - self.fit_params[-2]) / self.fit_params[
-1
]
return self.dist.cdf(quantiles)
def ppplot(
self,
xlabel=None,
ylabel=None,
line=None,
other=None,
ax=None,
**plotkwargs,
):
"""
Plot of the percentiles of x versus the percentiles of a distribution.
Parameters
----------
xlabel : str or None, optional
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : str or None, optional
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
        line : {None, "45", "s", "r", "q"}, optional
Options for the reference line to which the data is compared:
- "45": 45-degree line
- "s": standardized line, the expected order statistics are
scaled by the standard deviation of the given sample and have
the mean added to them
- "r": A regression line is fit
- "q": A line is fit through the quartiles.
- None: by default no reference line is added to the plot.
other : ProbPlot, array_like, or None, optional
If provided, ECDF(x) will be plotted against p(x) where x are
sorted samples from `self`. ECDF is an empirical cumulative
distribution function estimated from `other` and
p(x) = 0.5/n, 1.5/n, ..., (n-0.5)/n where n is the number of
            samples in `self`. If an array-like object is provided, it will be
            turned into a `ProbPlot` instance using default parameters. If not
            provided (default), `self.dist(x)` is plotted against p(x).
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
p_x = self.theoretical_percentiles
ecdf_x = ECDF(other.sample_quantiles)(self.sample_quantiles)
fig, ax = _do_plot(
p_x, ecdf_x, self.dist, ax=ax, line=line, **plotkwargs
)
if xlabel is None:
xlabel = "Probabilities of 2nd Sample"
if ylabel is None:
ylabel = "Probabilities of 1st Sample"
else:
fig, ax = _do_plot(
self.theoretical_percentiles,
self.sample_percentiles,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Theoretical Probabilities"
if ylabel is None:
ylabel = "Sample Probabilities"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
return fig
def qqplot(
self,
xlabel=None,
ylabel=None,
line=None,
other=None,
ax=None,
swap: bool = False,
**plotkwargs,
):
"""
Plot of the quantiles of x versus the quantiles/ppf of a distribution.
Can also be used to plot against the quantiles of another `ProbPlot`
instance.
Parameters
----------
xlabel : {None, str}
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : {None, str}
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
        line : {None, "45", "s", "r", "q"}, optional
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
other : {ProbPlot, array_like, None}, optional
If provided, the sample quantiles of this `ProbPlot` instance are
plotted against the sample quantiles of the `other` `ProbPlot`
instance. Sample size of `other` must be equal or larger than
this `ProbPlot` instance. If the sample size is larger, sample
quantiles of `other` will be interpolated to match the sample size
of this `ProbPlot` instance. If an array-like object is provided,
it will be turned into a `ProbPlot` instance using default
parameters. If not provided (default), the theoretical quantiles
are used.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
swap : bool, optional
Flag indicating to swap the x and y labels.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
s_self = self.sample_quantiles
s_other = other.sample_quantiles
if len(s_self) > len(s_other):
raise ValueError(
"Sample size of `other` must be equal or "
+ "larger than this `ProbPlot` instance"
)
elif len(s_self) < len(s_other):
# Use quantiles of the smaller set and interpolate quantiles of
# the larger data set
p = plotting_pos(self.nobs, self.a)
s_other = stats.mstats.mquantiles(s_other, p)
fig, ax = _do_plot(
s_other, s_self, self.dist, ax=ax, line=line, **plotkwargs
)
if xlabel is None:
xlabel = "Quantiles of 2nd Sample"
if ylabel is None:
ylabel = "Quantiles of 1st Sample"
if swap:
xlabel, ylabel = ylabel, xlabel
else:
fig, ax = _do_plot(
self.theoretical_quantiles,
self.sample_quantiles,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Theoretical Quantiles"
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return fig
def probplot(
self,
xlabel=None,
ylabel=None,
line=None,
exceed=False,
ax=None,
**plotkwargs,
):
"""
Plot of unscaled quantiles of x against the prob of a distribution.
The x-axis is scaled linearly with the quantiles, but the probabilities
are used to label the axis.
Parameters
----------
xlabel : {None, str}, optional
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : {None, str}, optional
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
        line : {None, "45", "s", "r", "q"}, optional
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
exceed : bool, optional
If False (default) the raw sample quantiles are plotted against
            the theoretical quantiles, showing the probability that a sample will
not exceed a given value. If True, the theoretical quantiles are
flipped such that the figure displays the probability that a
sample will exceed a given value.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if exceed:
fig, ax = _do_plot(
self.theoretical_quantiles[::-1],
self.sorted_data,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Probability of Exceedance (%)"
else:
fig, ax = _do_plot(
self.theoretical_quantiles,
self.sorted_data,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Non-exceedance Probability (%)"
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
_fmt_probplot_axis(ax, self.dist, self.nobs)
return fig
def qqplot(
data,
dist=stats.norm,
distargs=(),
a=0,
loc=0,
scale=1,
fit=False,
line=None,
ax=None,
**plotkwargs,
):
"""
Q-Q plot of the quantiles of x versus the quantiles/ppf of a distribution.
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under Parameters.)
Parameters
----------
data : array_like
A 1d data array.
dist : callable
Comparison distribution. The default is
scipy.stats.distributions.norm (a standard normal).
distargs : tuple
A tuple of arguments passed to dist to specify it fully
so dist.ppf may be called.
a : float
Offset for the plotting position of an expected order statistic, for
example. The plotting positions are given by (i - a)/(nobs - 2*a + 1)
for i in range(0,nobs+1)
loc : float
Location parameter for dist
scale : float
Scale parameter for dist
fit : bool
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist
are fit automatically using dist.fit. The quantiles are formed
from the standardized data, after subtracting the fitted loc
and dividing by the fitted scale.
line : {None, "45", "s", "r", "q"}
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
**plotkwargs
Additional matplotlib arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
scipy.stats.probplot
Notes
-----
Depends on matplotlib. If `fit` is True then the parameters are fit using
the distribution's fit() method.
Examples
--------
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.load()
>>> exog = sm.add_constant(data.exog)
>>> mod_fit = sm.OLS(data.endog, exog).fit()
>>> res = mod_fit.resid # residuals
>>> fig = sm.qqplot(res)
>>> plt.show()
qqplot of the residuals against quantiles of t-distribution with 4 degrees
of freedom:
>>> import scipy.stats as stats
>>> fig = sm.qqplot(res, stats.t, distargs=(4,))
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> fig = sm.qqplot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> fig = sm.qqplot(res, stats.t, fit=True, line="45")
>>> plt.show()
The following plot displays some options, follow the link to see the code.
.. plot:: plots/graphics_gofplots_qqplot.py
"""
probplot = ProbPlot(
data, dist=dist, distargs=distargs, fit=fit, a=a, loc=loc, scale=scale
)
fig = probplot.qqplot(ax=ax, line=line, **plotkwargs)
return fig
def qqplot_2samples(
data1, data2, xlabel=None, ylabel=None, line=None, ax=None
):
"""
Q-Q Plot of two samples' quantiles.
Can take either two `ProbPlot` instances or two array-like objects. In the
case of the latter, both inputs will be converted to `ProbPlot` instances
using only the default values - so use `ProbPlot` instances if
finer-grained control of the quantile computations is required.
Parameters
----------
data1 : {array_like, ProbPlot}
Data to plot along x axis. If the sample sizes are unequal, the longer
series is always plotted along the x-axis.
data2 : {array_like, ProbPlot}
Data to plot along y axis. Does not need to have the same number of
observations as data 1. If the sample sizes are unequal, the longer
series is always plotted along the x-axis.
xlabel : {None, str}
User-provided labels for the x-axis. If None (default),
other values are used.
ylabel : {None, str}
User-provided labels for the y-axis. If None (default),
other values are used.
    line : {None, "45", "s", "r", "q"}
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
scipy.stats.probplot
Notes
-----
1) Depends on matplotlib.
2) If `data1` and `data2` are not `ProbPlot` instances, instances will be
created using the default parameters. Therefore, it is recommended to use
    `ProbPlot` instances if fine-grained control is needed in the computation
of the quantiles.
Examples
--------
>>> import statsmodels.api as sm
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from statsmodels.graphics.gofplots import qqplot_2samples
>>> x = np.random.normal(loc=8.5, scale=2.5, size=37)
>>> y = np.random.normal(loc=8.0, scale=3.0, size=37)
>>> pp_x = sm.ProbPlot(x)
>>> pp_y = sm.ProbPlot(y)
>>> qqplot_2samples(pp_x, pp_y)
>>> plt.show()
.. plot:: plots/graphics_gofplots_qqplot_2samples.py
>>> fig = qqplot_2samples(pp_x, pp_y, xlabel=None, ylabel=None,
... line=None, ax=None)
"""
if not isinstance(data1, ProbPlot):
data1 = ProbPlot(data1)
if not isinstance(data2, ProbPlot):
data2 = ProbPlot(data2)
if data2.data.shape[0] > data1.data.shape[0]:
fig = data1.qqplot(
xlabel=ylabel, ylabel=xlabel, line=line, other=data2, ax=ax
)
else:
fig = data2.qqplot(
xlabel=ylabel,
ylabel=xlabel,
line=line,
other=data1,
ax=ax,
swap=True,
)
return fig
def qqline(ax, line, x=None, y=None, dist=None, fmt="r-", **lineoptions):
"""
Plot a reference line for a qqplot.
Parameters
----------
ax : matplotlib axes instance
The axes on which to plot the line
line : str {"45","r","s","q"}
        Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled by
the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - By default no reference line is added to the plot.
x : ndarray
X data for plot. Not needed if line is "45".
y : ndarray
Y data for plot. Not needed if line is "45".
dist : scipy.stats.distribution
A scipy.stats distribution, needed if line is "q".
fmt : str, optional
Line format string passed to `plot`.
**lineoptions
Additional arguments to be passed to the `plot` command.
Notes
-----
There is no return value. The line is plotted on the given `ax`.
Examples
--------
Import the food expenditure dataset. Plot annual food expenditure on x-axis
and household income on y-axis. Use qqline to add regression line into the
plot.
>>> import statsmodels.api as sm
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from statsmodels.graphics.gofplots import qqline
>>> foodexp = sm.datasets.engel.load()
>>> x = foodexp.exog
>>> y = foodexp.endog
>>> ax = plt.subplot(111)
>>> plt.scatter(x, y)
>>> ax.set_xlabel(foodexp.exog_name[0])
>>> ax.set_ylabel(foodexp.endog_name)
>>> qqline(ax, "r", x, y)
>>> plt.show()
.. plot:: plots/graphics_gofplots_qqplot_qqline.py
"""
lineoptions = lineoptions.copy()
for ls in ("-", "--", "-.", ":"):
if ls in fmt:
lineoptions.setdefault("linestyle", ls)
fmt = fmt.replace(ls, "")
break
for marker in (
".",
",",
"o",
"v",
"^",
"<",
">",
"1",
"2",
"3",
"4",
"8",
"s",
"p",
"P",
"*",
"h",
"H",
"+",
"x",
"X",
"D",
"d",
"|",
"_",
):
if marker in fmt:
lineoptions.setdefault("marker", marker)
fmt = fmt.replace(marker, "")
break
if fmt:
lineoptions.setdefault("color", fmt)
if line == "45":
end_pts = lzip(ax.get_xlim(), ax.get_ylim())
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(end_pts, end_pts, **lineoptions)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)
return # does this have any side effects?
if x is None or y is None:
raise ValueError("If line is not 45, x and y cannot be None.")
x = np.array(x)
y = np.array(y)
if line == "r":
# could use ax.lines[0].get_xdata(), get_ydata(),
# but don't know axes are "clean"
y = OLS(y, add_constant(x)).fit().fittedvalues
ax.plot(x, y, **lineoptions)
elif line == "s":
m, b = np.std(y), np.mean(y)
ref_line = x * m + b
ax.plot(x, ref_line, **lineoptions)
elif line == "q":
_check_for(dist, "ppf")
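        # The "q" line passes through the quartile pairs: its slope is the
        # ratio of the sample IQR to the theoretical IQR, and its intercept is
        # chosen so the line goes through the 25th-percentile pair.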
q25 = stats.scoreatpercentile(y, 25)
q75 = stats.scoreatpercentile(y, 75)
theoretical_quartiles = dist.ppf([0.25, 0.75])
m = (q75 - q25) / np.diff(theoretical_quartiles)
b = q25 - m * theoretical_quartiles[0]
ax.plot(x, m * x + b, **lineoptions)
# about 10x faster than plotting_position in sandbox and mstats
def plotting_pos(nobs, a=0.0, b=None):
"""
Generates sequence of plotting positions
Parameters
----------
nobs : int
Number of probability points to plot
a : float, default 0.0
alpha parameter for the plotting position of an expected order
statistic
b : float, default None
beta parameter for the plotting position of an expected order
statistic. If None, then b is set to a.
Returns
-------
ndarray
The plotting positions
Notes
-----
The plotting positions are given by (i - a)/(nobs + 1 - a - b) for i in
range(1, nobs+1)
See Also
--------
scipy.stats.mstats.plotting_positions
Additional information on alpha and beta
"""
b = a if b is None else b
return (np.arange(1.0, nobs + 1) - a) / (nobs + 1 - a - b)
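# Worked example (illustrative): with the defaults a = b = 0, plotting_pos(5)
# gives i / (nobs + 1) for i = 1..5, i.e. approximately
# array([0.1667, 0.3333, 0.5, 0.6667, 0.8333]).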
def _fmt_probplot_axis(ax, dist, nobs):
"""
Formats a theoretical quantile axis to display the corresponding
probabilities on the quantiles' scale.
Parameters
----------
    ax : AxesSubplot
        The axis to be formatted
    dist : scipy.stats.distribution
        A scipy.stats distribution sufficiently specified to implement its
        ppf() method.
    nobs : scalar
        Number of observations in the sample
Returns
-------
There is no return value. This operates on `ax` in place
"""
_check_for(dist, "ppf")
axis_probs = np.linspace(10, 90, 9, dtype=float)
small = np.array([1.0, 2, 5])
axis_probs = np.r_[small, axis_probs, 100 - small[::-1]]
if nobs >= 50:
axis_probs = np.r_[small / 10, axis_probs, 100 - small[::-1] / 10]
if nobs >= 500:
axis_probs = np.r_[small / 100, axis_probs, 100 - small[::-1] / 100]
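    # For example, with nobs < 50 the tick probabilities are
    # 1, 2, 5, 10, 20, ..., 90, 95, 98, 99 percent; larger samples add finer
    # ticks in the tails.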
axis_probs /= 100.0
axis_qntls = dist.ppf(axis_probs)
ax.set_xticks(axis_qntls)
ax.set_xticklabels(
axis_probs * 100,
rotation=45,
rotation_mode="anchor",
horizontalalignment="right",
verticalalignment="center",
)
ax.set_xlim([axis_qntls.min(), axis_qntls.max()])
def _do_plot(
x, y, dist=None, line=None, ax=None, fmt="b", step=False, **kwargs
):
"""
Boiler plate plotting function for the `ppplot`, `qqplot`, and
`probplot` methods of the `ProbPlot` class
Parameters
----------
x : array_like
X-axis data to be plotted
y : array_like
Y-axis data to be plotted
dist : scipy.stats.distribution
A scipy.stats distribution, needed if `line` is "q".
line : {"45", "s", "r", "q", None}, default None
Options for the reference line to which the data is compared.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
fmt : str, optional
matplotlib-compatible formatting string for the data markers
kwargs : keywords
These are passed to matplotlib.plot
Returns
-------
fig : Figure
The figure containing `ax`.
ax : AxesSubplot
The original axes if provided. Otherwise a new instance.
"""
plot_style = {
"marker": "o",
"markerfacecolor": "C0",
"markeredgecolor": "C0",
"linestyle": "none",
}
plot_style.update(**kwargs)
where = plot_style.pop("where", "pre")
fig, ax = utils.create_mpl_ax(ax)
ax.set_xmargin(0.02)
if step:
ax.step(x, y, fmt, where=where, **plot_style)
else:
ax.plot(x, y, fmt, **plot_style)
if line:
if line not in ["r", "q", "45", "s"]:
msg = "%s option for line not understood" % line
raise ValueError(msg)
qqline(ax, line, x=x, y=y, dist=dist)
return fig, ax
def _check_for(dist, attr="ppf"):
if not hasattr(dist, attr):
raise AttributeError(f"distribution must have a {attr} method")
|
|
# -*- test-case-name: twisted.application.test.test_internet,twisted.test.test_application,twisted.test.test_cooperator -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Reactor-based Services
Here are services to run clients, servers and periodic services using
the reactor.
If you want to run a server service, L{StreamServerEndpointService} defines a
service that can wrap an arbitrary L{IStreamServerEndpoint
<twisted.internet.interfaces.IStreamServerEndpoint>}
as an L{IService}. See also L{twisted.application.strports.service} for
constructing one of these directly from a descriptive string.
Additionally, this module (dynamically) defines various Service subclasses that
let you represent clients and servers in a Service hierarchy. Endpoints APIs
should be preferred for stream server services, but since those APIs do not yet
exist for clients or datagram services, many of these are still useful.
They are as follows::
TCPServer, TCPClient,
UNIXServer, UNIXClient,
SSLServer, SSLClient,
UDPServer, UDPClient,
UNIXDatagramServer, UNIXDatagramClient,
MulticastServer
These classes take arbitrary arguments in their constructors and pass
them straight on to their respective reactor.listenXXX or
reactor.connectXXX calls.
For example, the following service starts a web server on port 8080:
C{TCPServer(8080, server.Site(r))}. See the documentation for the
reactor.listen/connect* methods for more information.
"""
import warnings
from twisted.python import log
from twisted.application import service
from twisted.internet import task
from twisted.internet.defer import CancelledError
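# Illustrative sketch (assumes a ready protocol ``factory`` and a parent
# ``application``; not part of this module's API):
#
#     from twisted.internet import endpoints, reactor
#     endpoint = endpoints.TCP4ServerEndpoint(reactor, 8080)
#     svc = StreamServerEndpointService(endpoint, factory)
#     svc.setServiceParent(application)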
def _maybeGlobalReactor(maybeReactor):
"""
@return: the argument, or the global reactor if the argument is C{None}.
"""
if maybeReactor is None:
from twisted.internet import reactor
return reactor
else:
return maybeReactor
class _VolatileDataService(service.Service):
volatile = []
def __getstate__(self):
d = service.Service.__getstate__(self)
for attr in self.volatile:
if attr in d:
del d[attr]
return d
class _AbstractServer(_VolatileDataService):
"""
    @cvar volatile: list of attributes to remove from pickling.
@type volatile: C{list}
@ivar method: the type of method to call on the reactor, one of B{TCP},
B{UDP}, B{SSL} or B{UNIX}.
@type method: C{str}
@ivar reactor: the current running reactor.
@type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
C{IReactorSSL} or C{IReactorUnix}.
@ivar _port: instance of port set when the service is started.
@type _port: a provider of L{twisted.internet.interfaces.IListeningPort}.
"""
volatile = ['_port']
method = None
reactor = None
_port = None
def __init__(self, *args, **kwargs):
self.args = args
if 'reactor' in kwargs:
self.reactor = kwargs.pop("reactor")
self.kwargs = kwargs
def privilegedStartService(self):
service.Service.privilegedStartService(self)
self._port = self._getPort()
def startService(self):
service.Service.startService(self)
if self._port is None:
self._port = self._getPort()
def stopService(self):
service.Service.stopService(self)
# TODO: if startup failed, should shutdown skip stopListening?
# _port won't exist
if self._port is not None:
d = self._port.stopListening()
del self._port
return d
def _getPort(self):
"""
Wrapper around the appropriate listen method of the reactor.
@return: the port object returned by the listen method.
@rtype: an object providing
L{twisted.internet.interfaces.IListeningPort}.
"""
return getattr(_maybeGlobalReactor(self.reactor),
'listen%s' % (self.method,))(*self.args, **self.kwargs)
class _AbstractClient(_VolatileDataService):
"""
    @cvar volatile: list of attributes to remove from pickling.
@type volatile: C{list}
@ivar method: the type of method to call on the reactor, one of B{TCP},
B{UDP}, B{SSL} or B{UNIX}.
@type method: C{str}
@ivar reactor: the current running reactor.
@type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
C{IReactorSSL} or C{IReactorUnix}.
@ivar _connection: instance of connection set when the service is started.
@type _connection: a provider of L{twisted.internet.interfaces.IConnector}.
"""
volatile = ['_connection']
method = None
reactor = None
_connection = None
def __init__(self, *args, **kwargs):
self.args = args
if 'reactor' in kwargs:
self.reactor = kwargs.pop("reactor")
self.kwargs = kwargs
def startService(self):
service.Service.startService(self)
self._connection = self._getConnection()
def stopService(self):
service.Service.stopService(self)
if self._connection is not None:
self._connection.disconnect()
del self._connection
def _getConnection(self):
"""
Wrapper around the appropriate connect method of the reactor.
@return: the port object returned by the connect method.
@rtype: an object providing L{twisted.internet.interfaces.IConnector}.
"""
return getattr(_maybeGlobalReactor(self.reactor),
'connect%s' % (self.method,))(*self.args, **self.kwargs)
_doc={
'Client':
"""Connect to %(tran)s
Call reactor.connect%(tran)s when the service starts, with the
arguments given to the constructor.
""",
'Server':
"""Serve %(tran)s clients
Call reactor.listen%(tran)s when the service starts, with the
arguments given to the constructor. When the service stops,
stop listening. See twisted.internet.interfaces for documentation
on arguments to the reactor method.
""",
}
import types
for tran in 'TCP UNIX SSL UDP UNIXDatagram Multicast'.split():
for side in 'Server Client'.split():
if tran == "Multicast" and side == "Client":
continue
base = globals()['_Abstract'+side]
doc = _doc[side] % vars()
klass = types.ClassType(tran+side, (base,),
{'method': tran, '__doc__': doc})
globals()[tran+side] = klass
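# The loop above generates TCPServer, TCPClient, UNIXServer, UNIXClient,
# SSLServer, SSLClient, UDPServer, UDPClient, UNIXDatagramServer,
# UNIXDatagramClient and MulticastServer (there is no MulticastClient).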
class TimerService(_VolatileDataService):
"""Service to periodically call a function
Every C{step} seconds call the given function with the given arguments.
The service starts the calls when it starts, and cancels them
when it stops.
"""
volatile = ['_loop']
def __init__(self, step, callable, *args, **kwargs):
self.step = step
self.call = (callable, args, kwargs)
def startService(self):
service.Service.startService(self)
callable, args, kwargs = self.call
# we have to make a new LoopingCall each time we're started, because
# an active LoopingCall remains active when serialized. If
# LoopingCall were a _VolatileDataService, we wouldn't need to do
# this.
self._loop = task.LoopingCall(callable, *args, **kwargs)
self._loop.start(self.step, now=True).addErrback(self._failed)
def _failed(self, why):
# make a note that the LoopingCall is no longer looping, so we don't
# try to shut it down a second time in stopService. I think this
# should be in LoopingCall. -warner
self._loop.running = False
log.err(why)
def stopService(self):
if self._loop.running:
self._loop.stop()
return service.Service.stopService(self)
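# Illustrative usage (a sketch): TimerService(30, log.msg, "heartbeat") calls
# log.msg("heartbeat") immediately on start and then every 30 seconds until
# the service is stopped.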
class CooperatorService(service.Service):
"""
Simple L{service.IService} which starts and stops a L{twisted.internet.task.Cooperator}.
"""
def __init__(self):
self.coop = task.Cooperator(started=False)
def coiterate(self, iterator):
return self.coop.coiterate(iterator)
def startService(self):
self.coop.start()
def stopService(self):
self.coop.stop()
class StreamServerEndpointService(service.Service, object):
"""
A L{StreamServerEndpointService} is an L{IService} which runs a server on a
listening port described by an L{IStreamServerEndpoint
<twisted.internet.interfaces.IStreamServerEndpoint>}.
@ivar factory: A server factory which will be used to listen on the
endpoint.
@ivar endpoint: An L{IStreamServerEndpoint
<twisted.internet.interfaces.IStreamServerEndpoint>} provider
which will be used to listen when the service starts.
@ivar _waitingForPort: a Deferred, if C{listen} has yet been invoked on the
endpoint, otherwise None.
@ivar _raiseSynchronously: Defines error-handling behavior for the case
where C{listen(...)} raises an exception before C{startService} or
C{privilegedStartService} have completed.
@type _raiseSynchronously: C{bool}
@since: 10.2
"""
_raiseSynchronously = None
def __init__(self, endpoint, factory):
self.endpoint = endpoint
self.factory = factory
self._waitingForPort = None
def privilegedStartService(self):
"""
Start listening on the endpoint.
"""
service.Service.privilegedStartService(self)
self._waitingForPort = self.endpoint.listen(self.factory)
raisedNow = []
def handleIt(err):
if self._raiseSynchronously:
raisedNow.append(err)
elif not err.check(CancelledError):
log.err(err)
self._waitingForPort.addErrback(handleIt)
if raisedNow:
raisedNow[0].raiseException()
def startService(self):
"""
Start listening on the endpoint, unless L{privilegedStartService} got
around to it already.
"""
service.Service.startService(self)
if self._waitingForPort is None:
self.privilegedStartService()
def stopService(self):
"""
Stop listening on the port if it is already listening, otherwise,
cancel the attempt to listen.
@return: a L{Deferred<twisted.internet.defer.Deferred>} which fires
with C{None} when the port has stopped listening.
"""
self._waitingForPort.cancel()
def stopIt(port):
if port is not None:
return port.stopListening()
d = self._waitingForPort.addCallback(stopIt)
def stop(passthrough):
self.running = False
return passthrough
d.addBoth(stop)
return d
__all__ = (['TimerService', 'CooperatorService', 'MulticastServer',
'StreamServerEndpointService'] +
[tran+side
for tran in 'TCP UNIX SSL UDP UNIXDatagram'.split()
for side in 'Server Client'.split()])
|
|
# Zawgyi<>Unicode converter python module
# Based on rules from Parabaik Myanmar Text Converter
# Copyright (C) 2014 Ngwe Tun (Solveware Solution)
# Copyright (C) 2014 Thura Hlaing
# Copyright (C) 2015 Kaung Htet Zaw
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#--
import re
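# Illustrative usage (a sketch; the rule-based mapping means a round trip may
# not be byte-identical for every input):
#
#     zawgyi_text = uni512zg1("\u1019\u103c\u1014\u103a\u1019\u102c")  # Unicode -> Zawgyi
#     unicode_text = zg12uni51(zawgyi_text)                            # Zawgyi -> Unicode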
def uni512zg1(input_text=""):
output_text = input_text
output_text = re.sub('\u104e\u1004\u103a\u1038', '\u104e', output_text, flags=re.M)
output_text = re.sub('\u102b\u103a', '\u105a', output_text, flags=re.M)
output_text = re.sub('\u102d\u1036', '\u108e', output_text, flags=re.M)
output_text = re.sub('\u103f', '\u1086', output_text, flags=re.M)
output_text = re.sub('(?<=\u102f)\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u102f\u1036)\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u1030)\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u1030\u1036)\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u1014)\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u1014[\u103a\u1032])\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u103b)\u1037', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=\u103b[\u1032\u1036])\u1037', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=\u103d)\u1037', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=\u103d[\u1032])\u1037', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c\u103d])\u102f', '\u1033', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c\u103d][\u102d\u1036])\u102f', '\u1033', output_text, flags=re.M)
output_text = re.sub('(?<=(\u1039[\u1000-\u1021]))\u102f', '\u1033', output_text, flags=re.M)
output_text = re.sub('(?<=(\u1039[\u1000-\u1021])[\u102d\u1036])\u102f', '\u1033', output_text, flags=re.M)
output_text = re.sub('(?<=[\u100a\u100c\u1020\u1025\u1029])\u102f', '\u1033', output_text, flags=re.M)
output_text = re.sub('(?<=[\u100a\u100c\u1020\u1025\u1029][\u102d\u1036])\u102f', '\u1033', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c][\u103d])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c][\u103e])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c][\u102d\u1036])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c][\u103d][\u103e])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c][\u103d][\u102d\u1036])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c][\u103e][\u102d\u1036])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103b\u103c][\u103d][\u103e][\u102d\u1036])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=(\u1039[\u1000-\u1021]))\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=(\u1039[\u1000-\u1021])[\u102d\u1036])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u100a\u100c\u1020\u1025\u1029])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=[\u100a\u100c\u1020\u1025\u1029][\u102d\u1036])\u1030', '\u1034', output_text, flags=re.M)
output_text = re.sub('(?<=\u103c)\u103e', '\u1087', output_text, flags=re.M)
output_text = re.sub('\u1009(?=[\u103a])', '\u1025', output_text, flags=re.M)
output_text = re.sub('\u1009(?=\u1039[\u1000-\u1021])', '\u1025', output_text, flags=re.M)
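    # Zawgyi renders the vowel sign E (U+1031) and medial RA (U+103C) visually
    # before the consonant cluster, so the next two substitutions move them
    # ahead of the base consonant.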
output_text = re.sub('([\u1000-\u1021\u1029])((?:\u1039[\u1000-\u1021])?)((?:[\u103b-\u103e\u1087]*)?)\u1031', '\u1031\\1\\2\\3', output_text, flags=re.M)
output_text = re.sub('([\u1000-\u1021\u1029])((?:\u1039[\u1000-\u1021\u1000-\u1021])?)(\u103c)', '\\3\\1\\2', output_text, flags=re.M)
output_text = re.sub('\u1004\u103a\u1039', '\u1064', output_text, flags=re.M)
output_text = re.sub('(\u1064)((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])\u102d', '\\2\\3\\4\u108b', output_text, flags=re.M)
output_text = re.sub('(\u1064)((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])\u102e', '\\2\\3\\4\u108c', output_text, flags=re.M)
output_text = re.sub('(\u1064)((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])\u1036', '\\2\\3\\4\u108d', output_text, flags=re.M)
output_text = re.sub('(\u1064)((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])', '\\2\\3\\4\u1064', output_text, flags=re.M)
output_text = re.sub('\u100a(?=[\u1039\u102f\u1030])', '\u106b', output_text, flags=re.M)
output_text = re.sub('\u100a', '\u100a', output_text, flags=re.M)
output_text = re.sub('\u101b(?=[\u102f\u1030])', '\u1090', output_text, flags=re.M)
output_text = re.sub('\u101b', '\u101b', output_text, flags=re.M)
output_text = re.sub('\u1014(?=[\u1039\u103d\u103e\u102f\u1030])', '\u108f', output_text, flags=re.M)
output_text = re.sub('\u1014', '\u1014', output_text, flags=re.M)
output_text = re.sub('\u1039\u1000', '\u1060', output_text, flags=re.M)
output_text = re.sub('\u1039\u1001', '\u1061', output_text, flags=re.M)
output_text = re.sub('\u1039\u1002', '\u1062', output_text, flags=re.M)
output_text = re.sub('\u1039\u1003', '\u1063', output_text, flags=re.M)
output_text = re.sub('\u1039\u1005', '\u1065', output_text, flags=re.M)
output_text = re.sub('\u1039\u1006', '\u1066', output_text, flags=re.M)
output_text = re.sub('(?<=[\u1001\u1002\u1004\u1005\u1007\u1012\u1013\u108f\u1015\u1016\u1017\u1019\u101d])\u1066', '\u1067', output_text, flags=re.M)
output_text = re.sub('\u1039\u1007', '\u1068', output_text, flags=re.M)
output_text = re.sub('\u1039\u1008', '\u1069', output_text, flags=re.M)
output_text = re.sub('\u1039\u100f', '\u1070', output_text, flags=re.M)
output_text = re.sub('\u1039\u1010', '\u1071', output_text, flags=re.M)
output_text = re.sub('(?<=[\u1001\u1002\u1004\u1005\u1007\u1012\u1013\u108f\u1015\u1016\u1017\u1019\u101d])\u1071', '\u1072', output_text, flags=re.M)
output_text = re.sub('\u1039\u1011', '\u1073', output_text, flags=re.M)
output_text = re.sub('(?<=[\u1001\u1002\u1004\u1005\u1007\u1012\u1013\u108f\u1015\u1016\u1017\u1019\u101d])\u1073', '\u1074', output_text, flags=re.M)
output_text = re.sub('\u1039\u1012', '\u1075', output_text, flags=re.M)
output_text = re.sub('\u1039\u1013', '\u1076', output_text, flags=re.M)
output_text = re.sub('\u1039\u1014', '\u1077', output_text, flags=re.M)
output_text = re.sub('\u1039\u1015', '\u1078', output_text, flags=re.M)
output_text = re.sub('\u1039\u1016', '\u1079', output_text, flags=re.M)
output_text = re.sub('\u1039\u1017', '\u107a', output_text, flags=re.M)
output_text = re.sub('\u1039\u1018', '\u107b', output_text, flags=re.M)
output_text = re.sub('\u1039\u1019', '\u107c', output_text, flags=re.M)
output_text = re.sub('\u1039\u101c', '\u1085', output_text, flags=re.M)
output_text = re.sub('\u100f\u1039\u100d', '\u1091', output_text, flags=re.M)
output_text = re.sub('\u100b\u1039\u100c', '\u1092', output_text, flags=re.M)
output_text = re.sub('\u1039\u100c', '\u106d', output_text, flags=re.M)
output_text = re.sub('\u100b\u1039\u100b', '\u1097', output_text, flags=re.M)
output_text = re.sub('\u1039\u100b', '\u106c', output_text, flags=re.M)
output_text = re.sub('\u100e\u1039\u100d', '\u106f', output_text, flags=re.M)
output_text = re.sub('\u100d\u1039\u100d', '\u106e', output_text, flags=re.M)
output_text = re.sub('\u1009(?=\u103a)', '\u1025', output_text, flags=re.M)
output_text = re.sub('\u1025(?=[\u1039\u102f\u1030])', '\u106a', output_text, flags=re.M)
output_text = re.sub('\u1025', '\u1025', output_text, flags=re.M)
output_text = re.sub('\u103a', '\u1039', output_text, flags=re.M)
output_text = re.sub('\u103b\u103d\u103e', '\u107d\u108a', output_text, flags=re.M)
output_text = re.sub('\u103d\u103e', '\u108a', output_text, flags=re.M)
output_text = re.sub('\u103b', '\u103a', output_text, flags=re.M)
output_text = re.sub('\u103c', '\u103b', output_text, flags=re.M)
output_text = re.sub('\u103d', '\u103c', output_text, flags=re.M)
output_text = re.sub('\u103e', '\u103d', output_text, flags=re.M)
output_text = re.sub('\u103a(?=[\u103c\u103d\u108a])', '\u107d', output_text, flags=re.M)
output_text = re.sub('(?<=\u100a(?:[\u102d\u102e\u1036\u108b\u108c\u108d\u108e]))\u103d', '\u1087', output_text, flags=re.M)
output_text = re.sub('(?<=\u100a)\u103d', '\u1087', output_text, flags=re.M)
output_text = re.sub('\u103b(?=[\u1000\u1003\u1006\u100f\u1010\u1011\u1018\u101a\u101c\u101e\u101f\u1021])', '\u107e', output_text, flags=re.M)
output_text = re.sub('\u107e([\u1000-\u1021\u108f])(?=[\u102d\u102e\u1036\u108b\u108c\u108d\u108e])', '\u1080\\1', output_text, flags=re.M)
output_text = re.sub('\u107e([\u1000-\u1021\u108f])(?=[\u103c\u108a])', '\u1082\\1', output_text, flags=re.M)
output_text = re.sub('\u103b([\u1000-\u1021\u108f])(?=[\u102d\u102e\u1036\u108b\u108c\u108d\u108e])', '\u107f\\1', output_text, flags=re.M)
output_text = re.sub('\u103b([\u1000-\u1021\u108f])(?=[\u103c\u108a])', '\u1081\\1', output_text, flags=re.M)
output_text = re.sub('(?<=\u1014)\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u1014[\u103a\u1032])\u1037', '\u1094', output_text, flags=re.M)
output_text = re.sub('(?<=\u1033)\u1094', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=\u1033[\u1036])\u1094', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=\u1034)\u1094', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=\u1034[\u1036])\u1094', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103c\u103d\u108a])\u1037', '\u1095', output_text, flags=re.M)
output_text = re.sub('(?<=[\u103c\u103d\u108a][\u1032])\u1037', '\u1095', output_text, flags=re.M)
return output_text
def zg12uni51(input_text=""):
output_text = input_text
output_text = re.sub('\u106a', '\u1009', output_text, flags=re.M)
output_text = re.sub('\u1025(?=[\u1039\u102c])', '\u1009', output_text, flags=re.M)
output_text = re.sub('\u1025\u102e', '\u1026', output_text, flags=re.M)
output_text = re.sub('\u106b', '\u100a', output_text, flags=re.M)
output_text = re.sub('\u1090', '\u101b', output_text, flags=re.M)
output_text = re.sub('\u1040', '\u1040', output_text, flags=re.M)
output_text = re.sub('\u108f', '\u1014', output_text, flags=re.M)
output_text = re.sub('\u1012', '\u1012', output_text, flags=re.M)
output_text = re.sub('\u1013', '\u1013', output_text, flags=re.M)
output_text = re.sub('[\u103d\u1087]', '\u103e', output_text, flags=re.M)
output_text = re.sub('\u103c', '\u103d', output_text, flags=re.M)
output_text = re.sub('[\u103b\u107e\u107f\u1080\u1081\u1082\u1083\u1084]', '\u103c', output_text, flags=re.M)
output_text = re.sub('[\u103a\u107d]', '\u103b', output_text, flags=re.M)
output_text = re.sub('\u103d\u103b', '\u103b\u103d', output_text, flags=re.M)
output_text = re.sub('\u108a', '\u103d\u103d', output_text, flags=re.M)
output_text = re.sub('\u103d\u103d', '\u103d\u103d', output_text, flags=re.M)
output_text = re.sub('((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])\u1064', '\u1064\\1\\2\\3', output_text, flags=re.M)
output_text = re.sub('((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])\u108b', '\u1064\\1\\2\\3\u102d', output_text, flags=re.M)
output_text = re.sub('((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])\u108c', '\u1064\\1\\2\\3\u102e', output_text, flags=re.M)
output_text = re.sub('((?:\u1031)?)((?:\u103c)?)([\u1000-\u1021])\u108d', '\u1064\\1\\2\\3\u1036', output_text, flags=re.M)
output_text = re.sub('\u105a', '\u102b\u103a', output_text, flags=re.M)
output_text = re.sub('\u108e', '\u102d\u1036', output_text, flags=re.M)
output_text = re.sub('\u1033', '\u102f', output_text, flags=re.M)
output_text = re.sub('\u1034', '\u1030', output_text, flags=re.M)
output_text = re.sub('\u1088', '\u103d\u102f', output_text, flags=re.M)
output_text = re.sub('\u1089', '\u103d\u1030', output_text, flags=re.M)
output_text = re.sub('\u1039', '\u103a', output_text, flags=re.M)
output_text = re.sub('[\u1094\u1095]', '\u1037', output_text, flags=re.M)
output_text = re.sub('([\u1000-\u1021])([\u102c\u102d\u102e\u1032\u1036]){1,2}([\u1060\u1061\u1062\u1063\u1065\u1066\u1067\u1068\u1069\u1070\u1071\u1072\u1073\u1074\u1075\u1076\u1077\u1078\u1079\u107a\u107b\u107c\u1085])', '\\1\\3\\2', output_text, flags=re.M)
output_text = re.sub('\u1064', '\u1004\u103a\u1039', output_text, flags=re.M)
output_text = re.sub('\u104e', '\u104e\u1004\u103a\u1038', output_text, flags=re.M)
output_text = re.sub('\u1086', '\u103f', output_text, flags=re.M)
output_text = re.sub('\u1060', '\u1039\u1000', output_text, flags=re.M)
output_text = re.sub('\u1061', '\u1039\u1001', output_text, flags=re.M)
output_text = re.sub('\u1062', '\u1039\u1002', output_text, flags=re.M)
output_text = re.sub('\u1063', '\u1039\u1003', output_text, flags=re.M)
output_text = re.sub('\u1065', '\u1039\u1005', output_text, flags=re.M)
output_text = re.sub('[\u1066\u1067]', '\u1039\u1006', output_text, flags=re.M)
output_text = re.sub('\u1068', '\u1039\u1007', output_text, flags=re.M)
output_text = re.sub('\u1069', '\u1039\u1008', output_text, flags=re.M)
output_text = re.sub('\u106c', '\u1039\u100b', output_text, flags=re.M)
output_text = re.sub('\u1070', '\u1039\u100f', output_text, flags=re.M)
output_text = re.sub('[\u1071\u1072]', '\u1039\u1010', output_text, flags=re.M)
output_text = re.sub('[\u1073\u1074]', '\u1039\u1011', output_text, flags=re.M)
output_text = re.sub('\u1075', '\u1039\u1012', output_text, flags=re.M)
output_text = re.sub('\u1076', '\u1039\u1013', output_text, flags=re.M)
output_text = re.sub('\u1077', '\u1039\u1014', output_text, flags=re.M)
output_text = re.sub('\u1078', '\u1039\u1015', output_text, flags=re.M)
output_text = re.sub('\u1079', '\u1039\u1016', output_text, flags=re.M)
output_text = re.sub('\u107a', '\u1039\u1017', output_text, flags=re.M)
output_text = re.sub('\u107b', '\u1039\u1018', output_text, flags=re.M)
output_text = re.sub('\u107c', '\u1039\u1019', output_text, flags=re.M)
output_text = re.sub('\u1085', '\u1039\u101c', output_text, flags=re.M)
output_text = re.sub('\u106d', '\u1039\u100c', output_text, flags=re.M)
output_text = re.sub('\u1091', '\u100f\u1039\u100d', output_text, flags=re.M)
output_text = re.sub('\u1092', '\u100b\u1039\u100c', output_text, flags=re.M)
output_text = re.sub('\u1097', '\u100b\u1039\u100b', output_text, flags=re.M)
output_text = re.sub('\u106f', '\u100e\u1039\u100d', output_text, flags=re.M)
output_text = re.sub('\u106e', '\u100d\u1039\u100d', output_text, flags=re.M)
output_text = re.sub('(\u103c)([\u1000-\u1021])((?:\u1039[\u1000-\u1021])?)', '\\2\\3\\1', output_text, flags=re.M)
output_text = re.sub('(\u103d)(\u103d)([\u103b\u103c])', '\\3\\2\\1', output_text, flags=re.M)
output_text = re.sub('(\u103d)([\u103b\u103c])', '\\2\\1', output_text, flags=re.M)
output_text = re.sub('(\u103d)([\u103b\u103c])', '\\2\\1', output_text, flags=re.M)
output_text = re.sub('(?<=([\u1000-\u101c\u101e-\u102a\u102c\u102e-\u103d\u104c-\u109f]))(\u1040)(?=\s)?', '\u101d', output_text, flags=re.M)
output_text = re.sub('(?<=(\u101d))(\u1040)(?=\s)?', '\u101d', output_text, flags=re.M)
output_text = re.sub('(?<=([\u1000-\u101c\u101e-\u102a\u102c\u102e-\u103d\u104c-\u109f\s]))(\u1047)', '\u101b', output_text, flags=re.M)
output_text = re.sub('(\u1047)(?=[\u1000-\u101c\u101e-\u102a\u102c\u102e-\u103d\u104c-\u109f\s])', '\u101b', output_text, flags=re.M)
output_text = re.sub('((?:\u1031)?)([\u1000-\u1021])((?:\u1039[\u1000-\u1021])?)((?:[\u102d\u102e\u1032])?)([\u1036\u1037\u1038]{0,2})([\u103b-\u103d]{0,3})((?:[\u102f\u1030])?)([\u1036\u1037\u1038]{0,2})((?:[\u102d\u102e\u1032])?)', '\\2\\3\\6\\1\\4\\9\\7\\5\\8', output_text, flags=re.M)
output_text = re.sub('\u1036\u102f', '\u102f\u1036', output_text, flags=re.M)
output_text = re.sub('(\u103a)(\u1037)', '\\2\\1', output_text, flags=re.M)
return output_text
def test(input_text=""):
return input_text
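# --- Editor's illustrative sketch (not part of the original converter) ---
# Minimal usage of zg12uni51() above. The sample string is an assumption chosen
# only to exercise one rule: Zawgyi uses U+1039 where Unicode 5.1 uses U+103A
# (asat), so '\u1014\u1039' ("na" + asat) should come back as '\u1014\u103a'.
def _demo_zg12uni51():
    zawgyi_sample = '\u1014\u1039'      # assumed sample input (Zawgyi encoding)
    return zg12uni51(zawgyi_sample)     # expected: '\u1014\u103a' (Unicode 5.1)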
|
|
from __future__ import print_function, division
from sympy.core import S, sympify, Dummy, Mod
from sympy.core.cache import cacheit
from sympy.core.compatibility import reduce, range, HAS_GMPY
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer, pi
from sympy.core.relational import Eq
from sympy.ntheory import sieve
from sympy.polys.polytools import Poly
from math import sqrt as _sqrt
class CombinatorialFunction(Function):
"""Base class for combinatorial functions. """
def _eval_simplify(self, **kwargs):
from sympy.simplify.combsimp import combsimp
# combinatorial function with non-integer arguments is
# automatically passed to gammasimp
expr = combsimp(self)
measure = kwargs['measure']
if measure(expr) <= kwargs['ratio']*measure(self):
return expr
return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
r"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
The factorial is closely related to the gamma function; in
fact `n! = gamma(n+1)` for nonnegative integers. Rewrites of this
kind are very useful for combinatorial simplification.
Computation of the factorial is done using two algorithms. For
small arguments a precomputed lookup table is used. However, for larger
input the Prime-Swing algorithm is used. It is the fastest known
algorithm and computes `n!` via the prime factorization of a special class
of numbers, called here the 'Swing Numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
from sympy import gamma, polygamma
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
_small_factorials = []
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n.is_zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n = n.p
if n < 20:
if not cls._small_factorials:
result = 1
for i in range(1, 20):
result *= i
cls._small_factorials.append(result)
result = cls._small_factorials[n-1]
# GMPY factorial is faster, use it when available
elif HAS_GMPY:
from sympy.core.compatibility import gmpy
result = gmpy.fac(n)
else:
bits = bin(n).count('1')
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _facmod(self, n, q):
res, N = 1, int(_sqrt(n))
# Exponent of prime p in n! is e_p(n) = [n/p] + [n/p**2] + ...
# for p > sqrt(n), e_p(n) < sqrt(n), the primes with [n/p] = m,
# occur consecutively and are grouped together in pw[m] for
# simultaneous exponentiation at a later stage
pw = [1]*N
m = 2 # to initialize the if condition below
for prime in sieve.primerange(2, n + 1):
if m > 1:
m, y = 0, n // prime
while y:
m += y
y //= prime
if m < N:
pw[m] = pw[m]*prime % q
else:
res = res*pow(prime, m, q) % q
for ex, bs in enumerate(pw):
if ex == 0 or bs == 1:
continue
if bs == 0:
return 0
res = res*pow(bs, ex, q) % q
return res
def _eval_Mod(self, q):
n = self.args[0]
if n.is_integer and n.is_nonnegative and q.is_integer:
aq = abs(q)
d = aq - n
if d.is_nonpositive:
return 0
else:
isprime = aq.is_prime
if d == 1:
# Apply Wilson's theorem (if a natural number n > 1
# is a prime number, then (n-1)! = -1 mod n) and
# its inverse (if n > 4 is a composite number, then
# (n-1)! = 0 mod n)
if isprime:
return -1 % q
elif isprime is False and (aq - 6).is_nonnegative:
return 0
elif n.is_Integer and q.is_Integer:
n, d, aq = map(int, (n, d, aq))
if isprime and (d - 1 < n):
fc = self._facmod(d - 1, aq)
fc = pow(fc, aq - 2, aq)
if d%2:
fc = -fc
else:
fc = self._facmod(n, aq)
return Integer(fc % q)
def _eval_rewrite_as_gamma(self, n, **kwargs):
from sympy import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n, **kwargs):
from sympy import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_even(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 2).is_nonnegative
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0]
arg_1 = arg.as_leading_term(x)
if Order(x, x).contains(arg_1):
return S.One
if Order(1, x).contains(arg_1):
return self.func(arg_1)
####################################################
# The correct result here should be 'None'. #
# Indeed arg in not bounded as x tends to 0. #
# Consequently the series expansion does not admit #
# the leading term. #
# For compatibility reasons, the return value here #
# is the original function, i.e. factorial(arg), #
# instead of None. #
####################################################
return self.func(arg)
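# --- Editor's illustrative sketch (not part of SymPy) ---
# The Prime-Swing scheme used by factorial._swing/_recursive above rests on
#     n! = (n // 2)! ** 2 * swing(n),   with swing(n) = n! / (n // 2)! ** 2.
# SymPy builds the odd part of swing(n) from its prime factorization and restores
# the powers of two in eval() as 2**(n - bin(n).count('1')). The check below only
# verifies the recurrence itself with math.factorial, which is enough to see why
# _recursive(n) squares the half-factorial result and multiplies by the swing number.
def _check_swing_identity(n=25):
    from math import factorial as _fac
    swing = _fac(n) // _fac(n // 2) ** 2
    return _fac(n) == _fac(n // 2) ** 2 * swing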
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
r"""The subfactorial counts the derangements of n items and is
defined for non-negative integers as:
.. math:: !n = \begin{cases} 1 & n = 0 \\ 0 & n = 1 \\
(n-1)(!(n-1) + !(n-2)) & n > 1 \end{cases}
It can also be written as ``int(round(n!/exp(1)))`` but the
recursive definition with caching is implemented for this function.
An interesting analytic expression is the following [2]_
.. math:: !x = \Gamma(x + 1, -1)/e
which is valid for non-negative integers `x`. The above formula
is not very useful in the case of non-integers. :math:`\Gamma(x + 1, -1)` is
single-valued only for integral arguments `x`, elsewhere on the positive
real axis it has an infinite number of branches none of which are real.
References
==========
.. [1] https://en.wikipedia.org/wiki/Subfactorial
.. [2] http://mathworld.wolfram.com/Subfactorial.html
Examples
========
>>> from sympy import subfactorial
>>> from sympy.abc import n
>>> subfactorial(n + 1)
subfactorial(n + 1)
>>> subfactorial(5)
44
See Also
========
sympy.functions.combinatorial.factorials.factorial,
sympy.utilities.iterables.generate_derangements,
sympy.functions.special.gamma_functions.uppergamma
"""
@classmethod
@cacheit
def _eval(self, n):
if not n:
return S.One
elif n == 1:
return S.Zero
return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg.is_Integer and arg.is_nonnegative:
return cls._eval(arg)
elif arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
def _eval_is_even(self):
if self.args[0].is_odd and self.args[0].is_nonnegative:
return True
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_rewrite_as_uppergamma(self, arg, **kwargs):
from sympy import uppergamma
return uppergamma(arg + 1, -1)/S.Exp1
def _eval_is_nonnegative(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_odd(self):
if self.args[0].is_even and self.args[0].is_nonnegative:
return True
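# --- Editor's illustrative sketch (not part of SymPy) ---
# subfactorial(n) counts derangements: permutations of n items with no fixed point.
# The brute-force count below (fine only for small n) agrees with the cached
# recursion implemented above, e.g. _count_derangements(5) == 44 == subfactorial(5).
def _count_derangements(n):
    from itertools import permutations
    return sum(1 for p in permutations(range(n))
               if all(p[i] != i for i in range(n)))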
class factorial2(CombinatorialFunction):
r"""The double factorial `n!!`, not to be confused with `(n!)!`
The double factorial is defined for nonnegative integers and for odd
negative integers as:
.. math:: n!! = \begin{cases} 1 & n = 0 \\
n(n-2)(n-4) \cdots 1 & n\ \text{positive odd} \\
n(n-2)(n-4) \cdots 2 & n\ \text{positive even} \\
(n+2)!!/(n+2) & n\ \text{negative odd} \end{cases}
References
==========
.. [1] https://en.wikipedia.org/wiki/Double_factorial
Examples
========
>>> from sympy import factorial2, var
>>> var('n')
n
>>> factorial2(n + 1)
factorial2(n + 1)
>>> factorial2(5)
15
>>> factorial2(-1)
1
>>> factorial2(-5)
1/3
See Also
========
factorial, RisingFactorial, FallingFactorial
"""
@classmethod
def eval(cls, arg):
# TODO: extend this to complex numbers?
if arg.is_Number:
if not arg.is_Integer:
raise ValueError("argument must be nonnegative integer "
"or negative odd integer")
# This implementation is faster than the recursive one
# It also avoids "maximum recursion depth exceeded" runtime error
if arg.is_nonnegative:
if arg.is_even:
k = arg / 2
return 2**k * factorial(k)
return factorial(arg) / factorial2(arg - 1)
if arg.is_odd:
return arg*(S.NegativeOne)**((1 - arg)/2) / factorial2(-arg)
raise ValueError("argument must be nonnegative integer "
"or negative odd integer")
def _eval_is_even(self):
# Double factorial is even for every positive even input
n = self.args[0]
if n.is_integer:
if n.is_odd:
return False
if n.is_even:
if n.is_positive:
return True
if n.is_zero:
return False
def _eval_is_integer(self):
# Double factorial is an integer for every nonnegative input, and for
# -1 and -3
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return (n + 3).is_nonnegative
def _eval_is_odd(self):
# Double factorial is odd for every odd input not smaller than -3, and
# for 0
n = self.args[0]
if n.is_odd:
return (n + 3).is_nonnegative
if n.is_even:
if n.is_positive:
return False
if n.is_zero:
return True
def _eval_is_positive(self):
# Double factorial is positive for every nonnegative input, and for
# every odd negative input which is of the form -1-4k for a
# nonnegative integer k
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return ((n + 1) / 2).is_even
def _eval_rewrite_as_gamma(self, n, **kwargs):
from sympy import gamma, Piecewise, sqrt
return 2**(n/2)*gamma(n/2 + 1) * Piecewise((1, Eq(Mod(n, 2), 0)),
(sqrt(2/pi), Eq(Mod(n, 2), 1)))
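# --- Editor's illustrative sketch (not part of SymPy) ---
# A direct evaluation of the double factorial for nonnegative integers, mirroring
# the piecewise definition in the docstring above (a step-2 product down to 1 or 2).
# factorial2 extends this to negative odd n via n!! = (n + 2)!! / (n + 2).
def _double_factorial(n):
    result = 1
    while n > 1:
        result *= n
        n -= 2
    return result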
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
r"""
Rising factorial (also called Pochhammer symbol) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
.. math:: rf(x,k) = x \cdot (x+1) \cdots (x+k-1)
where `x` can be arbitrary expression and `k` is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/RisingFactorial.html page.
When `x` is a Poly instance of degree >= 1 with a single variable,
`rf(x,k) = x(y) \cdot x(y+1) \cdots x(y+k-1)`, where `y` is the
variable of `x`. This is as described in Peter Paule, "Greatest
Factorial Factorization and Symbolic Summation", Journal of
Symbolic Computation, vol. 20, pp. 235-268, 1995.
Examples
========
>>> from sympy import rf, symbols, factorial, ff, binomial, Poly
>>> from sympy.abc import x
>>> n, k = symbols('n k', integer=True)
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
>>> rf(Poly(x**3, x), 2)
Poly(x**6 + 3*x**5 + 3*x**4 + x**3, x, domain='ZZ')
Rewrite
>>> rf(x, k).rewrite(ff)
FallingFactorial(k + x - 1, k)
>>> rf(x, k).rewrite(binomial)
binomial(k + x - 1, k)*factorial(k)
>>> rf(n, k).rewrite(factorial)
factorial(k + n - 1)/factorial(n - 1)
See Also
========
factorial, factorial2, FallingFactorial
References
==========
.. [1] https://en.wikipedia.org/wiki/Pochhammer_symbol
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k.is_zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
if len(gens)!= 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return reduce(lambda r, i:
r*(x.shift(i).expand()),
range(0, int(k)), 1)
else:
return reduce(lambda r, i: r*(x + i),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
if len(gens)!= 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return 1/reduce(lambda r, i:
r*(x.shift(-i).expand()),
range(1, abs(int(k)) + 1), 1)
else:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
if k.is_integer == False:
if x.is_integer and x.is_negative:
return S.Zero
def _eval_rewrite_as_gamma(self, x, k, **kwargs):
from sympy import gamma
return gamma(x + k) / gamma(x)
def _eval_rewrite_as_FallingFactorial(self, x, k, **kwargs):
return FallingFactorial(x + k - 1, k)
def _eval_rewrite_as_factorial(self, x, k, **kwargs):
if x.is_integer and k.is_integer:
return factorial(k + x - 1) / factorial(x - 1)
def _eval_rewrite_as_binomial(self, x, k, **kwargs):
if k.is_integer:
return factorial(k) * binomial(x + k - 1, k)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.rising_factorial(self.args[0]._sage_(),
self.args[1]._sage_())
class FallingFactorial(CombinatorialFunction):
r"""
Falling factorial (related to rising factorial) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by
.. math:: ff(x,k) = x \cdot (x-1) \cdots (x-k+1)
where `x` can be arbitrary expression and `k` is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/FallingFactorial.html page.
When `x` is a Poly instance of degree >= 1 with a single variable,
`ff(x,k) = x(y) \cdot x(y-1) \cdots x(y-k+1)`, where `y` is the
variable of `x`. This is as described in Peter Paule, "Greatest
Factorial Factorization and Symbolic Summation", Journal of
Symbolic Computation, vol. 20, pp. 235-268, 1995.
>>> from sympy import ff, factorial, rf, gamma, polygamma, binomial, symbols, Poly
>>> from sympy.abc import x, k
>>> n, m = symbols('n m', integer=True)
>>> ff(x, 0)
1
>>> ff(5, 5)
120
>>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
True
>>> ff(Poly(x**2, x), 2)
Poly(x**4 - 2*x**3 + x**2, x, domain='ZZ')
>>> ff(n, n)
factorial(n)
Rewrite
>>> ff(x, k).rewrite(gamma)
(-1)**k*gamma(k - x)/gamma(-x)
>>> ff(x, k).rewrite(rf)
RisingFactorial(-k + x + 1, k)
>>> ff(x, m).rewrite(binomial)
binomial(x, m)*factorial(m)
>>> ff(n, m).rewrite(factorial)
factorial(n)/factorial(-m + n)
See Also
========
factorial, factorial2, RisingFactorial
References
==========
.. [1] http://mathworld.wolfram.com/FallingFactorial.html
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif k.is_integer and x == k:
return factorial(x)
elif k.is_Integer:
if k.is_zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
if len(gens)!= 1:
raise ValueError("ff only defined for "
"polynomials on one generator")
else:
return reduce(lambda r, i:
r*(x.shift(-i).expand()),
range(0, int(k)), 1)
else:
return reduce(lambda r, i: r*(x - i),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
if len(gens)!= 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return 1/reduce(lambda r, i:
r*(x.shift(i).expand()),
range(1, abs(int(k)) + 1), 1)
else:
return 1/reduce(lambda r, i: r*(x + i),
range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k, **kwargs):
from sympy import gamma
return (-1)**k*gamma(k - x) / gamma(-x)
def _eval_rewrite_as_RisingFactorial(self, x, k, **kwargs):
return rf(x - k + 1, k)
def _eval_rewrite_as_binomial(self, x, k, **kwargs):
if k.is_integer:
return factorial(k) * binomial(x, k)
def _eval_rewrite_as_factorial(self, x, k, **kwargs):
if x.is_integer and k.is_integer:
return factorial(x) / factorial(x - k)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.falling_factorial(self.args[0]._sage_(),
self.args[1]._sage_())
rf = RisingFactorial
ff = FallingFactorial
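# --- Editor's illustrative sketch (not part of SymPy) ---
# The rising and falling factorials are mirror images of one another:
#     ff(x, k) == rf(x - k + 1, k)          (used by the rewrite methods above)
#     rf(x, k) == (-1)**k * ff(-x, k)
# A quick symbolic check of the first identity for a concrete integer k:
def _check_rf_ff_identity(k=3):
    from sympy import Symbol, expand
    x = Symbol('x')
    return expand(ff(x, k)) == expand(rf(x - k + 1, k))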
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
r"""Implementation of the binomial coefficient. It can be defined
in two ways depending on its desired interpretation:
.. math:: \binom{n}{k} = \frac{n!}{k!(n-k)!}\ \text{or}\
\binom{n}{k} = \frac{ff(n, k)}{k!}
First, in a strict combinatorial sense it defines the
number of ways we can choose `k` elements from a set of
`n` elements. In this case both arguments are nonnegative
integers and binomial is computed using an efficient
algorithm based on prime factorization.
The other definition is a generalization for arbitrary `n`,
however `k` must still be nonnegative. This case is very
useful when evaluating summations.
For the sake of convenience, for negative integer `k` this function
will return zero regardless of the value of the other argument.
To expand the binomial when `n` is a symbol, use either
``expand_func()`` or ``expand(func=True)``. The former will keep
the polynomial in factored form while the latter will expand the
polynomial itself. See examples for details.
Examples
========
>>> from sympy import Symbol, Rational, binomial, expand_func
>>> n = Symbol('n', integer=True, positive=True)
>>> binomial(15, 8)
6435
>>> binomial(n, -1)
0
Rows of Pascal's triangle can be generated with the binomial function:
>>> for N in range(8):
... print([binomial(N, i) for i in range(N + 1)])
...
[1]
[1, 1]
[1, 2, 1]
[1, 3, 3, 1]
[1, 4, 6, 4, 1]
[1, 5, 10, 10, 5, 1]
[1, 6, 15, 20, 15, 6, 1]
[1, 7, 21, 35, 35, 21, 7, 1]
As can a given diagonal, e.g. the 4th diagonal:
>>> N = -4
>>> [binomial(N, i) for i in range(1 - N)]
[1, -4, 10, -20, 35]
>>> binomial(Rational(5, 4), 3)
-5/128
>>> binomial(Rational(-5, 4), 3)
-195/128
>>> binomial(n, 3)
binomial(n, 3)
>>> binomial(n, 3).expand(func=True)
n**3/6 - n**2/2 + n/3
>>> expand_func(binomial(n, 3))
n*(n - 2)*(n - 1)/6
References
==========
.. [1] https://www.johndcook.com/blog/binomial_coefficients/
"""
def fdiff(self, argindex=1):
from sympy import polygamma
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(polygamma(0, n + 1) - \
polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(polygamma(0, n - k + 1) - \
polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def _eval(self, n, k):
# n.is_Number and k.is_Integer and k != 1 and n != k
if k.is_Integer:
if n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
if HAS_GMPY:
from sympy.core.compatibility import gmpy
return Integer(gmpy.bincoef(n, k))
d, result = n - k, 1
for i in range(1, k + 1):
d += 1
result = result * d // i
return Integer(result)
else:
d, result = n - k, 1
for i in range(1, k + 1):
d += 1
result *= d
result /= i
return result
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
d = n - k
n_nonneg, n_isint = n.is_nonnegative, n.is_integer
if k.is_zero or ((n_nonneg or n_isint is False)
and d.is_zero):
return S.One
if (k - 1).is_zero or ((n_nonneg or n_isint is False)
and (d - 1).is_zero):
return n
if k.is_integer:
if k.is_negative or (n_nonneg and n_isint and d.is_negative):
return S.Zero
elif n.is_number:
res = cls._eval(n, k)
return res.expand(basic=True) if res else res
elif n_nonneg is False and n_isint:
# a special case when binomial evaluates to complex infinity
return S.ComplexInfinity
elif k.is_number:
from sympy import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_Mod(self, q):
n, k = self.args
if any(x.is_integer is False for x in (n, k, q)):
raise ValueError("Integers expected for binomial Mod")
if all(x.is_Integer for x in (n, k, q)):
n, k = map(int, (n, k))
aq, res = abs(q), 1
# handle negative integers k or n
if k < 0:
return 0
if n < 0:
n = -n + k - 1
res = -1 if k%2 else 1
# non negative integers k and n
if k > n:
return 0
isprime = aq.is_prime
aq = int(aq)
if isprime:
if aq < n:
# use Lucas Theorem
N, K = n, k
while N or K:
res = res*binomial(N % aq, K % aq) % aq
N, K = N // aq, K // aq
else:
# use Factorial Modulo
d = n - k
if k > d:
k, d = d, k
kf = 1
for i in range(2, k + 1):
kf = kf*i % aq
df = kf
for i in range(k + 1, d + 1):
df = df*i % aq
res *= df
for i in range(d + 1, n + 1):
res = res*i % aq
res *= pow(kf*df % aq, aq - 2, aq)
res %= aq
else:
# Binomial Factorization is performed by calculating the
# exponents of primes <= n in `n! /(k! (n - k)!)`,
# for non-negative integers n and k. As the exponent of
# prime in n! is e_p(n) = [n/p] + [n/p**2] + ...
# the exponent of prime in binomial(n, k) would be
# e_p(n) - e_p(k) - e_p(n - k)
M = int(_sqrt(n))
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
res = res*prime % aq
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
res = res*prime % aq
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp += a
if exp > 0:
res *= pow(prime, exp, aq)
res %= aq
return Integer(res % q)
def _eval_expand_func(self, **hints):
"""
Function to expand binomial(n, k) when k is a positive integer
Also,
n is self.args[0] and k is self.args[1] while using binomial(n, k)
"""
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if k.is_Add and n in k.args:
k = n - k
if k.is_Integer:
if k.is_zero:
return S.One
elif k.is_negative:
return S.Zero
else:
n, result = self.args[0], 1
for i in range(1, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k, **kwargs):
return factorial(n)/(factorial(k)*factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k, **kwargs):
from sympy import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_rewrite_as_tractable(self, n, k, **kwargs):
return self._eval_rewrite_as_gamma(n, k).rewrite('tractable')
def _eval_rewrite_as_FallingFactorial(self, n, k, **kwargs):
if k.is_integer:
return ff(n, k) / factorial(k)
def _eval_is_integer(self):
n, k = self.args
if n.is_integer and k.is_integer:
return True
elif k.is_integer is False:
return False
def _eval_is_nonnegative(self):
n, k = self.args
if n.is_integer and k.is_integer:
if n.is_nonnegative or k.is_negative or k.is_even:
return True
elif k.is_even is False:
return False
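# --- Editor's illustrative sketch (not part of SymPy) ---
# binomial._eval_Mod above uses Lucas' theorem when the modulus p is a prime
# smaller than n: writing n and k in base p, C(n, k) mod p equals the product of
# the digit-wise binomial coefficients (and is 0 whenever a digit of k exceeds the
# corresponding digit of n). A standalone version of that step:
def _lucas_binomial_mod(n, k, p):
    from math import factorial as _fac
    def _small_binom(a, b):
        return _fac(a) // (_fac(b) * _fac(a - b)) if 0 <= b <= a else 0
    res = 1
    while n or k:
        res = res * _small_binom(n % p, k % p) % p
        n, k = n // p, k // p
    return res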
|
|
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The consistencygroups api."""
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import consistencygroups as consistencygroup_views
from cinder.api import xmlutil
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
def make_consistencygroup(elem):
elem.set('id')
elem.set('status')
elem.set('availability_zone')
elem.set('created_at')
elem.set('name')
elem.set('description')
def make_consistencygroup_from_src(elem):
elem.set('id')
elem.set('status')
elem.set('created_at')
elem.set('name')
elem.set('description')
elem.set('cgsnapshot_id')
class ConsistencyGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('consistencygroup',
selector='consistencygroup')
make_consistencygroup(root)
alias = Consistencygroups.alias
namespace = Consistencygroups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class ConsistencyGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('consistencygroups')
elem = xmlutil.SubTemplateElement(root, 'consistencygroup',
selector='consistencygroups')
make_consistencygroup(elem)
alias = Consistencygroups.alias
namespace = Consistencygroups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class ConsistencyGroupFromSrcTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('consistencygroup-from-src',
selector='consistencygroup-from-src')
make_consistencygroup_from_src(root)
alias = Consistencygroups.alias
namespace = Consistencygroups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
consistencygroup = self._extract_consistencygroup(dom)
return {'body': {'consistencygroup': consistencygroup}}
def _extract_consistencygroup(self, node):
consistencygroup = {}
consistencygroup_node = self.find_first_child_named(
node,
'consistencygroup')
attributes = ['name',
'description']
for attr in attributes:
if consistencygroup_node.getAttribute(attr):
consistencygroup[attr] = consistencygroup_node.\
getAttribute(attr)
return consistencygroup
class CreateFromSrcDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
consistencygroup = self._extract_consistencygroup(dom)
retval = {'body': {'consistencygroup-from-src': consistencygroup}}
return retval
def _extract_consistencygroup(self, node):
consistencygroup = {}
consistencygroup_node = self.find_first_child_named(
node, 'consistencygroup-from-src')
attributes = ['cgsnapshot', 'name', 'description']
for attr in attributes:
if consistencygroup_node.getAttribute(attr):
consistencygroup[attr] = (
consistencygroup_node.getAttribute(attr))
return consistencygroup
class ConsistencyGroupsController(wsgi.Controller):
"""The ConsistencyGroups API controller for the OpenStack API."""
_view_builder_class = consistencygroup_views.ViewBuilder
def __init__(self):
self.consistencygroup_api = consistencygroupAPI.API()
super(ConsistencyGroupsController, self).__init__()
@wsgi.serializers(xml=ConsistencyGroupTemplate)
def show(self, req, id):
"""Return data about the given consistency group."""
LOG.debug('show called for member %s', id)
context = req.environ['cinder.context']
try:
consistencygroup = self.consistencygroup_api.get(
context,
group_id=id)
except exception.ConsistencyGroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
return self._view_builder.detail(req, consistencygroup)
def delete(self, req, id, body):
"""Delete a consistency group."""
LOG.debug('delete called for member %s', id)
context = req.environ['cinder.context']
force = False
if body:
cg_body = body['consistencygroup']
force = cg_body.get('force', False)
LOG.info(_LI('Delete consistency group with id: %s'), id,
context=context)
try:
group = self.consistencygroup_api.get(context, id)
self.consistencygroup_api.delete(context, group, force)
except exception.ConsistencyGroupNotFound:
msg = _("Consistency group %s could not be found.") % id
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
return webob.Response(status_int=202)
@wsgi.serializers(xml=ConsistencyGroupsTemplate)
def index(self, req):
"""Returns a summary list of consistency groups."""
return self._get_consistencygroups(req, is_detail=False)
@wsgi.serializers(xml=ConsistencyGroupsTemplate)
def detail(self, req):
"""Returns a detailed list of consistency groups."""
return self._get_consistencygroups(req, is_detail=True)
def _get_consistencygroups(self, req, is_detail):
"""Returns a list of consistency groups through view builder."""
context = req.environ['cinder.context']
consistencygroups = self.consistencygroup_api.get_all(context)
limited_list = common.limited(consistencygroups, req)
if is_detail:
consistencygroups = self._view_builder.detail_list(req,
limited_list)
else:
consistencygroups = self._view_builder.summary_list(req,
limited_list)
return consistencygroups
@wsgi.response(202)
@wsgi.serializers(xml=ConsistencyGroupTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Create a new consistency group."""
LOG.debug('Creating new consistency group %s', body)
if not self.is_valid_body(body, 'consistencygroup'):
raise exc.HTTPBadRequest()
context = req.environ['cinder.context']
try:
consistencygroup = body['consistencygroup']
except KeyError:
msg = _("Incorrect request body format")
raise exc.HTTPBadRequest(explanation=msg)
name = consistencygroup.get('name', None)
description = consistencygroup.get('description', None)
volume_types = consistencygroup.get('volume_types', None)
if not volume_types:
msg = _("volume_types must be provided to create "
"consistency group %(name)s.") % {'name': name}
raise exc.HTTPBadRequest(explanation=msg)
availability_zone = consistencygroup.get('availability_zone', None)
LOG.info(_LI("Creating consistency group %(name)s."),
{'name': name},
context=context)
try:
new_consistencygroup = self.consistencygroup_api.create(
context, name, description, volume_types,
availability_zone=availability_zone)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.InvalidVolumeType as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.ConsistencyGroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
retval = self._view_builder.summary(
req,
dict(new_consistencygroup.iteritems()))
return retval
@wsgi.response(202)
@wsgi.serializers(xml=ConsistencyGroupFromSrcTemplate)
@wsgi.deserializers(xml=CreateFromSrcDeserializer)
def create_from_src(self, req, body):
"""Create a new consistency group from a source.
The source can be a snapshot. It could be extended
in the future to support other sources. Note that,
unlike the "create" API above, this does not require
volume_types.
"""
LOG.debug('Creating new consistency group %s.', body)
if not self.is_valid_body(body, 'consistencygroup-from-src'):
raise exc.HTTPBadRequest()
context = req.environ['cinder.context']
try:
consistencygroup = body['consistencygroup-from-src']
except KeyError:
msg = _("Incorrect request body format.")
raise exc.HTTPBadRequest(explanation=msg)
name = consistencygroup.get('name', None)
description = consistencygroup.get('description', None)
cgsnapshot_id = consistencygroup.get('cgsnapshot_id', None)
if not cgsnapshot_id:
msg = _("Cgsnapshot id must be provided to create "
"consistency group %(name)s from source.") % {'name': name}
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Creating consistency group %(name)s from cgsnapshot "
"%(snap)s."),
{'name': name, 'snap': cgsnapshot_id},
context=context)
try:
new_consistencygroup = self.consistencygroup_api.create_from_src(
context, name, description, cgsnapshot_id)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.CgSnapshotNotFound as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.ConsistencyGroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.CinderException as error:
raise exc.HTTPBadRequest(explanation=error.msg)
retval = self._view_builder.summary(
req,
dict(new_consistencygroup.iteritems()))
return retval
@wsgi.serializers(xml=ConsistencyGroupTemplate)
def update(self, req, id, body):
"""Update the consistency group.
Expected format of the input parameter 'body':
{
"consistencygroup":
{
"name": "my_cg",
"description": "My consistency group",
"add_volumes": "volume-uuid-1,volume-uuid-2,..."
"remove_volumes": "volume-uuid-8,volume-uuid-9,..."
}
}
"""
LOG.debug('Update called for consistency group %s.', id)
if not body:
msg = _("Missing request body.")
raise exc.HTTPBadRequest(explanation=msg)
if not self.is_valid_body(body, 'consistencygroup'):
msg = _("Incorrect request body format.")
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['cinder.context']
consistencygroup = body.get('consistencygroup', None)
name = consistencygroup.get('name', None)
description = consistencygroup.get('description', None)
add_volumes = consistencygroup.get('add_volumes', None)
remove_volumes = consistencygroup.get('remove_volumes', None)
if (not name and not description and not add_volumes
and not remove_volumes):
msg = _("Name, description, add_volumes, and remove_volumes "
"can not be all empty in the request body.")
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Updating consistency group %(id)s with name %(name)s "
"description: %(description)s add_volumes: "
"%(add_volumes)s remove_volumes: %(remove_volumes)s."),
{'id': id, 'name': name,
'description': description,
'add_volumes': add_volumes,
'remove_volumes': remove_volumes},
context=context)
try:
group = self.consistencygroup_api.get(context, id)
self.consistencygroup_api.update(
context, group, name, description,
add_volumes, remove_volumes)
except exception.ConsistencyGroupNotFound:
msg = _("Consistency group %s could not be found.") % id
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
return webob.Response(status_int=202)
class Consistencygroups(extensions.ExtensionDescriptor):
"""consistency groups support."""
name = 'Consistencygroups'
alias = 'consistencygroups'
namespace = 'http://docs.openstack.org/volume/ext/consistencygroups/api/v1'
updated = '2014-08-18T00:00:00+00:00'
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
Consistencygroups.alias, ConsistencyGroupsController(),
collection_actions={'detail': 'GET', 'create_from_src': 'POST'},
member_actions={'delete': 'POST', 'update': 'PUT'})
resources.append(res)
return resources
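# --- Editor's illustrative sketch (not part of the original module) ---
# Example request bodies accepted by ConsistencyGroupsController above. All field
# values are placeholders; 'volume_types' is required by create() and
# 'cgsnapshot_id' is required by create_from_src(), as enforced in those methods.
SAMPLE_CREATE_BODY = {
    'consistencygroup': {
        'name': 'my_cg',
        'description': 'My consistency group',
        'volume_types': 'type-uuid-1,type-uuid-2',
        'availability_zone': 'nova',
    }
}
SAMPLE_CREATE_FROM_SRC_BODY = {
    'consistencygroup-from-src': {
        'name': 'my_cg_from_snap',
        'description': 'Restored from a cgsnapshot',
        'cgsnapshot_id': 'cgsnapshot-uuid',
    }
}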
|
|
# occiput
# Stefano Pedemonte
# April 2014
# Harvard University, Martinos Center for Biomedical Imaging
# Boston, MA, USA
import Scintillators
import Collimators
from mMR import UncompressedProjection
from numpy import *
from numpy.random import randint
# Import NiftyCore ray-tracers
from occiput.Core.NiftyCore_wrap import SPECT_project_parallelholes, SPECT_backproject_parallelholes, has_NiftyCore
from occiput.Core import Image3D
from occiput.Visualization import ProgressBar, svgwrite, has_svgwrite, ipy_table, has_ipy_table
DEFAULT_ITERATIONS = 20
DEFAULT_SUBSET_SIZE = 32
EPS = 1e-9
class FileNotFound(Exception):
def __init__(self,msg,filename):
self.msg = str(msg)
self.filename = str(filename)
def __str__(self):
return "Cannot find file '%s' (%s)."%(self.filename, self.msg)
class UnknownParameter(Exception):
def __init__(self,msg):
self.msg = str(msg)
def __str__(self):
return "Unkwnown parameter: %s"%(self.msg)
class UnexpectedParameter(Exception):
def __init__(self,msg):
self.msg = str(msg)
def __str__(self):
return "Unexpected parameter: %s"%(self.msg)
class SubsetGenerator():
def __init__(self,N_positions):
self._N_positions = N_positions
def new_subset(self,mode,subset_size):
if mode=='random':
return self._random_no_replacement(subset_size)
elif mode=='ordered':
raise UnexpectedParameter("'%s' subset selection mode not yet supported."%str(mode))
else:
raise UnexpectedParameter("'mode' parameter %s not recognised."%str(mode))
def all_active(self):
return ones((self._N_positions),dtype=uint32)
def _random_no_replacement(self,subset_size):
if subset_size>=self._N_positions:
return self.all_active()
M = zeros((self._N_positions),dtype=int32)
n = 0
while n<subset_size:
active = randint(self._N_positions)
if M[active] == 0:
M[active] = 1
n+=1
return M
def deg_to_rad(deg):
return deg*pi/180.0
def rad_to_deg(rad):
return rad*180.0/pi
class SPECT_Static_Scan(object):
def __init__(self):
self._name = "Generic SPECT Scanner"
self._scanner_type = "SPECT"
self._manufacturer = "No manufacturer"
self._version = "0.0"
# scanner parameters are named with 'self._p_xxx'
self._p_gantry_angular_positions = 180 #[adim,integer]
self._p_gantry_angular_position_first = 0.0 #[degrees]
self._p_gantry_angular_position_last = 358.0 #[degrees]
self._subset_generator = SubsetGenerator(self._p_gantry_angular_positions)
self._p_scan_time_sec = 600.0 #[seconds]
self._p_radius_mm = 300.0 #[mm]
self._p_n_pix_x = 128 #[adim]
self._p_n_pix_y = 128 #[adim]
self._p_pix_size_x_mm = 2.5 #[mm]
self._p_pix_size_y_mm = 2.5 #[mm]
self.set_background_activity(0.0)
self.set_background_attenuation(0.0)
self.set_use_gpu(True)
self.set_truncate_negative(False)
self.set_scintillator( Scintillators.Ideal() )
self.set_collimator( Collimators.LEHR() )
self._measurement = None
self._need_update_norm = True
def get_name(self):
return self._name
def get_type(self):
return self._scanner_type
def get_manufacturer(self):
return self._manufacturer
def get_version(self):
return self._version
def _get_parameters(self):
parameters = {}
dic = self.__dict__
for k in dic.keys():
if k.startswith('_p_'):
parameters[k[3:]]=dic[k]
return parameters
def get_gantry_angular_positions(self):
return (self._p_gantry_angular_position_first, self._p_gantry_angular_position_last, self._p_gantry_angular_positions)
def set_gantry_angular_positions(self, first_position_deg, last_position_deg, N_positions):
if not ( isscalar(first_position_deg) and isscalar(last_position_deg) and isscalar(N_positions) ):
raise UnexpectedParameter('Expected scalar values.')
if not isinstance(N_positions,type(1)):
raise UnexpectedParameter('Expected an integer value.')
self._p_gantry_angular_position_first = first_position_deg
self._p_gantry_angular_position_last = last_position_deg
self._p_gantry_angular_positions = N_positions
self._subset_generator = SubsetGenerator(self._p_gantry_angular_positions)
def get_scan_time(self):
return self._p_scan_time_sec
def set_scan_time(self,scan_time_sec):
if not isscalar(scan_time_sec):
raise UnexpectedParameter('Expected a scalar value.')
self._p_scan_time_sec = scan_time_sec
def get_radius(self):
return self._p_radius_mm
def set_radius(self,radius_mm):
if not isscalar(radius_mm):
raise UnexpectedParameter('Expected a scalar value.')
self._p_radius_mm = radius_mm
def get_n_pixels(self):
return (self._p_n_pix_x, self._p_n_pix_y)
def set_n_pixels(self,n_pixels_x,n_pixels_y):
if (not isscalar(n_pixels_x)) or (not isscalar(n_pixels_y)):
raise UnexpectedParameter('Expected scalar values.')
self._p_n_pix_x = n_pixels_x
self._p_n_pix_y = n_pixels_y
self._need_update_norm = True
def get_pixel_size(self):
return (self._p_pix_size_x_mm, self._p_pix_size_y_mm)
def set_pixel_size(self,pixel_size_x,pixel_size_y):
if (not isscalar(pixel_size_x)) or (not isscalar(pixel_size_y)):
raise UnexpectedParameter('Expected scalar values.')
self._p_pix_size_x_mm = pixel_size_x
self._p_pix_size_y_mm = pixel_size_y
def get_scintillator(self):
return self._scintillator
def set_scintillator(self,scintillator):
if not isinstance(scintillator,Scintillators.BaseScintillatorSPECT):
raise UnexpectedParameter('Expected an instance of BaseScintillatorSPECT')
self._scintillator = scintillator
self.__make_psf()
self._need_update_norm = True
def get_collimator(self):
return self._collimator
def set_collimator(self,collimator):
if not isinstance(collimator,Collimators.BaseCollimatorSPECT):
raise UnexpectedParameter('Expected an instance of BaseCollimatorSPECT')
self._collimator = collimator
self.__make_psf()
self._need_update_norm = True
def set_background_activity(self,value):
self._background_activity = value
def get_background_activity(self):
return self._background_activity
def set_background_attenuation(self,value):
self._background_attenuation = value
def get_background_attenuation(self):
return self._background_attenuation
def set_use_gpu(self, value):
self._use_gpu = value
def set_truncate_negative(self,value):
self._truncate_negative = value
def project(self, activity, attenuation=None, cameras=None, psf=None, subsets_array=None):
if isinstance(activity,ndarray):
activity = float32(activity)
else:
activity = float32(activity.data)
if attenuation is not None:
if isinstance(attenuation,ndarray):
attenuation = float32(attenuation)
else:
attenuation = float32(attenuation.data)
if cameras is None:
cameras = float32(linspace(deg_to_rad(self._p_gantry_angular_position_first),deg_to_rad(self._p_gantry_angular_position_last),self._p_gantry_angular_positions).reshape((self._p_gantry_angular_positions,1)))
# subsets:
if subsets_array is not None:
cameras=cameras[where(subsets_array)]
if psf is None:
psf=self._psf
proj = SPECT_project_parallelholes(activity, cameras, attenuation, psf, self._background_activity, self._background_attenuation, self._use_gpu, self._truncate_negative)
return UncompressedProjection(proj)
def backproject(self, projection, attenuation=None, cameras=None, psf=None, subsets_array=None):
if isinstance(projection,ndarray):
projection = float32(projection)
else:
projection = float32(projection.data)
if attenuation is not None:
if isinstance(attenuation,ndarray):
attenuation = float32(attenuation)
else:
attenuation = float32(attenuation.data)
if cameras is None:
cameras = float32(linspace(deg_to_rad(self._p_gantry_angular_position_first),deg_to_rad(self._p_gantry_angular_position_last),self._p_gantry_angular_positions).reshape((self._p_gantry_angular_positions,1)))
# subsets:
if subsets_array is not None:
cameras=cameras[where(subsets_array)]
if psf is None:
psf=self._psf
backproj = SPECT_backproject_parallelholes(projection, cameras, attenuation, psf, self._background_activity, self._background_attenuation, self._use_gpu, self._truncate_negative)
return Image3D(backproj)
def scan(self,activity_Bq,scan_time_sec=None):
if scan_time_sec is None:
scan_time_sec = self.get_scan_time()
sinogram = 0
return sinogram
def __make_probabilistic_graphical_model(self):
pass
def __make_psf(self):
self._psf = None
def get_normalization(self):
if self._need_update_norm:
self._compute_normalisation()
return self._norm
def _compute_normalisation(self):
subsets_array = self._subset_generator.all_active()
self._norm = self.backproject(ones(( self._p_n_pix_x,self._p_n_pix_y,self._p_gantry_angular_positions ),dtype=float32, order="F") ).data
self._need_update_norm = False
def estimate_activity(self, iterations=DEFAULT_ITERATIONS, subset_size=DEFAULT_SUBSET_SIZE, subset_mode='random'):
progress_bar = ProgressBar()
progress_bar.set_percentage(0.1)
activity = ones((self._p_n_pix_x,self._p_n_pix_y,self._p_n_pix_x),dtype=float32, order="F")
for i in range(iterations):
# Subsets:
if subset_size is None:
subsets_array=None
subset_size=self._p_gantry_angular_positions
elif subset_size>=self._p_gantry_angular_positions:
subsets_array=None
subset_size=self._p_gantry_angular_positions
else:
subsets_array = self._subset_generator.new_subset(subset_mode,subset_size)
if subsets_array is not None:
proj = self.project(activity,subsets_array=subsets_array).data
P = (self._measurement[:,:,where(subsets_array)].reshape((self._p_n_pix_x,self._p_n_pix_y,subset_size))+EPS)/(proj+EPS)
norm = self.backproject(ones(( self._p_n_pix_x,self._p_n_pix_y,subset_size ),dtype=float32, order="F"), subsets_array=subsets_array).data
update = (self.backproject( P ,subsets_array=subsets_array).data+EPS) / (norm +EPS)
else:
proj = self.project(activity).data
P = (self._measurement+EPS)/(proj+EPS)
norm = self.get_normalization()
update = (self.backproject( P ).data+EPS) / (norm +EPS)
activity = activity * update #* self.get_mask().data
progress_bar.set_percentage((i+1)*100.0/iterations)
#print "Iteration: %d max act: %f min act: %f max proj: %f min proj: %f max norm: %f min norm: %f"%(i, activity.max(), activity.min(), proj.max(), proj.min(), norm.data.max(), norm.data.min() )
progress_bar.set_percentage(100.0)
return Image3D(activity)
def volume_render(self,volume,scale=1.0):
# FIXME: use the VolumeRenderer object in occiput.Visualization (improve it), the following is a quick fix:
if isinstance(volume,ndarray):
volume = float32(volume)
else:
volume = float32(volume.data)
proj = self.project(volume).data
proj[where(proj>proj.max()/scale )]=proj.max()/scale
return UncompressedProjection(proj)
def load_measurement_file(self,filename):
pass
def set_measurement(self, measurement):
if not ( self._p_n_pix_x == measurement.shape[0] and self._p_n_pix_y == measurement.shape[1] and self._p_gantry_angular_positions == measurement.shape[2] ):
raise UnexpectedParameter('Measurement size is not compatible with n_pix_x, n_pix_y, gantry_angular_positions. ')
self._measurement = measurement
def get_measurement(self):
# NOTE (editor): 'Volume' is not defined or imported in this module; Image3D
# (imported above) is used here so that get_measurement() does not raise a NameError.
return Image3D(self._measurement)
def display_measurement(self):
return UncompressedProjection(self._measurement)
def _make_svg(self):
if not has_svgwrite:
self._svg_string = None
return self._svg_string
w = '100%'
h = '100%'
dwg = svgwrite.Drawing('SPECT.svg',size=(w,h), profile='full', debug=True)
dwg.viewbox(width=100, height=100)
# DETECTOR
# collimator
rect = dwg.add(dwg.rect(insert=(12, 30), size=(8, 40), rx=0.5, ry=0.5))
rect.fill('grey',opacity=0.5).stroke('black',width=0.3,opacity=0.001)
# scintillator
rect = dwg.add(dwg.rect(insert=(9, 30), size=(3, 40), rx=0.5, ry=0.5))
rect.fill('green',opacity=0.1).stroke('none',width=0.3,opacity=0.001)
# photomultipliers
for i in range(8):
rect = dwg.add(dwg.rect(insert=(1, 31.2+i*4.8), size=(8, 4), rx=0.3, ry=0.3))
rect.fill('grey',opacity=0.25).stroke('none',width=0.3,opacity=0.001)
# IMAGING VOLUME
rect = dwg.add(dwg.rect(insert=(30, 30), size=(40, 40), rx=0.5, ry=0.5))
rect.fill('grey',opacity=0.02).stroke('grey',width=0.3,opacity=0.02)
# GEOMETRIC NOTATIONS
# circle, gantry rotation
circle = dwg.add(dwg.circle(center=(50, 50), r=30))
circle.fill('none').stroke('grey', width=0.1).dasharray([0.5, 0.5])
# center
circle = dwg.add(dwg.circle(center=(50, 50), r=0.5))
circle.fill('grey',opacity=0.1).stroke('grey', width=0.1)
line = dwg.add(dwg.line(start=(50-1,50), end=(50+1,50)))
line.stroke('grey', width=0.1)
line = dwg.add(dwg.line(start=(50,50-1), end=(50,50+1)))
line.stroke('grey', width=0.1)
#line = dwg.add(dwg.polyline([(10, 10), (10, 100), (100, 100), (100, 10), (10, 10)],stroke='black', fill='none'))
self._svg_string = dwg.tostring()
return self._svg_string
def _repr_svg_(self):
self._make_svg()
return self._svg_string
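# --- Editor's illustrative sketch (not part of occiput) ---
# Typical use of SPECT_Static_Scan: load a projection measurement shaped
# (n_pix_x, n_pix_y, gantry_angular_positions) and run estimate_activity(), whose
# MLEM-style multiplicative update is
#     activity <- activity * backproject(measurement / project(activity)) / norm.
# The measurement below is a placeholder array; a real scan would be loaded from
# file, and the projector requires the NiftyCore ray-tracers imported above.
def _demo_spect_reconstruction():
    spect = SPECT_Static_Scan()
    spect.set_n_pixels(128, 128)
    spect.set_gantry_angular_positions(0.0, 358.0, 180)
    measurement = ones((128, 128, 180), dtype=float32, order="F")  # placeholder data
    spect.set_measurement(measurement)
    # subset_size=16 uses a random subset of camera positions per iteration (OSEM-style)
    return spect.estimate_activity(iterations=10, subset_size=16, subset_mode='random')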
class Gantry():
def __init__(self):
self.svg_string = self.make_svg()
def make_svg(self):
if not has_svgwrite:
self._svg_string = None
return self._svg_string
w = '100%'
h = '100%'
dwg = svgwrite.Drawing('test.svg',size=(w,h), profile='full', debug=True)
dwg.viewbox(width=100, height=100)
# DETECTOR
# collimator
rect = dwg.add(dwg.rect(insert=(12, 30), size=(8, 40), rx=0.5, ry=0.5))
rect.fill('grey',opacity=0.5).stroke('black',width=0.3,opacity=0.001)
# scintillator
rect = dwg.add(dwg.rect(insert=(9, 30), size=(3, 40), rx=0.5, ry=0.5))
rect.fill('green',opacity=0.1).stroke('none',width=0.3,opacity=0.001)
# photomultipliers
for i in range(8):
rect = dwg.add(dwg.rect(insert=(1, 31.2+i*4.8), size=(8, 4), rx=0.3, ry=0.3))
rect.fill('grey',opacity=0.25).stroke('none',width=0.3,opacity=0.001)
# IMAGING VOLUME
rect = dwg.add(dwg.rect(insert=(30, 30), size=(40, 40), rx=0.5, ry=0.5))
rect.fill('grey',opacity=0.02).stroke('grey',width=0.3,opacity=0.02)
# GEOMETRIC NOTATIONS
# circle, gantry rotation
circle = dwg.add(dwg.circle(center=(50, 50), r=30))
circle.fill('none').stroke('grey', width=0.1).dasharray([0.5, 0.5])
# center
circle = dwg.add(dwg.circle(center=(50, 50), r=0.5))
circle.fill('grey',opacity=0.1).stroke('grey', width=0.1)
line = dwg.add(dwg.line(start=(50-1,50), end=(50+1,50)))
line.stroke('grey', width=0.1)
line = dwg.add(dwg.line(start=(50,50-1), end=(50,50+1)))
line.stroke('grey', width=0.1)
#line = dwg.add(dwg.polyline([(10, 10), (10, 100), (100, 100), (100, 10), (10, 10)],stroke='black', fill='none'))
return dwg.tostring()
def _repr_svg_(self):
return self.svg_string
class GE_Infinia(SPECT_Static_Scan):
def __init__(self):
SPECT_Static_Scan.__init__(self)
self._name = "GE Infinia SPECT Scanner with LEHR collimator"
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class RouteTablesOperations(object):
"""RouteTablesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-10-01"
self.config = config
def _delete_initial(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def get(
self, resource_group_name, route_table_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RouteTable or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_10_01.models.RouteTable or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def _create_or_update_initial(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RouteTable')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route
table operation.
:type parameters: ~azure.mgmt.network.v2017_10_01.models.RouteTable
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def _update_tags_initial(
self, resource_group_name, route_table_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, route_table_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2017_10_01.models.RouteTablePaged[~azure.mgmt.network.v2017_10_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'}
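# models.RouteTablePaged drives internal_paging above: it calls it repeatedly,
# passing each response's nextLink as next_link until the service stops
# returning one, so callers simply iterate the returned object.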
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2017_10_01.models.RouteTablePaged[~azure.mgmt.network.v2017_10_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'}
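# Hedged usage sketch (not generated code): reaching these operations through
# an authenticated azure.mgmt.network NetworkManagementClient. Resource names
# below are placeholders.
def _example_route_table_roundtrip(client, resource_group_name, route_table_name):
    """Create, list and delete a route table (illustrative only)."""
    poller = client.route_tables.create_or_update(
        resource_group_name, route_table_name,
        models.RouteTable(location='westus'))
    route_table = poller.result()  # block until the ARM operation completes
    names = [rt.name for rt in client.route_tables.list(resource_group_name)]
    client.route_tables.delete(resource_group_name, route_table_name).wait()
    return route_table, names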
|
|
from PIL import Image, ImageTk
from Tkinter import Frame, Button
from time import sleep
class BoardFrame(Frame):
def __init__(self, master):
Frame.__init__(self, master)
self.configure(background="#336600")
# Divide the screen into horizontal zones
self.infoFrame = Frame(self) # contains helper buttons and timer
self.topCardsFrame = Frame(self) # contains stock, waste and foundations
self.tableauFrame = Frame(self) # contains tableau piles
self.infoFrame.pack(side="top", fill="x", expand=False)
self.topCardsFrame.pack(side="top", fill="x", expand=False)
self.tableauFrame.pack(side="top", fill="both", expand=True)
# Divide the info frame in 2 vertically
self.possibleMovesFrame = Frame(self.infoFrame)
self.timerFrame = Frame(self.infoFrame)
self.possibleMovesFrame.pack(side="left", fill="x", expand=True)
self.timerFrame.pack(side="right", fill="x", expand=True)
# Divide the top cards frame in 2 vertically
self.topCardsLeft = Frame(self.topCardsFrame)
self.topCardsRight = Frame(self.topCardsFrame)
self.topCardsLeft.pack(side="left", fill="x", expand=True)
self.topCardsRight.pack(side="right", fill="x", expand=True)
# In top left put 2 frames for the stock and the waste
self.stockFrame = Frame(self.topCardsLeft)
self.wasteFrame = Frame(self.topCardsLeft)
self.stockFrame.pack(side="left", fill="x", expand=True)
self.wasteFrame.pack(side="right", fill="x", expand=True)
# In top right put 4 frames for the 4 foundations
self.HFrame = Frame(self.topCardsRight)
self.CFrame = Frame(self.topCardsRight)
self.SFrame = Frame(self.topCardsRight)
self.DFrame = Frame(self.topCardsRight)
self.HFrame.pack(side="right", fill="both", expand=True)
self.CFrame.pack(side="right", fill="both", expand=True)
self.DFrame.pack(side="right", fill="both", expand=True)
self.SFrame.pack(side="right", fill="both", expand=True)
# In bottom frame put 7 frames for the tableau piles
self.tableauFrames = []
for i in range(0, 7):
self.tableauFrames.append(Frame(self.tableauFrame))
self.tableauFrames[i].pack(side="left", fill="y", expand=True)
# Dictionary which links cards in the tableau
# to the buttons that represent them
self.cardButtons = {}
# When a tableau pile is empty, a corresponding button is stored
# in this dictionary to allow the user to put a card on the empty pile
self.tableauFirstCardButtons = {}
# Load common images
imageBack = Image.open("../img/back.bmp")
self.photoBack = ImageTk.PhotoImage(imageBack)
self.photoBackCropped = ImageTk.PhotoImage(imageBack.crop((0, 0, imageBack.size[0], imageBack.size[1]/4)))
self.photoEmpty = ImageTk.PhotoImage(Image.open("../img/empty.bmp"))
self.photoHEmpty = ImageTk.PhotoImage(Image.open("../img/Hempty.bmp"))
self.photoCEmpty = ImageTk.PhotoImage(Image.open("../img/Cempty.bmp"))
self.photoSEmpty = ImageTk.PhotoImage(Image.open("../img/Sempty.bmp"))
self.photoDEmpty = ImageTk.PhotoImage(Image.open("../img/Dempty.bmp"))
# Put possible moves button
self.possibleMovesButton = Button(self.possibleMovesFrame, text="Show possible moves")
self.possibleMovesButton.pack(side="top", fill="both", expand=False)
# Put initial waste button
self.wasteButton = Button(self.wasteFrame, image=self.photoEmpty)
self.wasteButton.photo = self.photoEmpty
self.wasteButton.pack(side="top", fill="both", expand=False)
# Put initial stock button
self.stockButton = Button(self.stockFrame, image=self.photoBack)
self.stockButton.photo = self.photoBack
self.stockButton.pack(side="top", fill="both", expand=False)
# Put initial foundations buttons
self.HButton = Button(self.HFrame, image=self.photoHEmpty)
self.CButton = Button(self.CFrame, image=self.photoCEmpty)
self.SButton = Button(self.SFrame, image=self.photoSEmpty)
self.DButton = Button(self.DFrame, image=self.photoDEmpty)
self.HButton.pack(side="top", fill="both", expand=False)
self.CButton.pack(side="top", fill="both", expand=False)
self.SButton.pack(side="top", fill="both", expand=False)
self.DButton.pack(side="top", fill="both", expand=False)
# To be called by the controller with a dict mapping each origin button
# to its possible destination buttons
def showPossibleMoves(self, possibleMoves):
# if no move is possible focus stock button
if (not possibleMoves):
self.focusButton(self.stockButton, True)
sleep(1)
self.unfocusButton(self.stockButton)
return 0
for origin in possibleMoves.keys():
self.focusButton(origin, True)
for destination in possibleMoves[origin]:
self.focusButton(destination, False)
sleep(1)
self.unfocusButton(origin)
for destination in possibleMoves[origin]:
self.unfocusButton(destination)
sleep(0.5)
return 0
# change the background of a button to focus on it
def focusButton(self, button, isOrigin):
color="red"
if (isOrigin):
color = "green"
button.configure(highlightbackground=color)
button.update_idletasks()
# change the background of a button to remove focus from it
def unfocusButton(self, button):
button.configure(highlightbackground="white")
button.update_idletasks()
# To be called by the controller when the board
# publishes the refreshGUI event
def updateGUI(self, board):
print(board)
# Update stock and waste buttons
if (len(board.stock) > 0):
self.stockButton.configure(image=self.photoBack)
else:
self.stockButton.configure(image=self.photoEmpty)
if (len(board.waste) == 0):
self.wasteButton.configure(image=self.photoEmpty)
else:
self.wasteButton.configure(image=board.waste[-1].photoFaceUp)
# Update foundations buttons
if (len(board.H) > 0):
self.HButton.configure(image=board.H[-1].photoFaceUp)
else:
self.HButton.configure(image=self.photoHEmpty)
if (len(board.C) > 0):
self.CButton.configure(image=board.C[-1].photoFaceUp)
else:
self.CButton.configure(image=self.photoCEmpty)
if (len(board.S) > 0):
self.SButton.configure(image=board.S[-1].photoFaceUp)
else:
self.SButton.configure(image=self.photoSEmpty)
if (len(board.D) > 0):
self.DButton.configure(image=board.D[-1].photoFaceUp)
else:
self.DButton.configure(image=self.photoDEmpty)
# Update tableau piles
# Remove old buttons in each frame
for f in self.tableauFrames:
if (len(f.winfo_children()) > 0):
for child in f.winfo_children():
child.destroy()
frame = -1
for pile in board.PlayingStacks:
frame += 1
r = -1
# if a pile is empty, create a button to represent it and add
# the button to the dictionary.
# If the pile is not empty anymore destroy the button.
if (len(pile) == 0):
newButton = Button(self.tableauFrames[frame], image=self.photoEmpty)
newButton.grid(row=0, column=0)
self.tableauFirstCardButtons[frame] = newButton
elif frame in self.tableauFirstCardButtons:
self.tableauFirstCardButtons[frame].destroy()
del self.tableauFirstCardButtons[frame]
for card in pile:
r += 1
if (card != pile[-1]):
if (card.facedown):
image=self.photoBackCropped
else:
image=card.photoFaceUpCropped
else:
if (card.facedown):
image=self.photoBack
else:
image=card.photoFaceUp
newButton = Button(self.tableauFrames[frame], image=image)
newButton.grid(row=r, column=0)
if (not card.facedown):
self.cardButtons[card] = newButton
# remove old entries from dictionary
for k in list(self.cardButtons.keys()):
isInTableau = False
for stack in board.PlayingStacks:
if k in stack:
isInTableau = True
break
if (not isInTableau):
self.cardButtons.pop(k, None)
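# Hedged usage sketch (not part of the widget): embedding the board in a Tk
# root window. The ../img/*.bmp card images must be reachable from the working
# directory for the constructor to succeed.
def _example_show_board():
    """Open a window containing a freshly built BoardFrame (illustrative only)."""
    from Tkinter import Tk
    root = Tk()
    board = BoardFrame(root)
    board.pack(fill="both", expand=True)
    root.mainloop()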
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A multi-agent meta-controller policy that runs policies for agents within it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.agents.ppo import ppo_policy
from tf_agents.agents.ppo import ppo_utils
from tf_agents.networks import network
from tf_agents.policies import greedy_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from social_rl.multiagent_tfagents import multiagent_ppo_policy
tfd = tfp.distributions
class AttentionPPOPolicy(ppo_policy.PPOPolicy):
"""A modification of tf_agents PPOPolicy that returns attention info."""
def _distribution(self, time_step, policy_state, training=False):
if not policy_state:
policy_state = {'actor_network_state': (), 'value_network_state': ()}
else:
policy_state = policy_state.copy()
if 'actor_network_state' not in policy_state:
policy_state['actor_network_state'] = ()
if 'value_network_state' not in policy_state:
policy_state['value_network_state'] = ()
new_policy_state = {'actor_network_state': (), 'value_network_state': ()}
(distributions, new_policy_state['actor_network_state'], _) = (
self._apply_actor_network(
time_step, policy_state['actor_network_state'], training=training))
if self._collect:
policy_info = {
'dist_params':
ppo_utils.get_distribution_params(
distributions,
legacy_distribution_network=isinstance(
self._actor_network, network.DistributionNetwork))
}
if not self._compute_value_and_advantage_in_train:
# If value_prediction is not computed in agent.train it needs to be
# computed and saved here.
(policy_info['value_prediction'],
new_policy_state['value_network_state']) = self.apply_value_network(
time_step.observation,
time_step.step_type,
value_state=policy_state['value_network_state'],
training=False)
else:
policy_info = ()
if (not new_policy_state['actor_network_state'] and
not new_policy_state['value_network_state']):
new_policy_state = ()
elif not new_policy_state['value_network_state']:
del new_policy_state['value_network_state']
elif not new_policy_state['actor_network_state']:
del new_policy_state['actor_network_state']
return policy_step.PolicyStep(distributions, new_policy_state, policy_info)
@gin.configurable
class AttentionMultiagentPPOPolicy(multiagent_ppo_policy.MultiagentPPOPolicy):
"""A modification of MultiagentPPOPolicy that returns attention info."""
def __init__(
self,
*args,
use_stacks=False,
**kwargs,
):
"""Creates a centralized controller agent that uses joint attention.
Args:
*args: See superclass.
use_stacks: Use ResNet stacks in image encoder (compresses the image).
**kwargs: See superclass.
"""
self.use_stacks = use_stacks
super(AttentionMultiagentPPOPolicy, self).__init__(*args, **kwargs)
# Building policy out of sub-policies, so pylint:disable=protected-access
def _make_info_spec(self, time_step_spec):
# Make multi-agent info spec
if self._collect:
info_spec = []
for p in self._agent_policies:
agent_info_spec = p.info_spec
if self.use_stacks:
image_shape = [
i // 4 for i in time_step_spec.observation['image'].shape[1:3]
]
else:
image_shape = time_step_spec.observation['image'].shape[1:3]
state_spec = tensor_spec.BoundedTensorSpec(
image_shape, dtype=tf.float32, minimum=0, maximum=1)
agent_info_spec['attention_weights'] = state_spec
info_spec.append(agent_info_spec)
info_spec = tuple(info_spec)
else:
info_spec = ()
return info_spec
def _apply_actor_network(self, time_step, policy_states, training=False):
actions = [None] * self.n_agents
new_states = {'actor_network_state': [None] * self.n_agents}
attention_weights = [None] * self.n_agents
for agent_id, policy in enumerate(self._agent_policies):
# Fixed agents do not act. Used for debugging
if self.inactive_agent_ids and agent_id in self.inactive_agent_ids:
actions[agent_id] = tf.ones_like(time_step.discount, dtype=tf.int64) * 6
new_states['actor_network_state'][agent_id] = policy_states[
'actor_network_state'][agent_id]
continue
agent_time_step = self._get_obs_for_agent(time_step, agent_id)
if isinstance(policy, greedy_policy.GreedyPolicy):
policy = policy._wrapped_policy
agent_policy_state = [
state[:, agent_id] for state in policy_states['actor_network_state']
]
actions[agent_id], new_states['actor_network_state'][
agent_id], attention_weights[agent_id] = policy._apply_actor_network(
agent_time_step, agent_policy_state, training)
actions = tuple(actions)
new_states = {
'actor_network_state': [
tf.stack(i, axis=1)
for i in list(zip(*new_states['actor_network_state']))
]
}
return actions, new_states, attention_weights
def _distribution(self, time_step, policy_state, training=False):
# Actor network outputs a list of distributions or actions (one for each
# agent), and a list of policy states for each agent
actions_or_distributions, policy_state, attention_weights = self._apply_actor_network(
time_step, policy_state, training=training)
def _to_distribution(action_or_distribution):
if isinstance(action_or_distribution, tf.Tensor):
# This is an action tensor, so wrap it in a deterministic distribution.
return tfp.distributions.Deterministic(loc=action_or_distribution)
return action_or_distribution
distributions = tf.nest.map_structure(_to_distribution,
actions_or_distributions)
# Prepare policy_info.
if self._collect:
policy_info = ppo_utils.get_distribution_params(
distributions,
False
)
# Wrap policy info to be compatible with the new spec
policy_info = list(policy_info)
for a in range(len(policy_info)):
if not self.inactive_agent_ids or a not in self.inactive_agent_ids:
policy_info[a] = {'dist_params': policy_info[a]}
policy_info[a].update({'attention_weights': attention_weights[a]})
# Fake logits for fixed agents.
if self.inactive_agent_ids and self.learning_agents:
for a in self.inactive_agent_ids:
policy_info[a] = {
'dist_params': {
'logits':
tf.zeros_like(policy_info[self.learning_agents[0]]
['dist_params']['logits'])
}
}
policy_info = tuple(policy_info)
# PolicyStep has actions, state, info
step_result = policy_step.PolicyStep(distributions, policy_state,
policy_info)
else:
# I was not able to use a GreedyPolicy wrapper and also override _action,
# so I replicated the greedy functionality here.
def dist_fn(dist):
try:
greedy_action = dist.mode()
except NotImplementedError:
raise ValueError("Your network's distribution does not implement "
'mode, making it incompatible with a greedy policy.')
return greedy_policy.DeterministicWithLogProb(loc=greedy_action)
actions = tf.nest.map_structure(dist_fn, distributions)
step_result = policy_step.PolicyStep(actions, policy_state, ())
return step_result
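# Hedged illustration (not used by the policies above) of the wrapping done in
# _to_distribution: a raw action tensor becomes a Deterministic distribution,
# so downstream code can treat actions and distributions uniformly.
def _example_deterministic_wrap():
  action = tf.constant([2, 5], dtype=tf.int64)
  dist = tfd.Deterministic(loc=action)
  return dist.sample()  # recovers `action` exactly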
|
|
"""The tests for the nx584 sensor platform."""
import requests
import unittest
from unittest import mock
from nx584 import client as nx584_client
from homeassistant.components.binary_sensor import nx584
from homeassistant.bootstrap import setup_component
class StopMe(Exception):
"""Stop helper."""
pass
class TestNX584SensorSetup(unittest.TestCase):
"""Test the NX584 sensor platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self._mock_client = mock.patch.object(nx584_client, 'Client')
self._mock_client.start()
self.fake_zones = [
{'name': 'front', 'number': 1},
{'name': 'back', 'number': 2},
{'name': 'inside', 'number': 3},
]
client = nx584_client.Client.return_value
client.list_zones.return_value = self.fake_zones
client.get_version.return_value = '1.1'
def tearDown(self):
"""Stop everything that was started."""
self._mock_client.stop()
def test_setup_no_config(self):
"""Test the setup with no configuration."""
hass = mock.MagicMock()
hass.pool.worker_count = 2
assert setup_component(hass, 'binary_sensor', {'nx584': {}})
@mock.patch('homeassistant.components.binary_sensor.nx584.NX584Watcher')
@mock.patch('homeassistant.components.binary_sensor.nx584.NX584ZoneSensor')
def test_setup_defaults(self, mock_nx, mock_watcher):
"""Test the setup with no configuration."""
add_devices = mock.MagicMock()
hass = mock.MagicMock()
config = {
'host': nx584.DEFAULT_HOST,
'port': nx584.DEFAULT_PORT,
'exclude_zones': [],
'zone_types': {},
}
self.assertTrue(nx584.setup_platform(hass, config, add_devices))
mock_nx.assert_has_calls(
[mock.call(zone, 'opening') for zone in self.fake_zones])
self.assertTrue(add_devices.called)
nx584_client.Client.assert_called_once_with('http://localhost:5007')
@mock.patch('homeassistant.components.binary_sensor.nx584.NX584Watcher')
@mock.patch('homeassistant.components.binary_sensor.nx584.NX584ZoneSensor')
def test_setup_full_config(self, mock_nx, mock_watcher):
"""Test the setup with full configuration."""
config = {
'host': 'foo',
'port': 123,
'exclude_zones': [2],
'zone_types': {3: 'motion'},
}
add_devices = mock.MagicMock()
hass = mock.MagicMock()
self.assertTrue(nx584.setup_platform(hass, config, add_devices))
mock_nx.assert_has_calls([
mock.call(self.fake_zones[0], 'opening'),
mock.call(self.fake_zones[2], 'motion'),
])
self.assertTrue(add_devices.called)
nx584_client.Client.assert_called_once_with('http://foo:123')
self.assertTrue(mock_watcher.called)
def _test_assert_graceful_fail(self, config):
"""Test the failing."""
hass = add_devices = mock.MagicMock()
self.assertFalse(setup_component(hass, 'binary_sensor.nx584', config))
self.assertFalse(add_devices.called)
def test_setup_bad_config(self):
"""Test the setup with bad configuration."""
bad_configs = [
{'exclude_zones': ['a']},
{'zone_types': {'a': 'b'}},
{'zone_types': {1: 'notatype'}},
{'zone_types': {'notazone': 'motion'}},
]
for config in bad_configs:
self._test_assert_graceful_fail(config)
def test_setup_connect_failed(self):
"""Test the setup with connection failure."""
nx584_client.Client.return_value.list_zones.side_effect = \
requests.exceptions.ConnectionError
self._test_assert_graceful_fail({})
def test_setup_version_too_old(self):
""""Test if version is too old."""
nx584_client.Client.return_value.get_version.return_value = '1.0'
self._test_assert_graceful_fail({})
def test_setup_no_zones(self):
"""Test the setup with no zones."""
nx584_client.Client.return_value.list_zones.return_value = []
hass = add_devices = mock.MagicMock()
self.assertTrue(nx584.setup_platform(hass, {}, add_devices))
self.assertFalse(add_devices.called)
class TestNX584ZoneSensor(unittest.TestCase):
"""Test for the NX584 zone sensor."""
def test_sensor_normal(self):
"""Test the sensor."""
zone = {'number': 1, 'name': 'foo', 'state': True}
sensor = nx584.NX584ZoneSensor(zone, 'motion')
self.assertEqual('foo', sensor.name)
self.assertFalse(sensor.should_poll)
self.assertTrue(sensor.is_on)
zone['state'] = False
self.assertFalse(sensor.is_on)
class TestNX584Watcher(unittest.TestCase):
"""Test the NX584 watcher."""
@mock.patch.object(nx584.NX584ZoneSensor, 'update_ha_state')
def test_process_zone_event(self, mock_update):
"""Test the processing of zone events."""
zone1 = {'number': 1, 'name': 'foo', 'state': True}
zone2 = {'number': 2, 'name': 'bar', 'state': True}
zones = {
1: nx584.NX584ZoneSensor(zone1, 'motion'),
2: nx584.NX584ZoneSensor(zone2, 'motion'),
}
watcher = nx584.NX584Watcher(None, zones)
watcher._process_zone_event({'zone': 1, 'zone_state': False})
self.assertFalse(zone1['state'])
self.assertEqual(1, mock_update.call_count)
@mock.patch.object(nx584.NX584ZoneSensor, 'update_ha_state')
def test_process_zone_event_missing_zone(self, mock_update):
"""Test the processing of zone events with missing zones."""
watcher = nx584.NX584Watcher(None, {})
watcher._process_zone_event({'zone': 1, 'zone_state': False})
self.assertFalse(mock_update.called)
def test_run_with_zone_events(self):
"""Test the zone events."""
empty_me = [1, 2]
def fake_get_events():
"""Return nothing twice, then some events."""
if empty_me:
empty_me.pop()
else:
return fake_events
client = mock.MagicMock()
fake_events = [
{'zone': 1, 'zone_state': True, 'type': 'zone_status'},
{'zone': 2, 'foo': False},
]
client.get_events.side_effect = fake_get_events
watcher = nx584.NX584Watcher(client, {})
@mock.patch.object(watcher, '_process_zone_event')
def run(fake_process):
fake_process.side_effect = StopMe
self.assertRaises(StopMe, watcher._run)
fake_process.assert_called_once_with(fake_events[0])
run()
self.assertEqual(3, client.get_events.call_count)
@mock.patch('time.sleep')
def test_run_retries_failures(self, mock_sleep):
"""Test the retries with failures."""
empty_me = [1, 2]
def fake_run():
if empty_me:
empty_me.pop()
raise requests.exceptions.ConnectionError()
else:
raise StopMe()
watcher = nx584.NX584Watcher(None, {})
with mock.patch.object(watcher, '_run') as mock_inner:
mock_inner.side_effect = fake_run
self.assertRaises(StopMe, watcher.run)
self.assertEqual(3, mock_inner.call_count)
mock_sleep.assert_has_calls([mock.call(10), mock.call(10)])
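# Hedged illustration (not used by the tests): the configuration exercised by
# test_setup_full_config corresponds to a configuration.yaml entry roughly
# along these lines:
#
#   binary_sensor:
#     - platform: nx584
#       host: foo
#       port: 123
#       exclude_zones: [2]
#       zone_types:
#         3: motion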
|
|
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import dragonfly.scene.unbound, dragonfly.scene.bound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import dragonfly.convert.pull
import dragonfly.logic
import dragonfly.bind
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d
from bee.mstr import mstr
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
env = Spyder.Model3D("models/environment", "egg", a)
#First panda
a = Spyder.AxisSystem()
a *= 0.005
pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass")
box = Spyder.Box2D(50, 470, 96, 96)
icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
#Second panda
a = Spyder.AxisSystem()
a *= 0.002
pandaclass2 = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass2")
box = Spyder.Box2D(200, 500, 48, 48)
icon2 = Spyder.Icon("pandaicon.png", "pandaicon2", box, transparency=True)
#Third panda
a = Spyder.AxisSystem()
a *= 0.3
model = Spyder.Model3D("models/panda", "egg", a)
pandaclass3 = Spyder.EntityClass3D("pandaclass3", [model])
box = Spyder.Box2D(280, 480, 144, 112)
icon3 = Spyder.Icon("pandaicon2.png", "pandaicon3", box, transparency=True)
camcenter = Spyder.Entity3D(
"camcenter",
(
Spyder.NewMaterial("white", color=(255, 255, 255)),
Spyder.Block3D((1, 1, 1), material="white"),
)
)
marker = Spyder.Entity3D(
"marker",
(
Spyder.NewMaterial("blue", color=(0, 0, 255)),
Spyder.Circle(2, origin=(0, 0, 0.1), material="blue")
)
)
del a, box, model
from bee.spyderhive.hivemaphive import hivemapinithive
class pandawalkhive(hivemapinithive):
pandahivemap = Spyder.Hivemap.fromfile("pandawalk.web")
class pandawalkhive2(hivemapinithive):
pandahivemap = Spyder.Hivemap.fromfile("pandawalk2.web")
from jumpworker import jumpworker
class pandajumphive(bee.inithive):
ksp = dragonfly.io.keyboardsensor_trigger("SPACE")
jump = jumpworker(height=2, duration=0.7)
connect(ksp, jump)
from bee.staticbind import staticbind_baseclass
class pandabind(dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind,
dragonfly.bind.bind):
bind_entity = "relative"
bind_keyboard = "indirect"
class camerabindhive(hivemapinithive):
camera_hivemap = Spyder.Hivemap.fromfile("camera.web")
class camerabind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = camerabindhive
class myhive(dragonfly.pandahive.pandahive):
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
camerabind = camerabind().worker()
camcenter = dragonfly.std.variable("id")("camcenter")
connect(camcenter, camerabind.bindname)
startsensor = dragonfly.sys.startsensor()
cam = dragonfly.scene.get_camera()
camparent = dragonfly.scene.unbound.parent()
connect(cam, camparent.entityname)
connect(camcenter, camparent.entityparentname)
connect(startsensor, camparent)
cphide = dragonfly.scene.unbound.hide()
connect(camcenter, cphide)
connect(startsensor, cphide)
v_marker = dragonfly.std.variable("id")("marker")
hide_marker = dragonfly.scene.unbound.hide()
connect(v_marker, hide_marker)
show_marker = dragonfly.scene.unbound.show()
connect(v_marker, show_marker)
parent_marker = dragonfly.scene.unbound.parent()
connect(v_marker, parent_marker.entityname)
connect(startsensor, hide_marker)
pandaspawn = dragonfly.scene.spawn_actor_or_entity()
v_panda = dragonfly.std.variable("id")("")
connect(v_panda, pandaspawn)
panda_id_gen = dragonfly.std.generator("id", id_generator)()
panda_id = dragonfly.std.variable("id")("")
t_panda_id_gen = dragonfly.std.transistor("id")()
connect(panda_id_gen, t_panda_id_gen)
connect(t_panda_id_gen, panda_id)
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
hivereg = dragonfly.bind.hiveregister()
c_hivereg = bee.configure("hivereg")
pandabinder = pandabind().worker()
v_hivename = dragonfly.std.variable("id")("")
w_bind = dragonfly.std.weaver(("id", "id"))()
connect(panda_id, w_bind.inp1)
connect(v_hivename, w_bind.inp2)
t_bind = dragonfly.std.transistor("id")()
connect(panda_id, t_bind)
t_bind2 = dragonfly.std.transistor(("id", "id"))()
connect(w_bind, t_bind2)
connect(t_bind2, pandabinder.bind)
sel = dragonfly.logic.selector()
connect(t_bind, sel.register_and_select)
selected = dragonfly.std.variable("id")("")
connect(t_bind, selected)
t_get_selected = dragonfly.logic.filter("trigger")()
connect(sel.empty, t_get_selected)
tt_get_selected = dragonfly.std.transistor("id")()
do_select = dragonfly.std.pushconnector("trigger")()
connect(t_get_selected.false, do_select)
connect(do_select, tt_get_selected)
connect(sel.selected, tt_get_selected)
connect(tt_get_selected, selected)
disp_sel = dragonfly.io.display("id")("Selected: ")
connect(tt_get_selected, disp_sel)
connect(selected, parent_marker.entityparentname)
connect(do_select, show_marker)
connect(do_select, parent_marker)
key_tab = dragonfly.io.keyboardsensor_trigger("TAB")
connect(key_tab, sel.select_next)
connect(key_tab, t_get_selected)
key_bsp = dragonfly.io.keyboardsensor_trigger("BACKSPACE")
connect(key_bsp, sel.select_prev)
connect(key_bsp, t_get_selected)
kill = dragonfly.std.pushconnector("trigger")()
t_kill = dragonfly.std.transistor("id")()
connect(selected, t_kill)
connect(t_kill, pandabinder.stop)
remove = dragonfly.scene.unbound.remove_actor_or_entity()
connect(t_kill, remove)
disp_kill = dragonfly.io.display("id")("Killed: ")
connect(t_kill, disp_kill)
connect(kill, t_kill)
connect(kill, sel.unregister)
connect(kill, hide_marker)
connect(kill, t_get_selected)
testkill = dragonfly.logic.filter("trigger")()
connect(sel.empty, testkill)
connect(testkill.false, kill)
key_k = dragonfly.io.keyboardsensor_trigger("K")
connect(key_k, testkill)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
trig_spawn = dragonfly.std.pushconnector("trigger")()
connect(trig_spawn, t_panda_id_gen)
connect(trig_spawn, do_spawn)
connect(trig_spawn, t_bind)
connect(trig_spawn, t_bind2)
connect(trig_spawn, do_select)
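# Spawn pipeline wired above: clicking one of the panda icons below sets the
# actor class and hive name, then fires trig_spawn, which draws a fresh id,
# spawns the actor at a random matrix, binds its hive and selects it.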
#First panda
v_panda1 = dragonfly.std.variable("id")("pandaclass")
set_panda1 = dragonfly.std.transistor("id")()
connect(v_panda1, set_panda1)
connect(set_panda1, v_panda)
c_hivereg.register_hive("pandawalk", pandawalkhive)
v_hivename1 = dragonfly.std.variable("id")("pandawalk")
set_hivename1 = dragonfly.std.transistor("id")()
connect(v_hivename1, set_hivename1)
connect(set_hivename1, v_hivename)
pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
connect(pandaicon_click, set_panda1)
connect(pandaicon_click, set_hivename1)
connect(pandaicon_click, trig_spawn)
#Second panda
v_panda2 = dragonfly.std.variable("id")("pandaclass2")
set_panda2 = dragonfly.std.transistor("id")()
connect(v_panda2, set_panda2)
connect(set_panda2, v_panda)
c_hivereg.register_hive("pandawalk2", pandawalkhive2)
v_hivename2 = dragonfly.std.variable("id")("pandawalk2")
set_hivename2 = dragonfly.std.transistor("id")()
connect(v_hivename2, set_hivename2)
connect(set_hivename2, v_hivename)
pandaicon2_click = dragonfly.io.mouseareasensor("pandaicon2")
connect(pandaicon2_click, set_panda2)
connect(pandaicon2_click, set_hivename2)
connect(pandaicon2_click, trig_spawn)
#Third panda
v_panda3 = dragonfly.std.variable("id")("pandaclass3")
set_panda3 = dragonfly.std.transistor("id")()
connect(v_panda3, set_panda3)
connect(set_panda3, v_panda)
c_hivereg.register_hive("pandajump", pandajumphive)
v_hivename3 = dragonfly.std.variable("id")("pandajump")
set_hivename3 = dragonfly.std.transistor("id")()
connect(v_hivename3, set_hivename3)
connect(set_hivename3, v_hivename)
pandaicon3_click = dragonfly.io.mouseareasensor("pandaicon3")
connect(pandaicon3_click, set_panda3)
connect(pandaicon3_click, set_hivename3)
connect(pandaicon3_click, trig_spawn)
myscene = myscene(
scene="scene",
canvas=canvas,
mousearea=mousearea,
)
wininit = bee.init("window")
wininit.camera.setPos(0, 45, 25)
wininit.camera.setHpr(180, -20, 0)
keyboardevents = dragonfly.event.sensor_match_leader("keyboard")
add_head = dragonfly.event.add_head()
head = dragonfly.convert.pull.duck("id", "event")()
connect(selected, head)
connect(keyboardevents, add_head)
connect(head, add_head)
connect(add_head, pandabinder.event)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
|
|
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel, Tag,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import (
SimpleArrayField, SplitArrayField, SplitArrayWidget,
)
except ImportError:
pass
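# Hedged sketch (not one of the imported models): the test models referenced
# above are declared in .models roughly along these lines.
#
#   class IntegerArrayModel(PostgreSQLModel):
#       field = ArrayField(models.IntegerField())
#
#   class NestedIntegerArrayModel(PostgreSQLModel):
#       field = ArrayField(ArrayField(models.IntegerField()))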
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field('field')
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
NullableIntegerArrayModel.objects.create(field=[1]),
NullableIntegerArrayModel.objects.create(field=[2]),
NullableIntegerArrayModel.objects.create(field=[2, 3]),
NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
NullableIntegerArrayModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=['text'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=['text']),
[instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
[instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F('id')]]),
self.objs[:2]
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F('field')]),
self.objs[:4]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
@unittest.expectedFailure
def test_contained_by_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[models.F('id'), 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=['text']),
[]
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=['text']),
[]
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=['text']),
[]
)
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2),
self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3),
self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1),
[instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
[instance]
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3]
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2),
self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0),
[obj]
)
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
self.objs[2:3]
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
[instance]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]]
)
def test_unsupported_lookup(self):
msg = "Unsupported lookup '0_bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
msg = "Unsupported lookup '0bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
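# Rough mapping (a sketch, not taken from Django source) of the lookups exercised
# above onto PostgreSQL array operators; the exact SQL is generated by Django and
# may differ between versions:
#   field__contains=[2]        ->  "field" @> ARRAY[2]
#   field__contained_by=[1,2]  ->  "field" <@ ARRAY[1, 2]
#   field__overlap=[1, 2]      ->  "field" && ARRAY[1, 2]
#   field__len=0               ->  coalesce(array_length("field", 1), 0) = 0
#   field__0=2                 ->  "field"[1] = 2      (SQL arrays are 1-based)
#   field__0_1=[2]             ->  "field"[1:1] = ARRAY[2]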
class TestDateTimeExactQuerying(PostgreSQLTestCase):
def setUp(self):
now = timezone.now()
self.datetimes = [now]
self.dates = [now.date()]
self.times = [now.time()]
self.objs = [
DateTimeArrayModel.objects.create(
datetimes=self.datetimes,
dates=self.dates,
times=self.times,
)
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes),
self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates),
self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times),
self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
def setUp(self):
self.ips = ['192.168.0.1', '::1']
self.uuids = [uuid.uuid4()]
self.decimals = [decimal.Decimal(1.25), 1.75]
self.tags = [Tag(1), Tag(2), Tag(3)]
self.objs = [
OtherTypesArrayModel.objects.create(
ips=self.ips,
uuids=self.uuids,
decimals=self.decimals,
tags=self.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips),
self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids),
self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals),
self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags),
self.objs
)
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E002')
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ['postgres_tests']
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
})
def test_adding_field_with_default(self):
# See #22962
table_name = 'postgres_tests_integerarraydefaultmodel'
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
})
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
"""
table_name = 'postgres_tests_chartextarrayindexmodel'
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v['columns']
for k, v in list(connection.introspection.get_constraints(cursor, table_name).items())
if k.endswith('_like')
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [['char2']])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c['columns'][0]
for c in connection.introspection.get_constraints(cursor, table_name).values()
if c['index'] and len(c['columns']) == 1
]
self.assertIn('char', indexes)
self.assertIn('char2', indexes)
self.assertIn('text', indexes)
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
)
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
class TestValidation(PostgreSQLTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
'Item 1 in the array did not validate: This field cannot be null.'
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, 'nested_array_mismatch')
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc'], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
def test_with_validators(self):
field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 0, 'limit_value': 1, 'show_value': 0})
class TestSimpleFormField(PostgreSQLTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean('a,b,c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,9')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,')
self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('abc,c,defg')
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(first_error.code, 'item_invalid')
self.assertEqual(first_error.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
second_error = errors[1]
self.assertEqual(
second_error.message,
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).'
)
self.assertEqual(second_error.code, 'item_invalid')
self.assertEqual(second_error.params, {'nth': 2, 'value': 'defg', 'limit_value': 2, 'show_value': 4})
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,bc,de')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter='|')
value = field.clean('a|b|c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
value = field.clean('a,b|c,d')
self.assertEqual(value, [['a', 'b'], ['c', 'd']])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(['a', 'b', 'c'])
self.assertEqual(value, 'a,b,c')
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('')
self.assertEqual(cm.exception.messages[0], 'This field is required.')
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
def test_already_converted_value(self):
field = SimpleArrayField(forms.CharField())
vals = ['a', 'b', 'c']
self.assertEqual(field.clean(vals), vals)
class TestSplitFormField(PostgreSQLTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {'array_0': '', 'array_1': '', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['This field is required.']})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {'array_0': '', 'array_1': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})
def test_invalid_integer(self):
msg = 'Item 1 in the array did not validate: Ensure this value is less than or equal to 100.'
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(str(SplitForm()), '''
<tr>
<th><label for="id_array_0">Array:</label></th>
<td>
<input id="id_array_0" name="array_0" type="text" required />
<input id="id_array_1" name="array_1" type="text" required />
<input id="id_array_2" name="array_2" type="text" required />
</td>
</tr>
''')
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc', 'c', 'defg'])
self.assertEqual(cm.exception.messages, [
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).',
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).',
])
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ('field',)
form = Form({'field_0': '1', 'field_1': '2'})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
class TestSplitFormWidget(PostgreSQLTestCase):
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), True)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_1': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value', 'field_1': 'value'}, {}, 'field'), False)
|
|
#!/usr/bin/env python
"""
Lamina specification
"""
import csv
import os
import collections
import numpy as np
import gzip
import networkx as nx
class hex_array(object):
"""
0 1 2 3 4
----------------------> cols (X=cols*sqrt(3))
0| 0 2 4
| 1 3
1| 5 7 9
| 6 8
2| 10 12 14
| 11 13
|
V
rows (first col: 0,2,4,6)
(Y=2*row if col is even else Y=2*row+1 )
"""
def __init__(self, nrows, ncols):
self.nrows = nrows
self.ncols = ncols
self.num_elements = nrows * ncols
self.X = np.tile(np.arange(self.ncols, dtype = np.double).reshape((1, self.ncols))*np.sqrt(3),
(self.nrows, 1))
if (self.ncols % 2 == 0):
self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
(1, self.ncols//2))
else:
self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
(1, self.ncols//2+1))
self.Y = self.Y[:,0:-1]
self.col = np.tile(np.arange(self.ncols, dtype = np.int32).reshape((1, self.ncols)),
(self.nrows, 1))
self.row = np.tile(np.arange(self.nrows, dtype = np.int32).reshape((self.nrows, 1)),
(1, self.ncols))
self.col = self.col.reshape(-1)
self.row = self.row.reshape(-1)
self.num = np.arange(self.num_elements, dtype = np.int32).reshape(nrows, ncols)
def find_neighbor(self, row, col):
"""
neighbors are defined relatively as
1
2 6
0
3 5
4
"""
if col < 0 or col >= self.ncols:
raise ValueError("column number " + str(col) + " exceeds array limit")
if row < 0 or row >= self.nrows:
raise ValueError("row number " + str(row) + " exceeds array limit")
# adding neighbor 0 (self)
neighbor = [self.num[row, col]]
# adding neighbor 1
neighbor.append(self.num[row-1, col] if row != 0 else None)
# adding neighbor 2, 3
if col == 0:
neighbor.extend([None, None])
elif col % 2 == 0:
if row == 0:
neighbor.extend([None, self.num[row, col-1]])
else:
neighbor.extend(list(self.num[row-1:row+1, col-1]))
else:
if row == self.nrows-1:
neighbor.extend([self.num[row, col-1], None])
else:
neighbor.extend(list(self.num[row:row+2, col-1]))
# adding neighbor 4
neighbor.append(self.num[row+1, col] if row != self.nrows-1 else None)
# adding neighbor 5, 6
if col == self.ncols-1:
neighbor.extend([None, None])
elif col % 2 == 0:
if row == 0:
neighbor.extend([self.num[row, col+1], None])
else:
neighbor.extend(
list(self.num[row:row-2 if row-2 >= 0 else None:-1, col+1]))
else:
if row == self.nrows-1:
neighbor.extend([None, self.num[row, col+1]])
else:
neighbor.extend(
list(self.num[row+1:row-1 if row-1 >= 0 else None:-1, col+1]))
return neighbor
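# Usage sketch (illustrative, not part of the original module): for a 2x3 array,
#     h = hex_array(2, 3)
#     h.num                      # array([[0, 1, 2], [3, 4, 5]], dtype=int32)
#     h.X[0]                     # [0.0, sqrt(3), 2*sqrt(3)]
#     h.Y[0]                     # [0.0, 1.0, 0.0] -- odd columns sit half a row lower
#     h.find_neighbor(0, 0)      # [0, None, None, None, 3, 1, None]
# i.e. element 0 has nothing above or to its left, element 3 below it and
# element 1 to its lower right, matching the numbering in find_neighbor's docstring.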
class vision_LPU(object):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
LPU_name):
self.nrows = nrows
self.ncols = ncols
self.num_cartridges = nrows * ncols
self.neuron_csv = neuron_csv
self.columnar_synapse_csv = columnar_synapse_csv
self.other_synapse_csv = other_synapse_csv
self.hexarray = hex_array(nrows, ncols)
self._connected = False
self.LPU_name = LPU_name
self.composition_rules = []
# read in csv file and turn it into a numpy structured array
neuron_list = []
dtypes = [np.dtype('S10'), np.dtype('S32'),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double)]
with open(self.neuron_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
self.neuron_field_name = reader.next()
n_entry = len(self.neuron_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
neuron_list.append(tuple(tmp))
self.num_neuron_types = len(neuron_list)
self.neuron_dict = np.array(
neuron_list,
dtype = [(a, b) for a, b in zip(self.neuron_field_name, dtypes)])
# read in csv file and turn it into a numpy structured array
if self.columnar_synapse_csv is not None:
synapse_list = []
dtypes = [np.dtype('S10'), np.dtype('S10'),
np.dtype('S32'),
np.dtype(np.int32), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.int32)]
with open(self.columnar_synapse_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
self.synapse_field_name = reader.next()
n_entry = len(self.synapse_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
synapse_list.append(tuple(tmp))
self.num_synapse_types = len(synapse_list)
self.synapse_dict = np.array(
synapse_list,
dtype = [(a, b) for a, b in zip(self.synapse_field_name, dtypes)])
else:
self.num_synapse_types = 0
self.synapse_dict = []
if self.other_synapse_csv is not None:
synapse_list = []
dtypes = [np.dtype('S10'), np.dtype('S10'),
np.dtype(np.int32),
np.dtype(np.int32), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.int32)]
# read the non-columnar synapse file and keep it separate from the
# columnar synapse data loaded above
with open(self.other_synapse_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
self.other_synapse_field_name = reader.next()
n_entry = len(self.other_synapse_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
synapse_list.append(tuple(tmp))
self.num_other_synapse_types = len(synapse_list)
self.other_synapse_dict = np.array(
synapse_list,
dtype = [(a, b) for a, b in zip(self.other_synapse_field_name, dtypes)])
else:
self.num_other_synapse_types = 0
self.other_synapse_dict = []
def create_cartridges(self):
# create a number of cartridges
self.cartridge_neuron_dict = self.neuron_dict[self.neuron_dict['columnar'] == 1]
self.cartridge_synapse_dict = self.synapse_dict[self.synapse_dict['cart'] == 0]
self.cartridges = []
for _ in range(self.num_cartridges):
self.cartridges.append(
Cartridge(self.cartridge_neuron_dict,
self.cartridge_synapse_dict))
def connect_cartridges(self):
# connect cartridge from their neighbors
if not hasattr(self, 'cartridges'):
raise AttributeError("Need to create cartridges before connecting them")
count = 0
for cartridge in self.cartridges:
row = np.asscalar(self.hexarray.row[count])
col = np.asscalar(self.hexarray.col[count])
cartridge.assign_pos(count, row, col,
np.asscalar(self.hexarray.X[row,col]),
np.asscalar(self.hexarray.Y[row,col]))
neighbor_num = self.hexarray.find_neighbor(row, col)
cartridge.set_neighbors(
[self.cartridges[num] if num is not None else None
for num in neighbor_num])
count += 1
self._connected = True
def create_non_columnar_neurons(self):
self.non_columnar_neurons = collections.OrderedDict()
self.non_columnar_neuron_list = self.neuron_dict[self.neuron_dict['columnar'] != 1]
dtnames = self.non_columnar_neuron_list.dtype.names
for neuron_dict in self.non_columnar_neuron_list:
name = neuron_dict['name']
self.non_columnar_neurons.update({name: []})
for _ in range(neuron_dict['columnar']):
self.non_columnar_neurons[name].append(
Neuron(dict(zip(dtnames, [np.asscalar(p) for p in neuron_dict]))))
def remove_cartridge(self, num):
pass
def remove_neuron_type(self, name):
pass
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'LPU unconfigured'
def export_to_gexf(self, filename):
g = nx.MultiDiGraph()
num = 0
for neuron_type in self.neuron_dict:
if not neuron_type['dummy']:
if neuron_type['columnar'] == 1:
name = neuron_type['name']
for cartridge in self.cartridges:
neuron = cartridge.neurons[name]
neuron.add_num(num)
neuron.process_before_export()
g.add_node(num, neuron.params)
num += 1
for name in self.non_columnar_neurons.iterkeys():
for neuron in self.non_columnar_neurons[name]:
neuron.add_num(num)
neuron.process_before_export()
g.add_node(num, neuron.params)
num += 1
for cartridge in self.cartridges:
for synapse in cartridge.synapses:
synapse.process_before_export()
g.add_edge(synapse.pre_neuron.num, synapse.post_neuron.num,
attr_dict = synapse.params)
for cr in self.composition_rules:
for synapse in cr['synapses']:
synapse.process_before_export()
g.add_edge(synapse.pre_neuron.num, synapse.post_neuron.num,
attr_dict = synapse.params)
if isinstance(filename, str):
name, ext = os.path.splitext(filename)
if name == '':
raise ValueError("Please specify a valid filename")
if ext == '.gz':
with gzip.open(filename, 'w') as f:
nx.write_gexf(g, f, prettyprint=True)
else:
if ext != '.gexf':
name = filename + '.gexf'
else:
name = filename
nx.write_gexf(g, name, prettyprint=True)
else:
raise ValueError("Specify the filename in string")
def add_selectors(self):
for neuron_type in self.neuron_dict:
if not neuron_type['dummy']:
if neuron_type['columnar'] == 1:
if neuron_type['public'] == 1:
name = neuron_type['name']
for cartridge in self.cartridges:
neuron = cartridge.neurons[name]
neuron.add_selector(
'/'+self.LPU_name+'/cart{0}'.format(cartridge.num)
+'/'+name)
for name in self.non_columnar_neurons.iterkeys():
count = 0
for neuron in self.non_columnar_neurons[name]:
if neuron.is_public():
neuron.add_selector(
'/'+self.LPU_name+'/'+name+'[{0}]'.format(count))
count += 1
class Lamina(vision_LPU):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv):
super(Lamina, self).__init__(nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
'lamina')
def connect_composition_II(self):
# create synapses defined in composition rule II.
if not self._connected:
raise AttributeError("Need to connect cartridges before setting interconnects")
self.rule2synapses = self.synapse_dict[self.synapse_dict['cart'] != 0]
synapse_list = []
dtnames = self.rule2synapses.dtype.names
for cartridge in self.cartridges:
for synapse_array in self.rule2synapses:
neighbor_num = synapse_array['cart']
if cartridge.neighbors[neighbor_num] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons[synapse_array['prename']],
cartridge.neighbors[neighbor_num].neurons[synapse_array['postname']])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list})
def connect_composition_I(self):
am_list = self.non_columnar_neurons['Am']
synapse_list = []
n_amacrine = len(am_list) # self.non_columnar_neuron_number['Am']
am_xpos = np.random.random(n_amacrine)*self.hexarray.X[-1,-1]
am_ypos = np.random.random(n_amacrine)*self.hexarray.Y[-1,-1]
count = 0
for neuron in am_list:
neuron.assign_pos(np.asscalar(am_xpos[count]),
np.asscalar(am_ypos[count]))
count += 1
bound = 4.0
alpha_profiles = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
fill = np.zeros((n_amacrine, self.num_cartridges), np.int32)
count = 0
for cartridge in self.cartridges:
xpos = cartridge.xpos
ypos = cartridge.ypos
#calculate distance and find amacrine cells within
#distance defined by bound
dist = np.sqrt((xpos-am_xpos)**2 + (ypos-am_ypos)**2)
suitable_am = np.nonzero(dist <= bound)[0]
# if less than 4 neurons in the bound, get
# the 4 closest amacrine cells
if suitable_am.size < 4:
suitable_am = np.argsort(dist)[0:4]
for name in alpha_profiles:
assigned = False
for am_num in np.random.permutation(suitable_am):
if fill[am_num, count] < 3:
fill[am_num, count] += 1
#a1-a6 do not have synapses outside a cartridge
synapses = cartridge.replace_dummy(name, am_list[am_num])
synapse_list.extend(synapses)
assigned = True
break
if not assigned:
print name + ' in cartridge ' + str(cartridge.num) + ' not assigned'
count += 1
self.fill = fill
self.composition_rules.append( {'synapses': synapse_list} )
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'Lamina LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'Lamina LPU unconfigured'
class Cartridge(object):
def __init__(self, neuron, connection):
self.connected = False
self.neuron_list = neuron.copy()
self.synapse_list = connection.copy()
self.neurons = collections.OrderedDict()
dtnames = self.neuron_list.dtype.names
for neuron_dict in self.neuron_list:
self.neurons.update(
{neuron_dict['name']:
Neuron(dict(zip(dtnames, [np.asscalar(p) for p in neuron_dict])))})
dtnames = self.synapse_list.dtype.names
self.synapses = []
for synapse_dict in self.synapse_list:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_dict])))
synapse.link(self.neurons[synapse.prename],
self.neurons[synapse.postname])
self.synapses.append(synapse)
def set_neighbors(self, neighbor_cartridges):
self.neighbors = []
for i in range(7):
self.neighbors.append(neighbor_cartridges[i])
def assign_pos(self, num, row, col, xpos, ypos):
self.num = num
self.row = row
self.col = col
self.xpos = xpos
self.ypos = ypos
for neurons in self.neurons:
self.neurons[neurons].assign_pos(xpos, ypos)
self.connected = True
def position(self):
return (self.xpos, self.ypos)
def __repr__(self):
if self.connected:
return 'Cartridge at ' + str(self.position())
else:
return 'Isolated cartridge at '+ hex(id(self))
def get_num(self):
return self.num
def get_xpos(self):
return self.xpos
def get_ypos(self):
return self.ypos
def replace_dummy(self, name, neuron):
removed_synapse_list = []
neuron_to_be_replaced = self.neurons[name]
if neuron_to_be_replaced.params['dummy'] != 1:
raise ValueError("Neuron to be replaced is not dummy element")
for synapse in neuron_to_be_replaced.outgoing_synapses:
flag = self.remove_synapse(synapse)
synapse.replace_pre(neuron)
if flag:
removed_synapse_list.append(synapse)
for synapse in neuron_to_be_replaced.incoming_synapses:
flag = self.remove_synapse(synapse)
synapse.replace_post(neuron)
if flag:
removed_synapse_list.append(synapse)
self.neurons[name].set_parent(neuron)
return removed_synapse_list
def remove_neuron(self, name):
self.neurons.pop(name)
def remove_synapse(self, synapse):
# the try/except here is to deal with Am to Am connection that
# may have been removed previously by another Am in the same cartridge
try:
self.synapses.remove(synapse)
return True
except:
return False
class Neuron(object):
def __init__(self, param_dict):
self.params = param_dict.copy()
if self.params['model'] == 'MorrisLecar':
spiking = False
else:
spiking = True
self.params.update({'spiking': spiking})
self.outgoing_synapses = []
self.incoming_synapses = []
@property
def name(self):
return self.params['name']
def add_outgoing_synapse(self, synapse):
self.outgoing_synapses.append(synapse)
def add_incoming_synapse(self, synapse):
self.incoming_synapses.append(synapse)
def remove_outgoing_synapse(self, synapse):
self.outgoing_synapses.remove(synapse)
def remove_incoming_synapse(self, synapse):
self.incoming_synapses.remove(synapse)
def __repr__(self):
return 'neuron '+self.params['name']+': '+str(self.params)
def __str__(self):
return 'neuron '+str(self.params['name'])
def assign_pos(self, xpos, ypos):
self.params.update({'xpos': xpos, 'ypos': ypos})
def position(self):
return (self.params['xpos'], self.params['ypos'])
def add_num(self, num):
self.num = num
def process_before_export(self):
self.params.update({'n_dendrites': len(self.incoming_synapses),
'n_outputs': len(self.outgoing_synapses)})
if 'dummy' in self.params.keys():
del self.params['dummy']
if 'columnar' in self.params.keys():
del self.params['columnar']
self.params['input'] = bool(self.params['input'])
self.params['output'] = bool(self.params['output'])
self.params['public'] = bool(self.params['public'])
self.params['extern'] = bool(self.params['extern'])
self.params['model'] = str(self.params['model'])
def is_dummy(self):
return self.params.get('dummy', False)
def is_public(self):
return self.params['public']
def add_selector(self, selector):
self.params['selector'] = selector
def set_parent(self, neuron):
self.parent = neuron
class Synapse(object):
def __init__(self, param_dict):
self.params = param_dict.copy()
self.params.update({'conductance': True})
def link(self, pre_neuron, post_neuron):
self.pre_neuron = pre_neuron
self.post_neuron = post_neuron
self.pre_neuron.add_outgoing_synapse(self)
self.post_neuron.add_incoming_synapse(self)
self.update_class(self.get_class(self.pre_neuron, self.post_neuron))
def replace_pre(self, pre_neuron):
self.pre_neuron = pre_neuron
self.pre_neuron.add_outgoing_synapse(self)
self.params['prename'] = pre_neuron.name
def replace_post(self, post_neuron):
self.post_neuron = post_neuron
self.post_neuron.add_incoming_synapse(self)
self.params['postname'] = post_neuron.name
def __repr__(self):
return ('synapse from '+self.params['prename']+' to ' + self.params['postname']
+ ': '+str(self.params))
def __str__(self):
return 'synapse '+str(self.params['prename'])+' to '+self.params['postname']
def process_before_export(self):
if 'cart' in self.params.keys():
del self.params['cart']
if 'scale' in self.params.keys():
self.params['slope'] *= self.params['scale']
self.params['saturation'] *= self.params['scale']
del self.params['scale']
self.params['model'] = str(self.params['model'])
@staticmethod
def get_class(preneuron, postneuron):
""" preneuron: Neuron instance
postneuron: Neuron instance
"""
is_pre_spk = preneuron.params['spiking']
is_post_spk = postneuron.params['spiking']
if is_pre_spk and is_post_spk:
return 0
elif is_pre_spk and not is_post_spk:
return 1
elif not is_pre_spk and is_post_spk:
return 2
elif not is_pre_spk and not is_post_spk:
return 3
def update_class(self, cls):
self.params.update({'class': cls})
@property
def prename(self):
return self.params['prename']
@property
def postname(self):
return self.params['postname']
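# Summary of the synapse class codes assigned by Synapse.get_class above
# (a reading aid, not part of the original file); "graded" means the neuron's
# model is MorrisLecar, i.e. spiking is False:
#   0: spiking -> spiking    1: spiking -> graded
#   2: graded  -> spiking    3: graded  -> graded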
def append_field(rec, name, arr, dtype=None):
arr = np.asarray(arr)
if dtype is None:
dtype = arr.dtype
newdtype = np.dtype(rec.dtype.descr + [(name, dtype)])
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
newrec[name] = arr
return newrec
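# Usage sketch for append_field (illustrative, assuming a small two-row record array):
#     rec = np.array([(1, 2.0), (3, 4.0)], dtype=[('a', np.int32), ('b', np.double)])
#     new = append_field(rec, 'c', [10, 20], dtype=np.int32)
#     new.dtype.names            # ('a', 'b', 'c')
#     new['c']                   # array([10, 20], dtype=int32)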
|
|
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
import enum
import logging as log
import time
from collections import namedtuple
from datetime import datetime, timedelta
from . import git, gitlab
from .branch import Branch
from .interval import IntervalUnion
from .merge_request import MergeRequestRebaseFailed
from .project import Project
from .user import User
from .pipeline import Pipeline
class MergeJob:
def __init__(self, *, api, user, project, repo, options):
self._api = api
self._user = user
self._project = project
self._repo = repo
self._options = options
self._merge_timeout = timedelta(minutes=5)
@property
def repo(self):
return self._repo
@property
def project(self):
return self._project
@property
def opts(self):
return self._options
def execute(self):
raise NotImplementedError
def ensure_mergeable_mr(self, merge_request):
merge_request.refetch_info()
log.info('Ensuring MR !%s is mergeable', merge_request.iid)
log.debug('Ensuring MR %r is mergeable', merge_request)
if merge_request.work_in_progress:
raise CannotMerge("Sorry, I can't merge requests marked as Work-In-Progress!")
if merge_request.squash and self._options.requests_commit_tagging:
raise CannotMerge(
"Sorry, merging requests marked as auto-squash would ruin my commit tagging!"
)
approvals = merge_request.fetch_approvals()
if not approvals.sufficient:
raise CannotMerge(
'Insufficient approvals '
'(have: {0.approver_usernames} missing: {0.approvals_left})'.format(approvals)
)
if not merge_request.blocking_discussions_resolved:
raise CannotMerge("Sorry, I can't merge requests which have unresolved discussions!")
state = merge_request.state
if state not in ('opened', 'reopened', 'locked'):
if state in ('merged', 'closed'):
raise SkipMerge('The merge request is already {}!'.format(state))
raise CannotMerge('The merge request is in an unknown state: {}'.format(state))
if self.during_merge_embargo():
raise SkipMerge('Merge embargo!')
if self._user.id not in merge_request.assignee_ids:
raise SkipMerge('It is not assigned to me anymore!')
def add_trailers(self, merge_request):
log.info('Adding trailers for MR !%s', merge_request.iid)
# add Reviewed-by
should_add_reviewers = (
self._options.add_reviewers and
self._options.fusion is not Fusion.gitlab_rebase
)
reviewers = (
_get_reviewer_names_and_emails(
merge_request.fetch_commits(),
merge_request.fetch_approvals(),
self._api,
) if should_add_reviewers
else None
)
sha = None
if reviewers is not None:
sha = self._repo.tag_with_trailer(
trailer_name='Reviewed-by',
trailer_values=reviewers,
branch=merge_request.source_branch,
start_commit='origin/' + merge_request.target_branch,
)
# add Tested-by
should_add_tested = (
self._options.add_tested and
self._project.only_allow_merge_if_pipeline_succeeds and
self._options.fusion is Fusion.rebase
)
tested_by = (
['{0._user.name} <{1.web_url}>'.format(self, merge_request)]
if should_add_tested
else None
)
if tested_by is not None:
sha = self._repo.tag_with_trailer(
trailer_name='Tested-by',
trailer_values=tested_by,
branch=merge_request.source_branch,
start_commit=merge_request.source_branch + '^'
)
# add Part-of
should_add_parts_of = (
self._options.add_part_of and
self._options.fusion is not Fusion.gitlab_rebase
)
part_of = (
'<{0.web_url}>'.format(merge_request)
if should_add_parts_of
else None
)
if part_of is not None:
sha = self._repo.tag_with_trailer(
trailer_name='Part-of',
trailer_values=[part_of],
branch=merge_request.source_branch,
start_commit='origin/' + merge_request.target_branch,
)
return sha
def get_mr_ci_status(self, merge_request, commit_sha=None):
if commit_sha is None:
commit_sha = merge_request.sha
if self._api.version().release >= (10, 5, 0):
pipelines = Pipeline.pipelines_by_merge_request(
merge_request.target_project_id,
merge_request.iid,
self._api,
)
else:
pipelines = Pipeline.pipelines_by_branch(
merge_request.source_project_id,
merge_request.source_branch,
self._api,
)
current_pipeline = next(iter(pipeline for pipeline in pipelines if pipeline.sha == commit_sha), None)
if current_pipeline:
ci_status = current_pipeline.status
else:
log.warning('No pipeline listed for %s on branch %s', commit_sha, merge_request.source_branch)
ci_status = None
return ci_status
def wait_for_ci_to_pass(self, merge_request, commit_sha=None):
time_0 = datetime.utcnow()
waiting_time_in_secs = 10
if commit_sha is None:
commit_sha = merge_request.sha
log.info('Waiting for CI to pass for MR !%s', merge_request.iid)
while datetime.utcnow() - time_0 < self._options.ci_timeout:
ci_status = self.get_mr_ci_status(merge_request, commit_sha=commit_sha)
if ci_status == 'success':
log.info('CI for MR !%s passed', merge_request.iid)
return
if ci_status == 'skipped':
log.info('CI for MR !%s skipped', merge_request.iid)
return
if ci_status == 'failed':
raise CannotMerge('CI failed!')
if ci_status == 'canceled':
raise CannotMerge('Someone canceled the CI.')
if ci_status not in ('pending', 'running'):
log.warning('Suspicious CI status: %r', ci_status)
log.debug('Waiting for %s secs before polling CI status again', waiting_time_in_secs)
time.sleep(waiting_time_in_secs)
raise CannotMerge('CI is taking too long.')
def wait_for_merge_status_to_resolve(self, merge_request):
"""
This function is for polling the async `merge_status` field in merge_request API response.
We suspected that the lag `merge_status` prevents MRs to be merged, and the fix did work
for some users.
But we are not sure if this is the root cause and if this is a proper fix. As there're some
evidence that suggest gitlab will always check the mergeability synchronously while merging MRs.
See more https://github.com/smarkets/marge-bot/pull/265#issuecomment-724147901
"""
attempts = 3
waiting_time_in_secs = 5
log.info('Waiting for MR !%s to have merge_status can_be_merged', merge_request.iid)
for attempt in range(attempts):
merge_request.refetch_info()
merge_status = merge_request.merge_status
if merge_status == 'can_be_merged':
log.info('MR !%s can be merged on attempt %d', merge_request.iid, attempt)
return
if merge_status == 'cannot_be_merged':
log.info('MR !%s cannot be merged on attempt %d', merge_request.iid, attempt)
raise CannotMerge('GitLab believes this MR cannot be merged.')
if merge_status == 'unchecked':
log.info('MR !%s merge status currently unchecked on attempt %d.', merge_request.iid, attempt)
time.sleep(waiting_time_in_secs)
def unassign_from_mr(self, merge_request):
log.info('Unassigning from MR !%s', merge_request.iid)
author_id = merge_request.author_id
if author_id != self._user.id:
merge_request.assign_to(author_id)
else:
merge_request.unassign()
def during_merge_embargo(self):
now = datetime.utcnow()
return self.opts.embargo.covers(now)
def maybe_reapprove(self, merge_request, approvals):
# Re-approve the merge request, in case us pushing it has removed approvals.
if self.opts.reapprove:
# approving is not idempotent, so we need to check first that there are no approvals,
# otherwise we'll get a failure on trying to re-instate the previous approvals
def sufficient_approvals():
return merge_request.fetch_approvals().sufficient
# Make sure we don't race by ensuring approvals have reset since the push
waiting_time_in_secs = 5
approval_timeout_in_secs = self._options.approval_timeout.total_seconds()
iterations = round(approval_timeout_in_secs / waiting_time_in_secs)
log.info('Checking if approvals have reset')
while sufficient_approvals() and iterations:
log.debug('Approvals haven\'t reset yet, sleeping for %s secs', waiting_time_in_secs)
time.sleep(waiting_time_in_secs)
iterations -= 1
if not sufficient_approvals():
approvals.reapprove()
def fetch_source_project(self, merge_request):
remote = 'origin'
remote_url = None
source_project = self.get_source_project(merge_request)
if source_project is not self._project:
remote = 'source'
remote_url = source_project.ssh_url_to_repo
self._repo.fetch(
remote_name=remote,
remote_url=remote_url,
)
return source_project, remote_url, remote
def get_source_project(self, merge_request):
source_project = self._project
if merge_request.source_project_id != self._project.id:
source_project = Project.fetch_by_id(
merge_request.source_project_id,
api=self._api,
)
return source_project
def get_target_project(self, merge_request):
return Project.fetch_by_id(merge_request.target_project_id, api=self._api)
def fuse(self, source, target, source_repo_url=None, local=False):
# NOTE: this leaves git switched to branch_a
strategies = {
Fusion.rebase: self._repo.rebase,
Fusion.merge: self._repo.merge,
Fusion.gitlab_rebase: self._repo.rebase, # we rebase locally to know sha
}
strategy = strategies[self._options.fusion]
return strategy(
source,
target,
source_repo_url=source_repo_url,
local=local,
)
def update_from_target_branch_and_push(
self,
merge_request,
source_repo_url=None,
skip_ci=False,
add_trailers=True,
):
"""Updates `source_branch` on `target_branch`, optionally add trailers and push.
The update strategy can either be rebase or merge. The default is rebase.
Returns
-------
(sha_of_target_branch, sha_after_update, sha_after_rewrite)
"""
repo = self._repo
source_branch = merge_request.source_branch
target_branch = merge_request.target_branch
assert source_repo_url != repo.remote_url
if source_repo_url is None and source_branch == target_branch:
raise CannotMerge('source and target branch seem to coincide!')
branch_update_done = commits_rewrite_done = False
try:
initial_mr_sha = merge_request.sha
updated_sha = self.fuse(
source_branch,
target_branch,
source_repo_url=source_repo_url,
)
branch_update_done = True
# The fuse above fetches origin again, so we are now safe to fetch
# the sha from the remote target branch.
target_sha = repo.get_commit_hash('origin/' + target_branch)
if updated_sha == target_sha:
raise CannotMerge('these changes already exist in branch `{}`'.format(target_branch))
final_sha = self.add_trailers(merge_request) if add_trailers else None
final_sha = final_sha or updated_sha
commits_rewrite_done = True
branch_was_modified = final_sha != initial_mr_sha
self.synchronize_mr_with_local_changes(
merge_request,
branch_was_modified,
source_repo_url,
skip_ci=skip_ci,
)
except git.GitError as err:
# A failure to clean up probably means something is fucked with the git repo
# and likely explains any previous failure, so it will better to just
# raise a GitError
if source_branch != self.project.default_branch:
repo.checkout_branch(self.project.default_branch)
repo.remove_branch(source_branch)
if not branch_update_done:
raise CannotMerge('got conflicts while rebasing, your problem now...') from err
if not commits_rewrite_done:
raise CannotMerge('failed on filter-branch; check my logs!') from err
raise
return target_sha, updated_sha, final_sha
def synchronize_mr_with_local_changes(
self,
merge_request,
branch_was_modified,
source_repo_url=None,
skip_ci=False,
):
if self._options.fusion is Fusion.gitlab_rebase:
self.synchronize_using_gitlab_rebase(merge_request)
else:
self.push_force_to_mr(
merge_request,
branch_was_modified,
source_repo_url=source_repo_url,
skip_ci=skip_ci,
)
def push_force_to_mr(
self,
merge_request,
branch_was_modified,
source_repo_url=None,
skip_ci=False,
):
try:
self._repo.push(
merge_request.source_branch,
source_repo_url=source_repo_url,
force=True,
skip_ci=skip_ci,
)
except git.GitError as err:
def fetch_remote_branch():
return Branch.fetch_by_name(
merge_request.source_project_id,
merge_request.source_branch,
self._api,
)
if branch_was_modified and fetch_remote_branch().protected:
raise CannotMerge("Sorry, I can't modify protected branches!") from err
change_type = "merged" if self.opts.fusion == Fusion.merge else "rebased"
raise CannotMerge('Failed to push %s changes, check my logs!' % change_type) from err
def synchronize_using_gitlab_rebase(self, merge_request, expected_sha=None):
expected_sha = expected_sha or self._repo.get_commit_hash()
try:
merge_request.rebase()
except MergeRequestRebaseFailed as err:
raise CannotMerge("GitLab failed to rebase the branch saying: {0[0]}".format(err.args)) from err
except TimeoutError as err:
raise CannotMerge("GitLab was taking too long to rebase the branch...") from err
except gitlab.ApiError as err:
branch = Branch.fetch_by_name(
merge_request.source_project_id,
merge_request.source_branch,
self._api,
)
if branch.protected:
raise CannotMerge("Sorry, I can't modify protected branches!") from err
raise
else:
if merge_request.sha != expected_sha:
raise GitLabRebaseResultMismatch(
gitlab_sha=merge_request.sha,
expected_sha=expected_sha,
)
def _get_reviewer_names_and_emails(commits, approvals, api):
"""Return a list ['A. Prover <a.prover@example.com', ...]` for `merge_request.`"""
uids = approvals.approver_ids
users = [User.fetch_by_id(uid, api) for uid in uids]
self_reviewed = {commit['author_email'] for commit in commits} & {user.email for user in users}
if self_reviewed and len(users) <= 1:
raise CannotMerge('Commits require at least one independent reviewer.')
return ['{0.name} <{0.email}>'.format(user) for user in users]
# pylint: disable=invalid-name
@enum.unique
class Fusion(enum.Enum):
merge = 0
rebase = 1
gitlab_rebase = 2
JOB_OPTIONS = [
'add_tested',
'add_part_of',
'add_reviewers',
'reapprove',
'approval_timeout',
'embargo',
'ci_timeout',
'fusion',
'use_no_ff_batches',
'use_merge_commit_batches',
'skip_ci_batches',
]
class MergeJobOptions(namedtuple('MergeJobOptions', JOB_OPTIONS)):
__slots__ = ()
@property
def requests_commit_tagging(self):
return self.add_tested or self.add_part_of or self.add_reviewers
@classmethod
def default(
cls, *,
add_tested=False, add_part_of=False, add_reviewers=False, reapprove=False,
approval_timeout=None, embargo=None, ci_timeout=None, fusion=Fusion.rebase,
use_no_ff_batches=False, use_merge_commit_batches=False, skip_ci_batches=False,
):
approval_timeout = approval_timeout or timedelta(seconds=0)
embargo = embargo or IntervalUnion.empty()
ci_timeout = ci_timeout or timedelta(minutes=15)
return cls(
add_tested=add_tested,
add_part_of=add_part_of,
add_reviewers=add_reviewers,
reapprove=reapprove,
approval_timeout=approval_timeout,
embargo=embargo,
ci_timeout=ci_timeout,
fusion=fusion,
use_no_ff_batches=use_no_ff_batches,
use_merge_commit_batches=use_merge_commit_batches,
skip_ci_batches=skip_ci_batches,
)
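# Illustrative usage (an assumption about typical configuration, not from the
# original module): the defaults give rebase fusion, an empty embargo, a zero
# approval timeout and a 15 minute CI timeout.
#
#     opts = MergeJobOptions.default(add_reviewers=True, ci_timeout=timedelta(minutes=30))
#     assert opts.fusion is Fusion.rebase
#     assert opts.requests_commit_tagging      # True, because add_reviewers is set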
class CannotMerge(Exception):
@property
def reason(self):
args = self.args
if not args:
return 'Unknown reason!'
return args[0]
class SkipMerge(CannotMerge):
pass
class GitLabRebaseResultMismatch(CannotMerge):
def __init__(self, gitlab_sha, expected_sha):
super().__init__(
"GitLab rebase ended up with a different commit:"
"I expected %s but they got %s" % (expected_sha, gitlab_sha)
)
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mt_opt dataset."""
import os
from typing import Any, Dict, Generator, Tuple
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """
Datasets for the [MT-Opt paper](https://arxiv.org/abs/2104.08212).
"""
_CITATION = """
@misc{kalashnikov2021mtopt,
title={MT-Opt: Continuous Multi-Task Robotic Reinforcement Learning at Scale},
author={Dmitry Kalashnikov and Jacob Varley and Yevgen Chebotar and Benjamin Swanson and Rico Jonschkowski and Chelsea Finn and Sergey Levine and Karol Hausman},
year={2021},
eprint={2104.08212},
archivePrefix={arXiv},
primaryClass={cs.RO}
}
"""
_BUILDER_CONFIGS = [
tfds.core.BuilderConfig(
name='rlds',
description='This dataset contains task episodes collected across a fleet of real robots.'
),
tfds.core.BuilderConfig(
name='sd',
description='The success detectors dataset that contains human curated definitions of tasks completion.'
)
]
_STEPS_FEATURES = tfds.features.FeaturesDict({
'action':
tfds.features.FeaturesDict({
'close_gripper':
tf.bool,
'open_gripper':
tf.bool,
'target_pose':
tfds.features.Tensor(
shape=(7,),
dtype=tf.float32,
encoding=tfds.features.Encoding.ZLIB),
'terminate':
tf.bool,
}),
'is_first':
tf.bool,
'is_last':
tf.bool,
'is_terminal':
tf.bool,
'observation':
tfds.features.FeaturesDict({
'gripper_closed':
tf.bool,
'height_to_bottom':
tf.float32,
'image':
tfds.features.Image(shape=(512, 640, 3), dtype=tf.uint8),
'state_dense':
tfds.features.Tensor(
shape=(7,),
dtype=tf.float32,
encoding=tfds.features.Encoding.ZLIB),
}),
})
_NAME_TO_FEATURES = {
'rlds':
tfds.features.FeaturesDict({
'episode_id': tf.string,
'skill': tf.uint8,
'steps': tfds.features.Dataset(_STEPS_FEATURES),
'task_code': tf.string,
}),
'sd':
tfds.features.FeaturesDict({
'image_0': tfds.features.Image(shape=(512, 640, 3), dtype=tf.uint8),
'image_1': tfds.features.Image(shape=(480, 640, 3), dtype=tf.uint8),
'image_2': tfds.features.Image(shape=(480, 640, 3), dtype=tf.uint8),
'success': tf.bool,
'task_code': tf.string,
}),
}
# To encode, we use sequence instead of nested dataset. Otherwise, Beam has
# issues calculating the size of the yielded examples (b/219881125)
_NAME_TO_FEATURES_ENCODE = {
'rlds':
tfds.features.FeaturesDict({
'episode_id': tf.string,
'skill': tf.uint8,
'steps': tfds.features.Sequence(_STEPS_FEATURES),
'task_code': tf.string,
}),
'sd':
tfds.features.FeaturesDict({
'image_0': tfds.features.Image(shape=(512, 640, 3), dtype=tf.uint8),
'image_1': tfds.features.Image(shape=(480, 640, 3), dtype=tf.uint8),
'image_2': tfds.features.Image(shape=(480, 640, 3), dtype=tf.uint8),
'success': tf.bool,
'task_code': tf.string,
}),
}
_NAME_TO_SPLITS = {
'sd': {
'train': 1024,
'test': 256,
},
'rlds': {
'train': 2048,
},
}
def _filename(prefix: str, num_shards: int, shard_id: int):
return os.fspath(
tfds.core.Path(f'{prefix}-{shard_id:05d}-of-{num_shards:05d}'))
def _get_files(prefix: str, ds_name: str, split: str, num_shards: int):
prefix = f'{prefix}/mt_opt_{ds_name}/1.0.0/mt_opt_{ds_name}-{split}.tfrecord'
return [_filename(prefix, num_shards, i) for i in range(num_shards)]
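# For illustration, the call below would yield shard paths of the form
# (first element shown; this simply mirrors the f-strings above):
#   _get_files('gs://gresearch/robotics', 'rlds', 'train', 2048)[0]
#   == 'gs://gresearch/robotics/mt_opt_rlds/1.0.0/mt_opt_rlds-train.tfrecord-00000-of-02048'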
class MtOpt(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for mt_opt datasets."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
BUILDER_CONFIGS = _BUILDER_CONFIGS
_INPUT_FILE_PREFIX = 'gs://gresearch/robotics/'
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=_NAME_TO_FEATURES[self.builder_config.name],
supervised_keys=None,
homepage='https://karolhausman.github.io/mt-opt/',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
ds_name = self.builder_config.name
splits = {}
for split, shards in _NAME_TO_SPLITS[ds_name].items():
paths = {
'file_paths':
_get_files(self._INPUT_FILE_PREFIX, ds_name, split, shards)
}
splits[split] = self._generate_examples(paths)
return splits
def _generate_examples_one_file(
self, path) -> Generator[Tuple[str, Dict[str, Any]], None, None]:
"""Yields examples from one file."""
# Dataset of tf.Examples containing full episodes.
example_ds = tf.data.TFRecordDataset(filenames=str(path))
example_features = _NAME_TO_FEATURES_ENCODE[self.builder_config.name]
example_specs = example_features.get_serialized_info()
parser = tfds.core.example_parser.ExampleParser(example_specs)
parsed_examples = example_ds.map(parser.parse_example)
decoded_examples = parsed_examples.map(example_features.decode_example)
for index, example in enumerate(tfds.as_numpy(decoded_examples)):
if self.builder_config.name == 'rlds':
id_key = 'episode_id'
else:
id_key = 'task_code'
example_id = str(index) + str(example[id_key]) + str(hash(path))
yield example_id, example
def _generate_examples(self, paths):
"""Yields examples."""
beam = tfds.core.lazy_imports.apache_beam
file_paths = paths['file_paths']
return beam.Create(file_paths) | beam.FlatMap(
self._generate_examples_one_file)
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] is not None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
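# wordreverse reverses the order of the 32-bit words in a buffer (bytes within each word are kept).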
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
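        # Splice the nonce into the getwork data: the 80-byte header is the first 160 hex chars, and the nonce field (header bytes 76-80) occupies chars 152-160.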
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6584
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
# CAU: Adaptation of the cpython 2.2 test_array.py for jython 2.2
# Formerly test_jarray.py, now test_array.py so that this
# test completely supersedes the cpython test. It would
# be better to simply complement the cpython test, but that
# test bombs out too early due to basic incompatibilities.
#
# The jarray module is being phased out, with all functionality
# now available in the array module.
from test_support import *
from array import array, zeros
import sys
from java.lang import String
from java.lang.reflect import Array
from java.util import Arrays
print_test('array module (test_array.py)', 1)
def main():
test_jarray() # while it's still supported
test_java_compat()
test_java_object_arrays()
testtype('c', 'c')
for type in (['b', 'h', 'i', 'l', 'f', 'd']):
testtype(type, 1)
#test a mix of known success and failure cases
init_tests()
extend_tests()
fromlist_tests()
unlink(TESTFN)
def test_jarray(): # until it is fully formally removed
    # While jarray is still being phased out, just flex the initializers.
# The rest of the test for array will catch all the big problems.
import jarray
jarray.array(range(5), 'i')
jarray.array([String("a"), String("b"), String("c")], String)
jarray.zeros(5, 'i')
jarray.zeros(5, String)
def test_java_object_arrays():
jStringArr = array(String, [String("a"), String("b"), String("c")])
verify(Arrays.equals(jStringArr.typecode, str(String)),
"String array typecode of wrong type, expected %s, found %s" %
(jStringArr.typecode, str(String)))
verify(zeros(String, 5) == Array.newInstance(String, 5))
import java # require for eval to work
if jStringArr != eval(str(jStringArr)):
raise TestFailed, "eval(str(%s)) <> %s" % (jStringArr,)*2
def test_java_compat():
print_test('array', 2)
from java import awt
hsb = awt.Color.RGBtoHSB(0,255,255, None)
#print hsb
verify(hsb == array('f', [0.5,1,1]),
"output hsb float array does not correspond to input rgb values")
rgb = apply(awt.Color.HSBtoRGB, tuple(hsb))
#print hex(rgb)
verify(rgb == 0xff00ffff, "output rgb bytes don't match input hsb floats")
print_test('zeros', 2)
hsb1 = zeros('f', 3)
awt.Color.RGBtoHSB(0,255,255, hsb1)
#print hsb, hsb1
verify(hsb == hsb1, "hsb float arrays were not equal")
def testoverflow(type, lowerLimit, upperLimit):
# should not overflow assigning lower limit
if verbose:
print "test overflow: array(%s, [%s])" % (lowerLimit, type)
try:
a = array(type, [lowerLimit])
except:
raise TestFailed("array(%s) overflowed assigning %s" %
(lowerLimit, type))
# should overflow assigning less than lower limit
if verbose:
print "test overflow: array(%s, [%s])" % (lowerLimit-1, type)
try:
a = array(type, [lowerLimit-1])
raise TestFailed, "array(%s) did not overflow assigning %s" %\
(lowerLimit-1, type)
except OverflowError:
pass
# should not overflow assigning upper limit
if verbose:
print "test verflow: array(%s, [%s])" % (upperLimit, type)
try:
a = array(type, [upperLimit])
except:
raise TestFailed, "array(%s) overflowed assigning %s" %\
(upperLimit, type)
# should overflow assigning more than upper limit
if verbose:
print "test overflow: array(%s, [%s])" % (upperLimit+1, type)
try:
a = array(type, [upperLimit+1])
raise TestFailed, "array(%s) did not overflow assigning %s" %\
(upperLimit+1, type)
except OverflowError:
pass
def testtype(type, example):
if verbose:
print "testing type ", type
a = array(type)
a.append(example)
if verbose:
print 40*'*'
print 'array after append: ', a
a.typecode
a.itemsize
if a <> eval(str(a)):
raise TestFailed, "eval(str(%s)) <> %s" % (a,a)
if a.typecode in ('i', 'b', 'h', 'l'):
a.byteswap()
if a.typecode == 'c':
f = open(TESTFN, "w")
f.write("The quick brown fox jumps over the lazy dog.\n")
f.close()
f = open(TESTFN, 'r')
a.fromfile(f, 10)
f.close()
if verbose:
print 'char array with 10 bytes of TESTFN appended: ', a
a.fromlist(['a', 'b', 'c'])
if verbose:
print 'char array with list appended: ', a
a.insert(0, example)
if verbose:
print 'array of %s after inserting another:' % a.typecode, a
f = open(TESTFN, 'w')
a.tofile(f)
f.close()
# This block is just to verify that the operations don't blow up.
a.tolist()
a.tostring()
repr(a)
str(a)
if verbose:
print 'array of %s converted to a list: ' % a.typecode, a.tolist()
if verbose:
print 'array of %s converted to a string: ' \
% a.typecode, a.tostring()
if type == 'c':
a = array(type, "abcde")
a[:-1] = a
if a != array(type, "abcdee"):
raise TestFailed, "array(%s) self-slice-assign (head)" % type
a = array(type, "abcde")
a[1:] = a
if a != array(type, "aabcde"):
raise TestFailed, "array(%s) self-slice-assign (tail)" % type
a = array(type, "abcde")
a[1:-1] = a
if a != array(type, "aabcdee"):
raise TestFailed, "array(%s) self-slice-assign (cntr)" % type
if a.index("e") != 5:
raise TestFailed, "array(%s) index-test" % type
if a.count("a") != 2:
raise TestFailed, "array(%s) count-test" % type
a.remove("e")
if a != array(type, "aabcde"):
raise TestFailed, "array(%s) remove-test" % type
if a.pop(0) != "a":
raise TestFailed, "array(%s) pop-test" % type
if a.pop(1) != "b":
raise TestFailed, "array(%s) pop-test" % type
a.extend(array(type, "xyz"))
if a != array(type, "acdexyz"):
raise TestFailed, "array(%s) extend-test" % type
a.pop()
a.pop()
a.pop()
x = a.pop()
if x != 'e':
raise TestFailed, "array(%s) pop-test" % type
if a != array(type, "acd"):
raise TestFailed, "array(%s) pop-test" % type
a.reverse()
if a != array(type, "dca"):
raise TestFailed, "array(%s) reverse-test" % type
else:
a = array(type, [1, 2, 3, 4, 5])
a[:-1] = a
if a != array(type, [1, 2, 3, 4, 5, 5]):
raise TestFailed, "array(%s) self-slice-assign (head)" % type
a = array(type, [1, 2, 3, 4, 5])
a[1:] = a
if a != array(type, [1, 1, 2, 3, 4, 5]):
raise TestFailed, "array(%s) self-slice-assign (tail)" % type
a = array(type, [1, 2, 3, 4, 5])
a[1:-1] = a
if a != array(type, [1, 1, 2, 3, 4, 5, 5]):
raise TestFailed, "array(%s) self-slice-assign (cntr)" % type
if a.index(5) != 5:
raise TestFailed, "array(%s) index-test" % type
if a.count(1) != 2:
raise TestFailed, "array(%s) count-test" % type
a.remove(5)
if a != array(type, [1, 1, 2, 3, 4, 5]):
raise TestFailed, "array(%s) remove-test" % type
if a.pop(0) != 1:
raise TestFailed, "array(%s) pop-test" % type
if a.pop(1) != 2:
raise TestFailed, "array(%s) pop-test" % type
a.extend(array(type, [7, 8, 9]))
if a != array(type, [1, 3, 4, 5, 7, 8, 9]):
raise TestFailed, "array(%s) extend-test" % type
a.pop()
a.pop()
a.pop()
x = a.pop()
if x != 5:
raise TestFailed, "array(%s) pop-test" % type
if a != array(type, [1, 3, 4]):
raise TestFailed, "array(%s) pop-test" % type
a.reverse()
if a != array(type, [4, 3, 1]):
raise TestFailed, "array(%s) reverse-test" % type
# test that overflow exceptions are raised as expected for assignment
# to array of specific integral types
from math import pow
#check using long inputs
if type in ('b', 'h', 'i', 'l'):
a = array(type)
signedLowerLimit = -1 * long(pow(2, a.itemsize * 8 - 1))
signedUpperLimit = long(pow(2, a.itemsize * 8 - 1)) - 1L
unsignedLowerLimit = 0
unsignedUpperLimit = long(pow(2, a.itemsize * 8)) - 1L
testoverflow(type, signedLowerLimit, signedUpperLimit)
#check using integer inputs - int cannot hold MAXINT+1 nor MININT-1
# so only valid test types are byte and short for this test
if type in ('b', 'h'):
a = array(type)
signedLowerLimit = -1 * int(pow(2, a.itemsize * 8 - 1))
signedUpperLimit = int(pow(2, a.itemsize * 8 - 1)) - 1
unsignedLowerLimit = 0
unsignedUpperLimit = int(pow(2, a.itemsize * 8)) - 1
testoverflow(type, signedLowerLimit, signedUpperLimit)
def init_tests():
test = array('c', ['t','e','s','t'])
verify(init_test( "test: String initialisation", "test", 'c') == test,
"string initialisation failed")
test = array('i', [41,42,43,44])
s = test.tostring();
verify(init_test( "test: String2 initialisation", s, 'i') == test,
"string 2 initialisation failed")
init_test( "test: List initialisation", [1,2,3,4], 'i')
init_test( "test: Tuple initialisation", (1,2,3,4), 'i')
test = array('i', [1,2,3,4])
verify(init_test( "test: array initialisation", test, 'i') == test,
"array init failed")
try:
init_test('test: "broken" list initialisation', [1,2,3,4, 'fred'], 'i')
raise TestFailed, '"broken" list initialisation'
except TypeError:
pass
test = array('i', [1,2,3,4])
try:
init_test('test: "broken" PyArray initialisation', test, 'd')
raise TestFailed, '"broken" PyArray initialisation'
except TypeError:
pass
f = open(TESTFN, "w")
#f.write("\x00\x00\x00\x01")
f.write("test message\nline2\nline3");
f.close();
f = open(TESTFN, "r")
try:
init_test( "test: Invalid initialisation object (file)", f, 'i')
raise TestFailed, "Invalid initialisation object (file)"
except TypeError:
pass
f.close()
try:
init_test( "test: Invalid initialisation object (module)", sys, 'i')
raise TestFailed, "Invalid initialisation object (module)"
except TypeError:
pass
def extend_tests():
test = array('c', 'testextend')
verify(extend_test("test: String extend", "test", "extend", 'c') == test,
"String extend failed")
test = array('i', [1,2,3,4,51,52,53,54]);
verify( extend_test("test: List extend", [1,2,3,4], [51,52,53,54], 'i') == test,
"List extend failed")
test = array('i', (1,2,3,4,51,52,53,54));
verify( extend_test("test: Tuple extend", (1,2,3,4), (51,52,53,54), 'i') == test,
"Tuple extend failed")
try:
extend_test('test: "broken" list extend', [1,2,3,4], [51,52,53,"fred"], 'i')
raise TestFailed, 'test: "broken" list extend'
except TypeError:
pass
a = array('d', [123.45, 567.89])
test = array('i', [1,2,3,4])
try:
assert extend_test("test: Array type mismatch", [1,2,3,4], a, 'i') == test, \
"Array mismatch test failed"
raise TestFailed, "test: Array type mismatch"
except TypeError:
pass
del a
f = open(TESTFN, "r")
try:
extend_test("test: incorrect type extend (file)", [1,2,3,4], f, 'i')
raise TestFailed, "test: incorrect type extend (file)"
except TypeError:
pass
f.close()
try:
extend_test("test: incorrect type extend (module)", (1,2,3,4), sys, 'i')
raise TestFailed, "test: incorrect type extend (module)"
except TypeError:
pass
try:
extend_test("test: incorrect type extend (integer)", [], 456, 'i')
raise TestFailed, "test: incorrect type extend (integer)"
except TypeError:
pass
def fromlist_tests():
test = array('c', ['t','e','s','t','h','e','l','l','o'])
verify(fromlist_test("test: String fromlist", "test", ['h','e','l','l','o'], 'c') == test,
"String fromlist failed")
test = array('i', [1,2,3,4,51,52,53,54])
verify(fromlist_test("test: integer fromlist", [1,2,3,4], [51,52,53,54], 'i') == test,
"integer fromlist failed")
try:
fromlist_test('test: "broken" fromlist (integer)', [1,2,3,4], [51,52,53,"fred"], 'i')
raise TestFailed, 'test: "broken" fromlist (integer)'
except TypeError:
pass
try:
fromlist_test("test: invalid fromlist (tuple)", [1,2,3,4], (51,52,53,54), 'i')
raise TestFailed, "test: invalid fromlist (tuple)"
except TypeError:
pass
def init_test(name, init, typecode):
if verbose:
print 40*'*'
print name, "- type:", typecode
print "initialiser:", init
a = array(typecode, init)
if verbose:
print a
return a
def extend_test(name, init, extend, typecode):
if verbose:
print 40*'*'
print name, "- type:", typecode
a = array(typecode, init)
if verbose:
print "initial:", a
print "extended by:", extend
a.extend(extend)
#if no exceptions then
if verbose:
print "final:", a
return a
def fromlist_test(name, init, listdata, typecode):
if verbose:
print 40*'*'
print name , "- type:", typecode
a = array(typecode, init)
if verbose:
print "initial:", a
print "fromlist source:", listdata
a.fromlist(listdata)
#if no exceptions then
if verbose:
print "final:", a
return a
main()
|
|
"""The sensor tests for the nut platform."""
from homeassistant.const import UNIT_PERCENTAGE
from .util import async_init_integration
async def test_pr3000rt2u(hass):
"""Test creation of PR3000RT2U sensors."""
await async_init_integration(hass, "PR3000RT2U", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == "CPS_PR3000RT2U_PYVJO2000034_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": UNIT_PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_cp1350c(hass):
"""Test creation of CP1350C sensors."""
config_entry = await async_init_integration(hass, "CP1350C", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": UNIT_PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_5e850i(hass):
"""Test creation of 5E850I sensors."""
config_entry = await async_init_integration(hass, "5E850I", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": "%",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_5e650i(hass):
"""Test creation of 5E650I sensors."""
config_entry = await async_init_integration(hass, "5E650I", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online Battery Charging",
"unit_of_measurement": "%",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_backupsses600m1(hass):
"""Test creation of BACKUPSES600M1 sensors."""
await async_init_integration(hass, "BACKUPSES600M1", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert (
entry.unique_id
== "American Power Conversion_Back-UPS ES 600M1_4B1713P32195 _battery.charge"
)
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": "%",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_cp1500pfclcd(hass):
"""Test creation of CP1500PFCLCD sensors."""
config_entry = await async_init_integration(
hass, "CP1500PFCLCD", ["battery.charge"]
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": UNIT_PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_dl650elcd(hass):
"""Test creation of DL650ELCD sensors."""
config_entry = await async_init_integration(hass, "DL650ELCD", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": UNIT_PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_blazer_usb(hass):
"""Test creation of blazer_usb sensors."""
config_entry = await async_init_integration(hass, "blazer_usb", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": UNIT_PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
|
|
# -*- coding: utf-8 -*-
import os
import urllib
import uuid
import ssl
from pymongo import MongoClient
import requests
from bs4 import BeautifulSoup
from addons.wiki import settings as wiki_settings
from addons.wiki.exceptions import InvalidVersionError
# MongoDB forbids field names that begin with "$" or contain ".". These
# utilities map to and from Mongo field names.
mongo_map = {
'.': '__!dot!__',
'$': '__!dollar!__',
}
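# For example, to_mongo('page.name$') -> 'page__!dot!__name__!dollar!__'; to_mongo_key additionally strips whitespace and lowercases.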
def to_mongo(item):
for key, value in mongo_map.items():
item = item.replace(key, value)
return item
def to_mongo_key(item):
return to_mongo(item).strip().lower()
def generate_private_uuid(node, wname):
"""
Generate private uuid for internal use in sharejs namespacing.
    Note that this will NEVER be passed to the client or sharejs.
"""
private_uuid = str(uuid.uuid1())
wiki_key = to_mongo_key(wname)
node.wiki_private_uuids[wiki_key] = private_uuid
node.save()
return private_uuid
def get_sharejs_uuid(node, wname):
"""
Format private uuid into the form used in mongo and sharejs.
    This includes the node's primary ID to prevent fork namespace collisions.
"""
wiki_key = to_mongo_key(wname)
private_uuid = node.wiki_private_uuids.get(wiki_key)
return str(uuid.uuid5(
uuid.UUID(private_uuid),
str(node._id)
)) if private_uuid else None
def delete_share_doc(node, wname):
"""Deletes share document and removes namespace from model."""
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
db['docs'].remove({'_id': sharejs_uuid})
db['docs_ops'].remove({'name': sharejs_uuid})
wiki_key = to_mongo_key(wname)
del node.wiki_private_uuids[wiki_key]
node.save()
def migrate_uuid(node, wname):
"""Migrates uuid to new namespace."""
db = share_db()
old_sharejs_uuid = get_sharejs_uuid(node, wname)
broadcast_to_sharejs('lock', old_sharejs_uuid)
generate_private_uuid(node, wname)
new_sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': old_sharejs_uuid})
if doc_item:
doc_item['_id'] = new_sharejs_uuid
db['docs'].insert(doc_item)
db['docs'].remove({'_id': old_sharejs_uuid})
ops_items = [item for item in db['docs_ops'].find({'name': old_sharejs_uuid})]
if ops_items:
for item in ops_items:
item['_id'] = item['_id'].replace(old_sharejs_uuid, new_sharejs_uuid)
item['name'] = new_sharejs_uuid
db['docs_ops'].insert(ops_items)
db['docs_ops'].remove({'name': old_sharejs_uuid})
write_contributors = [
user._id for user in node.contributors
if node.has_permission(user, 'write')
]
broadcast_to_sharejs('unlock', old_sharejs_uuid, data=write_contributors)
def share_db():
"""Generate db client for sharejs db"""
client = MongoClient(wiki_settings.SHAREJS_DB_URL, ssl_cert_reqs=ssl.CERT_NONE)
return client[wiki_settings.SHAREJS_DB_NAME]
def get_sharejs_content(node, wname):
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': sharejs_uuid})
return doc_item['_data'] if doc_item else ''
def broadcast_to_sharejs(action, sharejs_uuid, node=None, wiki_name='home', data=None):
"""
Broadcast an action to all documents connected to a wiki.
Actions include 'lock', 'unlock', 'redirect', and 'delete'
'redirect' and 'delete' both require a node to be specified
'unlock' requires data to be a list of contributors with write permission
"""
url = 'http://{host}:{port}/{action}/{id}/'.format(
host=wiki_settings.SHAREJS_HOST,
port=wiki_settings.SHAREJS_PORT,
action=action,
id=sharejs_uuid
)
if action == 'redirect' or action == 'delete':
redirect_url = urllib.quote(
node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True),
safe='',
)
url = os.path.join(url, redirect_url)
try:
requests.post(url, json=data)
except requests.ConnectionError:
pass # Assume sharejs is not online
def format_wiki_version(version, num_versions, allow_preview):
"""
:param str version: 'preview', 'current', 'previous', '1', '2', ...
:param int num_versions:
:param allow_preview: True if view, False if compare
"""
if not version:
return
if version.isdigit():
version = int(version)
if version > num_versions or version < 1:
raise InvalidVersionError
elif version == num_versions:
return 'current'
elif version == num_versions - 1:
return 'previous'
elif version != 'current' and version != 'previous':
if allow_preview and version == 'preview':
return version
raise InvalidVersionError
elif version == 'previous' and num_versions == 0:
raise InvalidVersionError
return version
def serialize_wiki_settings(user, nodes):
""" Format wiki data for project settings page
:param user: modular odm User object
:param nodes: list of parent project nodes
:return: treebeard-formatted data
"""
items = []
for node in nodes:
assert node, '{} is not a valid Node.'.format(node._id)
can_read = node.has_permission(user, 'read')
is_admin = node.has_permission(user, 'admin')
include_wiki_settings = node.include_wiki_settings(user)
if not include_wiki_settings:
continue
children = node.get_nodes(**{'is_deleted': False, 'is_node_link': False})
children_tree = []
wiki = node.get_addon('wiki')
if wiki:
children_tree.append({
'select': {
'title': 'permission',
'permission':
'public'
if wiki.is_publicly_editable
else 'private'
},
})
children_tree.extend(serialize_wiki_settings(user, children))
item = {
'node': {
'id': node._id,
'url': node.url if can_read else '',
'title': node.title if can_read else 'Private Project',
'is_public': node.is_public
},
'children': children_tree,
'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, 'read') else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
'admin': is_admin,
},
}
items.append(item)
return items
def serialize_wiki_widget(node):
wiki = node.get_addon('wiki')
wiki_version = node.get_wiki_version('home')
# Show "Read more" link if there are multiple pages or has > 400 characters
more = node.wikis.filter(deleted__isnull=True).count() >= 2
MAX_DISPLAY_LENGTH = 400
rendered_before_update = False
if wiki_version and wiki_version.html(node):
wiki_html = BeautifulSoup(wiki_version.html(node)).text
if len(wiki_html) > MAX_DISPLAY_LENGTH:
wiki_html = BeautifulSoup(wiki_html[:MAX_DISPLAY_LENGTH] + '...', 'html.parser')
more = True
rendered_before_update = wiki_version.rendered_before_update
else:
wiki_html = None
wiki_widget_data = {
'complete': True,
'wiki_content': unicode(wiki_html) if wiki_html else None,
'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
'rendered_before_update': rendered_before_update,
'more': more,
'include': False,
}
wiki_widget_data.update(wiki.config.to_json())
return wiki_widget_data
|
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.lombscargle import LombScargle
ALL_METHODS = LombScargle.available_methods
ALL_METHODS_NO_AUTO = [method for method in ALL_METHODS if method != 'auto']
FAST_METHODS = [method for method in ALL_METHODS if 'fast' in method]
NTERMS_METHODS = [method for method in ALL_METHODS if 'chi2' in method]
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 20 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
@pytest.mark.parametrize('minimum_frequency', [None, 1.0])
@pytest.mark.parametrize('maximum_frequency', [None, 5.0])
@pytest.mark.parametrize('nyquist_factor', [1, 10])
@pytest.mark.parametrize('samples_per_peak', [1, 5])
def test_autofrequency(data, minimum_frequency, maximum_frequency,
nyquist_factor, samples_per_peak):
t, y, dy = data
baseline = t.max() - t.min()
freq = LombScargle(t, y, dy).autofrequency(samples_per_peak,
nyquist_factor,
minimum_frequency,
maximum_frequency)
df = freq[1] - freq[0]
# Check sample spacing
assert_allclose(df, 1. / baseline / samples_per_peak)
# Check minimum frequency
if minimum_frequency is None:
assert_allclose(freq[0], 0.5 * df)
else:
assert_allclose(freq[0], minimum_frequency)
if maximum_frequency is None:
avg_nyquist = 0.5 * len(t) / baseline
assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5*df)
else:
assert_allclose(freq[-1], maximum_frequency, atol=0.5*df)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_all_methods(data, method, center_data, fit_mean,
errors, with_units, normalization):
if method == 'scipy' and (fit_mean or errors != 'none'):
return
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
kwds = {}
ls = LombScargle(t, y, dy, center_data=center_data, fit_mean=fit_mean,
normalization=normalization)
P_expected = ls.power(frequency)
# don't use the fft approximation here; we'll test this elsewhere
if method in FAST_METHODS:
kwds['method_kwds'] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
if with_units:
if normalization == 'psd' and errors == 'none':
assert P_method.unit == y.unit ** 2
else:
assert P_method.unit == u.dimensionless_unscaled
else:
assert not hasattr(P_method, 'unit')
assert_quantity_allclose(P_expected, P_method)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_integer_inputs(data, method, center_data, fit_mean, with_errors,
normalization):
if method == 'scipy' and (fit_mean or with_errors):
return
t, y, dy = data
t = np.floor(100 * t)
t_int = t.astype(int)
y = np.floor(100 * y)
y_int = y.astype(int)
dy = np.floor(100 * dy)
dy_int = dy.astype('int32')
frequency = 1E-2 * (0.8 + 0.01 * np.arange(40))
if not with_errors:
dy = None
dy_int = None
kwds = dict(center_data=center_data,
fit_mean=fit_mean,
normalization=normalization)
    P_float = LombScargle(t, y, dy, **kwds).power(frequency, method=method)
P_int = LombScargle(t_int, y_int, dy_int,
**kwds).power(frequency, method=method)
assert_allclose(P_float, P_int)
@pytest.mark.parametrize('method', NTERMS_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 2, 4])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_nterms_methods(method, center_data, fit_mean, errors,
nterms, normalization, data):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargle(t, y, dy, center_data=center_data,
fit_mean=fit_mean, nterms=nterms,
normalization=normalization)
if nterms == 0 and not fit_mean:
with pytest.raises(ValueError) as err:
ls.power(frequency, method=method)
assert 'nterms' in str(err.value) and 'bias' in str(err.value)
else:
P_expected = ls.power(frequency)
# don't use fast fft approximations here
kwds = {}
if 'fast' in method:
kwds['method_kwds'] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
assert_allclose(P_expected, P_method, rtol=1E-7, atol=1E-25)
@pytest.mark.parametrize('method', FAST_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_fast_approximations(method, center_data, fit_mean,
errors, nterms, data):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargle(t, y, dy, center_data=center_data,
fit_mean=fit_mean, nterms=nterms,
normalization='standard')
# use only standard normalization because we compare via absolute tolerance
kwds = dict(method=method)
if method == 'fast' and nterms != 1:
with pytest.raises(ValueError) as err:
ls.power(frequency, **kwds)
assert 'nterms' in str(err.value)
elif nterms == 0 and not fit_mean:
with pytest.raises(ValueError) as err:
ls.power(frequency, **kwds)
assert 'nterms' in str(err.value) and 'bias' in str(err.value)
else:
P_fast = ls.power(frequency, **kwds)
kwds['method_kwds'] = dict(use_fft=False)
P_slow = ls.power(frequency, **kwds)
assert_allclose(P_fast, P_slow, atol=0.008)
@pytest.mark.parametrize('method', LombScargle.available_methods)
@pytest.mark.parametrize('shape', [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
t, y, dy = data
freq = np.asarray(np.zeros(shape))
freq.flat = np.arange(1, freq.size + 1)
PLS = LombScargle(t, y, fit_mean=False).power(freq, method=method)
assert PLS.shape == shape
@pytest.mark.parametrize('method', LombScargle.available_methods)
def test_errors_on_unit_mismatch(method, data):
t, y, dy = data
t = t * u.second
y = y * u.mag
frequency = np.linspace(0.5, 1.5, 10)
# this should fail because frequency and 1/t units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y, fit_mean=False).power(frequency, method=method)
assert str(err.value).startswith('Units of frequency not equivalent')
# this should fail because dy and y units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y, dy, fit_mean=False).power(frequency / t.unit)
assert str(err.value).startswith('Units of dy not equivalent')
# we don't test all normalizations here because they are tested above
# only test method='auto' because unit handling does not depend on method
@pytest.mark.parametrize('with_error', [True, False])
def test_unit_conversions(data, with_error):
t, y, dy = data
t_day = t * u.day
t_hour = u.Quantity(t_day, 'hour')
y_meter = y * u.meter
y_millimeter = u.Quantity(y_meter, 'millimeter')
# sanity check on inputs
assert_quantity_allclose(t_day, t_hour)
assert_quantity_allclose(y_meter, y_millimeter)
if with_error:
dy = dy * u.meter
else:
dy = None
freq_day, P1 = LombScargle(t_day, y_meter, dy).autopower()
freq_hour, P2 = LombScargle(t_hour, y_millimeter, dy).autopower()
# Check units of frequency
assert freq_day.unit == 1. / u.day
assert freq_hour.unit == 1. / u.hour
# Check that results match
assert_quantity_allclose(freq_day, freq_hour)
assert_quantity_allclose(P1, P2)
# Check that switching frequency units doesn't change things
P3 = LombScargle(t_day, y_meter, dy).power(freq_hour)
P4 = LombScargle(t_hour, y_meter, dy).power(freq_day)
assert_quantity_allclose(P3, P4)
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('freq', [1.0, 2.0])
def test_model(fit_mean, with_units, freq):
rand = np.random.RandomState(0)
t = 10 * rand.rand(40)
params = 10 * rand.rand(3)
y = np.zeros_like(t)
if fit_mean:
y += params[0]
y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))
if with_units:
t = t * u.day
y = y * u.mag
freq = freq / u.day
ls = LombScargle(t, y, center_data=False, fit_mean=fit_mean)
y_fit = ls.model(t, freq)
assert_quantity_allclose(y_fit, y)
@pytest.mark.parametrize('t_unit', [u.second, u.day])
@pytest.mark.parametrize('frequency_unit', [u.Hz, 1. / u.second])
@pytest.mark.parametrize('y_unit', [u.mag, u.jansky])
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
t, y, dy = data
t_fit = t[:5]
frequency = 1.0
t = t * t_unit
t_fit = t_fit * t_unit
y = y * y_unit
dy = dy * y_unit
frequency = frequency * frequency_unit
ls = LombScargle(t, y, dy)
y_fit = ls.model(t_fit, frequency)
assert y_fit.unit == y_unit
def test_model_units_mismatch(data):
t, y, dy = data
frequency = 1.0
t_fit = t[:5]
t = t * u.second
t_fit = t_fit * u.second
y = y * u.mag
frequency = 1.0 / t.unit
# this should fail because frequency and 1/t units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y).model(t_fit, frequency=1.0)
assert str(err.value).startswith('Units of frequency not equivalent')
# this should fail because t and t_fit units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y).model([1, 2], frequency)
assert str(err.value).startswith('Units of t not equivalent')
# this should fail because dy and y units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y, dy).model(t_fit, frequency)
assert str(err.value).startswith('Units of dy not equivalent')
def test_autopower(data):
t, y, dy = data
ls = LombScargle(t, y, dy)
kwargs = dict(samples_per_peak=6, nyquist_factor=2,
minimum_frequency=2, maximum_frequency=None)
freq1 = ls.autofrequency(**kwargs)
power1 = ls.power(freq1)
freq2, power2 = ls.autopower(**kwargs)
assert_allclose(freq1, freq2)
assert_allclose(power1, power2)
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_model_parameters(data, nterms, fit_mean, center_data,
errors, with_units):
if nterms == 0 and not fit_mean:
return
t, y, dy = data
frequency = 1.5
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargle(t, y, dy,
nterms=nterms,
fit_mean=fit_mean,
center_data=center_data)
tfit = np.linspace(0, 20, 10)
if with_units:
tfit = tfit * u.day
model = ls.model(tfit, frequency)
params = ls.model_parameters(frequency)
design = ls.design_matrix(frequency, t=tfit)
offset = ls.offset()
assert len(params) == int(fit_mean) + 2 * nterms
assert_quantity_allclose(offset + design.dot(params), model)
@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy = data
# FIXME: There seems to be a numerical stability issue in that if we run
# the algorithm with the same values but offset in time, the transit_time
# is not offset by a fixed amount. To avoid this issue in this test, we
# make sure the first time is also the smallest so that internally the
# values of the relative time should be the same.
t[0] = 0.
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same
start = Time('2019-05-04T12:34:56')
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of LombScargle, one with absolute and one
# with relative times.
ls1 = LombScargle(t, y, dy)
ls2 = LombScargle(trel, y, dy)
kwargs = dict(samples_per_peak=6, nyquist_factor=2,
minimum_frequency=2 / u.day, maximum_frequency=None)
freq1 = ls1.autofrequency(**kwargs)
freq2 = ls2.autofrequency(**kwargs)
assert_quantity_allclose(freq1, freq2)
power1 = ls1.power(freq1)
power2 = ls2.power(freq2)
assert_quantity_allclose(power1, power2)
freq1, power1 = ls1.autopower(**kwargs)
freq2, power2 = ls2.autopower(**kwargs)
assert_quantity_allclose(freq1, freq2)
assert_quantity_allclose(power1, power2)
model1 = ls1.model(t, 2 / u.day)
model2 = ls2.model(trel, 2 / u.day)
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
ls1.model(trel, 2 / u.day)
assert exc.value.args[0] == ('t was provided as a relative time but the '
'LombScargle class was initialized with '
'absolute times.')
with pytest.raises(TypeError) as exc:
ls2.model(t, 2 / u.day)
assert exc.value.args[0] == ('t was provided as an absolute time but the '
'LombScargle class was initialized with '
'relative times.')
# Check design matrix
design1 = ls1.design_matrix(2 / u.day, t=t)
design2 = ls2.design_matrix(2 / u.day, t=trel)
assert_quantity_allclose(design1, design2)
# Check design matrix validation
with pytest.raises(TypeError) as exc:
ls1.design_matrix(2 / u.day, t=trel)
assert exc.value.args[0] == ('t was provided as a relative time but the '
'LombScargle class was initialized with '
'absolute times.')
with pytest.raises(TypeError) as exc:
ls2.design_matrix(2 / u.day, t=t)
assert exc.value.args[0] == ('t was provided as an absolute time but the '
'LombScargle class was initialized with '
'relative times.')
|
|
#!/usr/bin/env python
'''
This script will accept a username and password from OpenVPN, and use them to obtain
an authentication token from Azure AD.
'''
#pylint: disable=invalid-name
import binascii
#import hashlib
#from hmac import compare_digest
from backports.pbkdf2 import pbkdf2_hmac, compare_digest
import logging
import os
import sys
import adal
import requests
import yaml
loggerName = __name__
logging.basicConfig(
format='%(asctime) 25s openvpn-azure-aad-auth %(levelname) 7s %(pathname)s %(module)s: %(message)s'
)
logger = logging.getLogger(loggerName)
def success():
''' The user has authenticated and is authorized '''
sys.exit(0)
def failure():
''' The user failed to authenticate or authorize. Exit with an error code '''
sys.exit(1)
CONFIG_FILE = 'config.yaml'
def main(config_file):
try:
username = os.environ['username']
password = os.environ['password']
except KeyError:
logger.error("Environment variables `username` and `password` must be set")
failure()
try:
with open(config_file) as cfg:
config = yaml.load(cfg.read())
except IOError as err:
logger.critical("Could not open config file %s", config_file)
failure()
except yaml.scanner.ScannerError as err:
logger.critical("Config file %s failed to load: %s", config_file, err)
failure()
set_log_level(getattr(logging, config.get('log_level', "INFO").upper()))
try:
tenant_id = config['tenant_id']
authority_url = "https://login.microsoftonline.com/{}".format(tenant_id)
client_id = config['client_id']
resource = config['resource'] if 'resource' in config else 'https://graph.windows.net'
except KeyError as err:
logger.error("invalid config file! could not find %s", err)
failure()
token_cache_file = config.get('token_cache_file')
token_cache = read_token_cache(token_cache_file)
context = adal.AuthenticationContext(authority_url, cache=token_cache)
if len(sys.argv) == 2 and sys.argv[1] == "--consent":
if obtain_consent(context, resource, client_id):
success()
else:
failure()
logger.info("request recieved to authenticate user %s", username)
token, save_cache = get_token(context, resource, username, password, client_id)
if token is None:
failure()
if 'permitted_groups' not in config or \
check_group_membership(token, tenant_id, config['permitted_groups']):
if save_cache:
save_token_cache(token_cache_file, context.cache)
success()
logger.info("User %s not authorized", username)
failure()
def set_log_level(log_level):
logger.setLevel(log_level)
adal.set_logging_options({'level': log_level})
def read_token_cache(token_cache_file):
if token_cache_file is None:
return None
token_cache = None
try:
logger.debug("reading token cache from %s", token_cache_file)
token_cache_fd = os.open(token_cache_file, os.O_CREAT, 0o600)
with os.fdopen(token_cache_fd, 'r') as token_cache_fh:
token_cache = adal.TokenCache(state=token_cache_fh.read())
except IOError as err:
logger.error(
"could not open token cache file %s: %s. continuing without cache",
token_cache_file, err)
os.close(token_cache_fd)
except ValueError as err:
logger.error("could not load cache from disk: %s", err)
return token_cache
def save_token_cache(token_cache_file, token_cache):
if token_cache is None or token_cache_file is None:
return
try:
token_cache_fd = os.open(
token_cache_file,
os.O_TRUNC | os.O_CREAT | os.O_WRONLY,
0o600
)
with os.fdopen(token_cache_fd, 'w') as token_cache_fh:
token_cache_fh.write(token_cache.serialize())
logger.debug("wrote token cache info to %s", token_cache_file)
except IOError as err:
logger.warning(
"could not write to token cache file %s: %s",
token_cache_file, err)
os.close(token_cache_fd)
def obtain_consent(context, resource, client_id):
try:
code = context.acquire_user_code(resource, client_id)
print code['message']
_ = context.acquire_token_with_device_code(resource, code, client_id)
except adal.adal_error.AdalError as err:
logger.error("Failed to get consent %s", err)
return False
except KeyboardInterrupt:
context.cancel_request_to_get_token_with_device_code(code)
logger.info("Cancelled code request")
return False
else:
return True
def get_token(context, resource, username, password, client_id):
"""
Get a JWT as evidence of authentication.
Using the provided ADAL authentication context, attempt to get a JWT from the cache (if
enabled). If the cache misses or the cached refresh token cannot be exchanged, interrogate
AAD for a new JWT.
Returns: Either a valid token bundle or None, and a flag indicating that the cache is stale
and should be updated.
Side-effects: the token bundle that is returned is a reference to the token inside the
context's cache member. As such, this function modifies `context`.
"""
try:
# Get a token from the cache (avoids a round-trip to AAD if the cached token hasn't expired)
try:
token = context.acquire_token(resource, username, client_id)
except adal.adal_error.AdalError as err: # see issue #3
token = None
if token is not None:
password_hmac = hash_password(token, password)
            if compare_digest(bytes(password_hmac), bytes(token['passwordHash'])):
                logger.info("authenticated user %s from cache", username)
                return token, False
logger.debug("could not get a token from cache; acquiring from AAD")
token = context.acquire_token_with_username_password(
resource,
username,
password,
client_id
)
except adal.adal_error.AdalError as err:
logger.info("User %s failed to authenticate: %s", username, err)
return None, False
token['passwordHash'] = hash_password(token, password)
logger.info("authenticated user %s from AAD request", username)
return token, True
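# Only a PBKDF2-HMAC-SHA512 hash of the password is kept alongside the cached token (salted with the
# issued access token, 128000 iterations, hex-encoded), so the cleartext password never hits the cache.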
def hash_password(token, password):
return binascii.hexlify(pbkdf2_hmac('sha512', password, token['accessToken'], 128000))
def check_group_membership(token, tenant_id, permitted_groups):
graph_url = "https://graph.windows.net/me/memberOf?api-version=1.6"
while True:
header = {
"Authorization": "Bearer {}".format(token['accessToken']),
"Content-Type": "application/json"
}
try:
logger.debug("requesting a batch of group info")
resp = requests.get(
graph_url,
headers=header
)
resp.encoding = "utf-8-sig"
data = resp.json()
except Exception as err: #pylint: disable=broad-except
logger.error("Graph API request unsuccessful: %s", err)
return False
if 'odata.error' in data:
logger.error("could not get graph data: %s", data)
return False
try:
# Exit early if we've found a permitted group
for group in [v['displayName'] for v in data['value']]:
if group in permitted_groups:
return True
except KeyError as err:
if err.message == 'value':
logger.debug("no 'value' key in returned group data %s", resp.text)
elif err.message == 'displayName':
logger.debug("no 'displayName' in group value: %s", v)
else:
logger.error("Unhandled KeyError getting '%s' out of response '%s'", err, resp.text)
return False
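        # Graph paginates membership results; follow odata.nextLink until a permitted group is found or the pages run out.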
if "odata.nextLink" in data:
graph_url = "https://graph.windows.net/{}/{}&api-version=1.6".format(
tenant_id,
data["odata.nextLink"]
)
else:
break
return False
if __name__ == "__main__":
main(CONFIG_FILE)
|
|
"""
Handles all requests relevant to the verification service of the API.
"""
from hutts_verification.image_processing.sample_extract import TextExtractor
from hutts_verification.image_preprocessing.face_manager import FaceDetector
from hutts_verification.verification.text_verify import TextVerify
from hutts_verification.verification.face_verify import FaceVerify
from flask import jsonify, request, Blueprint
from hutts_verification.utils.hutts_logger import logger
from hutts_verification.utils.image_handling import grab_image
from hutts_verification.utils.pypath import correct_path
from pathlib import Path
from multiprocessing.pool import ThreadPool
import os
__authors__ = "Nicolai van Niekerk, Stephan Nell, Marno Hermann, Andreas Nel"
__copyright__ = "Copyright 2017, Java the Hutts"
__license__ = "BSD"
__maintainer__ = "Nicolai van Niekerk"
__email__ = "nicvaniek@gmail.com"
__status__ = "Development"
verify = Blueprint('verify', __name__)
pool = ThreadPool(processes=1)
THREAD_TIME_OUT = 7200
# Constants path to trained data for Shape Predictor.
CURRENT_LOCATION = os.path.abspath(os.path.dirname(__file__))
SHAPE_PREDICTOR_PATH = correct_path(Path(
Path(CURRENT_LOCATION),
Path(CURRENT_LOCATION).parent,
'image_preprocessing',
'trained_data',
'shape_predictor_face_landmarks.dat'
)
)
FACE_RECOGNITION_PATH = correct_path(Path(
Path(CURRENT_LOCATION),
Path(CURRENT_LOCATION).parent,
'image_preprocessing',
'trained_data',
'dlib_face_recognition_resnet_model_v1.dat'
))
@verify.route('/verifyID', methods=['POST'])
def verify_id():
"""
Sample function to return a match percentage of an ID image and
provided personal information and picture of face.
URL: http://localhost:5000/verifyID.
"""
image_of_id, face = receive_faces(match_face=True)
entered_details = receive_details()
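    # Start the face comparison on the worker thread so it overlaps with text extraction and verification below.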
match_face_thread = pool.apply_async(match_faces, args=(image_of_id, face))
# is_match, distance = match_faces(image_of_id, face)
extracted_text, preferences = manage_text_extractor(image_of_id)
text_match_percentage, text_match, is_pass = manage_text_verification(preferences, extracted_text, entered_details)
logger.debug("Receiving match face thread results")
is_match, distance = match_face_thread.get(THREAD_TIME_OUT)
logger.info("Preparing Results...")
result = {
# text verification contributes to 40% of the total and face likeness for 60%
"total_match": text_match_percentage*0.4 + distance*0.6,
"text_match": text_match,
"face_match": distance,
"is_match": is_match,
"is_pass": is_pass
}
return jsonify(result)
@verify.route('/verifyFaces', methods=['POST'])
def verify_faces():
"""
Sample function to return a match percentage of an ID face image and
picture of face.
URL: http://localhost:5000/verifyFaces.
"""
image_of_id, face = receive_faces(match_face=True)
(is_match, distance) = match_faces(image_of_id, face)
logger.info("Preparing Results...")
result = {
"face_match": distance,
"is_match": is_match,
}
return jsonify(result)
@verify.route('/verifyInfo', methods=['POST'])
def verify_info():
"""
Sample function to return a match percentage of an ID image and
provided personal information.
"""
image_of_id, _ = receive_faces(match_face=False)
entered_details = receive_details()
extracted_text, preferences = manage_text_extractor(image_of_id)
text_match_percentage, text_match, is_pass = manage_text_verification(preferences, extracted_text, entered_details)
logger.info("Preparing Results...")
result = {
"text_match": text_match,
"is_pass": is_pass,
"text_match_percentage": text_match_percentage
}
return jsonify(result)
def match_faces(image_of_id, face):
"""
    This function receives two images containing faces that need to be verified.
    It is expected that an image of an ID and an image of a profile picture will be received.
    Even if the expected images are not received, the function will still apply a best-effort solution.
:param image_of_id (obj): An image of an ID that contains a face that needs to be verified.
    :param face (obj): An image of a face that needs to be verified.
Returns:
- boolean: Whether the two faces match (the distance between them is above the threshold value).
- float: Return Euclidean distance between the vector representations of the two faces.
"""
# Extract face
face_detector = FaceDetector(SHAPE_PREDICTOR_PATH)
extracted_face1 = face_detector.extract_face(image_of_id)
extracted_face2 = face_detector.extract_face(face)
# Verify faces
face_verifier = FaceVerify(SHAPE_PREDICTOR_PATH, FACE_RECOGNITION_PATH)
return face_verifier.verify(extracted_face1, extracted_face2)
def receive_faces(match_face=True):
"""
    This function receives the face/ID images from the Flask request handler.
The function checks for multiple means of receiving the faces/ID. These include
- Receiving image by file path
- Receiving image by URL
- Receiving image by file Stream
It is expected that an image of a face and an image of an ID will be sent.
    However, if the order is not followed, the system will still be able to return a best-effort result without
loss of accuracy.
:param match_face (boolean): Indicates if an additional profile of a face should be extracted.
If an additional face should not be extracted simply return the ID image.
Returns:
- (obj): An image of a ID.
- (obj): An image of a face if match_face is set to True.
"""
data = {"success": False}
# Get id image as numpy array
# Check to see if an image was uploaded.
if request.get_json().get("id_img", None) is not None:
# Grab the uploaded image.
image_of_id = grab_image(string=request.get_json()["id_img"])
# Otherwise, assume that a URL was passed in.
else:
# Grab the URL from the request.
url = request.get_json().get("url", None)
# If the URL is None, then return an error.
if url is None:
data["error"] = "No URL provided."
return jsonify(data)
# Load the image and convert.
image_of_id = grab_image(url=url)
if not match_face:
return image_of_id
# Get face image as numpy array
# Check to see if an image was uploaded.
if request.get_json().get("face_img", None) is not None:
# Grab the uploaded image.
face = grab_image(string=request.get_json()["face_img"])
# Otherwise, assume that a URL was passed in.
else:
# Grab the URL from the request.
url = request.get_json().get("url", None)
# If the URL is None, then return an error.
if url is None:
data["error"] = "No URL provided."
return jsonify(data)
# Load the image and convert.
face = grab_image(url=url)
return image_of_id, face
def manage_text_extractor(image_of_id):
"""
This function manages the text extraction from an ID images.
Management includes preparing text extraction preferences and receiving extracted text.
:param image_of_id (obj): An image of an ID that text must be extracted from.
    Returns:
        - extracted_text (json object): A collection of text extracted from the ID.
        - preferences (dict): Prepared preferences. May contain additional text extraction or logger preferences.
"""
preferences = {}
# Grab additional parameters specifying techniques. Extract text
logger.info("Setting Preferences")
if 'blur_technique' in request.get_json():
preferences['blur_method'] = request.get_json()['blur_technique']
if 'threshold_technique' in request.get_json():
preferences['threshold_method'] = request.get_json()['threshold_technique']
if 'remove_face' in request.get_json():
preferences['remove_face'] = request.get_json()['remove_face']
if 'remove_barcode' in request.get_json():
preferences['remove_barcode'] = request.get_json()['remove_barcode']
if 'color' in request.get_json():
preferences['color'] = request.get_json()['color']
if 'id_type' in request.get_json():
preferences['id_type'] = request.get_json()['id_type']
if 'verbose_verify' in request.get_json():
        preferences['verbose_verify'] = request.get_json()['verbose_verify'] == 'true'
else:
preferences['verbose_verify'] = False
if 'useIO' in request.get_json():
preferences['useIO'] = request.get_json()['useIO'] == 'true'
extractor = TextExtractor(preferences)
extracted_text = extractor.extract(image_of_id)
return extracted_text, preferences
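# Illustrative sketch, not part of the original handlers: the shape of the
# preferences dict assembled by manage_text_extractor above. Only keys present
# in the request are set; the example values here are hypothetical.
EXAMPLE_EXTRACTION_PREFERENCES = {
    "blur_method": "gaussian",
    "threshold_method": "adaptive",
    "remove_face": "true",
    "remove_barcode": "true",
    "color": "deep",
    "id_type": "idcard",
    "verbose_verify": False,  # always set; defaults to False when absent
    "useIO": True,            # only set when 'useIO' appears in the request
}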
def manage_text_verification(preferences, extracted_text, entered_details):
"""
    This function manages the preparation before text verification and the result of the text verification itself.
    Management includes preparing logger functionality and controlling match percentages and messages.
    :param preferences (dict): A dictionary of preferences containing details about logger functionality.
:param extracted_text (JSON object): Contains text extracted from ID image.
:param entered_details (dict): Dictionary containing information that needs to be verified.
Returns:
- (float): Value representing the accuracy with which the entered details matches that of the extracted text.
- (dict): Contains individual match percentages for different fields.
- (boolean): Indicates if text_verification passes based on the threshold value.
"""
text_verifier = TextVerify()
verbose_verify = preferences['verbose_verify']
    logger.debug('%s text verification requested' % ('Verbose' if verbose_verify else 'Non-verbose'))
(is_pass, text_match) = text_verifier.verify(extracted_text, entered_details, verbose=verbose_verify)
# Check if we are working with verbose output for text verification
text_match_percentage = text_match if not verbose_verify else text_match['total']
return text_match_percentage, text_match, is_pass
def receive_details():
"""
This function receives the details that need to be verified from the flask handler.
Returns:
- (dict): Details that need to be verified with that extracted from image.
"""
entered_details = {
"names": request.get_json()['names'],
"surname": request.get_json()['surname'],
"identity_number": request.get_json()['idNumber'],
"nationality": request.get_json()['nationality'],
"country_of_birth": request.get_json()['cob'],
"status": request.get_json()['status'],
"sex": request.get_json()['gender'],
"date_of_birth": request.get_json()['dob']
}
return entered_details
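# Illustrative sketch, not part of the original handlers: the JSON body such a
# verification endpoint expects, combining the detail fields read by
# receive_details() with the image fields read by receive_faces(). All values,
# including the encoded image placeholders, are hypothetical.
EXAMPLE_VERIFY_PAYLOAD = {
    "names": "Jane",
    "surname": "Doe",
    "idNumber": "0000000000000",
    "nationality": "RSA",
    "cob": "RSA",
    "status": "Citizen",
    "gender": "F",
    "dob": "1990-01-01",
    "id_img": "<encoded ID image string>",
    "face_img": "<encoded profile image string>",
    "verbose_verify": "true",  # optional extraction/verification preference
}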
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import random
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
GRAPH_MODE = 0
EAGER_MODE = 1
# Default execution mode.
_default_mode = GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
# TODO(agarwal): better name ?
class _EagerContext(threading.local):
"""Thread local eager context."""
def __init__(self):
super(_EagerContext, self).__init__()
self.device_spec = pydev.DeviceSpec.from_string("")
self.device_name = self.device_spec.to_string()
self.mode = _default_mode
self.scope_name = ""
self.recording_summaries = False
self.summary_writer_resource = None
self.scalar_cache = {}
ContextStackEntry = collections.namedtuple(
"ContextStackEntry", ["is_building_function", "enter_context_fn"])
class ContextStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self):
super(ContextStack, self).__init__()
self.stack = []
def push(self, is_building_function, enter_context_fn):
"""Push metadata about a context switch onto the stack.
A context switch can take one of two forms: installing a graph as the
default graph, or entering the eager context.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
"""
self.stack.append(
ContextStackEntry(is_building_function, enter_context_fn))
def pop(self):
"""Pop the stack."""
self.stack.pop()
context_stack = ContextStack()
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
def __init__(self, config=None, device_policy=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
Valid values:
tfe.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not
correct.
tfe.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
tfe.DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
tfe.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
"""
self._eager_context = _EagerContext()
self._context_handle = None
self._context_devices = None
self._post_execution_callbacks = []
self._config = config
self._seed = None
self._initialize_lock = threading.Lock()
self._device_policy = device_policy
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
self._rng = random.Random(self._seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
    In eager mode, users shouldn't set or depend on the operation seed.
Here, we generate a random seed based on global seed to make
operation's randomness different and depend on the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_handle_and_devices(self):
"""Initialize handle and devices."""
with self._initialize_lock:
if self._context_handle is not None:
return
assert self._context_devices is None
opts = pywrap_tensorflow.TFE_NewContextOptions()
try:
with errors.raise_exception_on_not_ok_status() as status:
if self._config is not None:
config_str = self._config.SerializeToString()
pywrap_tensorflow.TFE_ContextOptionsSetConfig(
opts, config_str, len(config_str), status)
if self._device_policy is not None:
pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
self._context_handle = pywrap_tensorflow.TFE_NewContext(opts, status)
finally:
pywrap_tensorflow.TFE_DeleteContextOptions(opts)
# Store list of devices
self._context_devices = []
with errors.raise_exception_on_not_ok_status() as status:
device_list = pywrap_tensorflow.TFE_ContextListDevices(
self._context_handle, status)
try:
self._num_gpus = 0
for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):
with errors.raise_exception_on_not_ok_status() as status:
dev_name = pywrap_tensorflow.TF_DeviceListName(
device_list, i, status)
self._context_devices.append(pydev.canonical_name(dev_name))
with errors.raise_exception_on_not_ok_status() as status:
dev_type = pywrap_tensorflow.TF_DeviceListType(
device_list, i, status)
if dev_type == "GPU":
self._num_gpus += 1
finally:
pywrap_tensorflow.TF_DeleteDeviceList(device_list)
@property
def _handle(self):
ctx = self._context_handle
if ctx is None:
self._initialize_handle_and_devices()
return self._context_handle
else:
return ctx
@property
def _devices(self):
devices = self._context_devices
if devices is None:
self._initialize_handle_and_devices()
return self._context_devices
else:
return devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
ctx = self._eager_context
old_mode = ctx.mode
ctx.mode = mode
if mode == EAGER_MODE:
context_stack.push(False, eager_mode)
try:
yield
finally:
ctx.mode = old_mode
if mode == EAGER_MODE:
context_stack.pop()
def in_graph_mode(self):
"""Returns True if current thread is in GRAPH mode."""
return self._eager_context.mode == GRAPH_MODE
def in_eager_mode(self):
"""Returns True if current thread is in EAGER mode."""
return self._eager_context.mode == EAGER_MODE
def scalar_cache(self):
"""Per-device cache for scalars."""
return self._eager_context.scalar_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._eager_context.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._eager_context.scope_name = s
@property
def summary_writer_resource(self):
"""Returns summary writer resource."""
return self._eager_context.summary_writer_resource
@summary_writer_resource.setter
def summary_writer_resource(self, resource):
"""Sets summary writer resource."""
self._eager_context.summary_writer_resource = resource
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._eager_context.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._eager_context.device_spec
@tf_contextlib.contextmanager
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Yields:
Nothing.
Raises:
ValueError: If name is not a string or is an invalid device name.
"""
eager_context = self._eager_context
old_device_name = eager_context.device_name
old_device_spec = eager_context.device_spec
cache_key = (old_device_name, name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(name), name))
except KeyError:
# Handle a cache miss.
if name is not None:
if not isinstance(name, str):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(name), name))
device_spec = pydev.DeviceSpec.from_string(name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string(
"/job:localhost/replica:0/task:0/device:CPU:0")
new_device_spec.merge_from(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
try:
eager_context.device_name = new_device_name
eager_context.device_spec = new_device_spec
yield
finally:
eager_context.device_name = old_device_name
eager_context.device_spec = old_device_spec
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self._initialize_handle_and_devices()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.TFE_ContextAddFunction(
self._handle, # pylint: disable=protected-access
fn,
status)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
fdef_string = fdef.SerializeToString()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.TFE_ContextAddFunctionDef(
self._handle, # pylint: disable=protected-access
fdef_string,
len(fdef_string),
status)
def add_post_execution_callback(self, callback):
"""Add a post-execution callback to the context.
A post-execution callback is invoked immediately after an eager operation or
    function has finished execution, providing access to the op's type, name,
input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added.
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
        `op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute names and attribute values.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
# TODO(cais): (b/64674139) Allow access to function-internal operations.
self._post_execution_callbacks.append(callback)
def clear_post_execution_callbacks(self):
"""Clear all post-execution callbacks added to the context."""
del self._post_execution_callbacks[:]
@property
def post_execution_callbacks(self):
"""Get the list of post-execution callbacks added to the context."""
return self._post_execution_callbacks
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
if not self._context_handle:
self._initialize_handle_and_devices()
pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._context_handle)
@tf_contextlib.contextmanager
def device_policy(self, policy):
if not self._context_handle:
self._initialize_handle_and_devices()
old = pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(
self._context_handle)
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, policy)
try:
yield
finally:
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, old)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer. Or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.TFE_ContextExportRunMetadata(
self._context_handle, buffer_, status)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
_context = None
_context_lock = threading.Lock()
def _initialize_context():
global _context
with _context_lock:
if _context is None:
_context = Context()
def context():
"""Returns a singleton context object."""
if _context is None:
_initialize_context()
return _context
# TODO(agarwal): remove this.
def get_default_context():
"""Same as context."""
if _context is None:
_initialize_context()
return _context
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
def in_graph_mode():
"""Returns True if current thread is in GRAPH mode for default context."""
return context().in_graph_mode()
def in_eager_mode():
"""Returns True if current thread is in EAGER mode for default context."""
return context().in_eager_mode()
def graph_mode():
"""Context-manager to enable GRAPH mode for current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable EAGER mode for current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
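# Illustrative sketch, not part of the original module: how the thread-local
# mode switch above is typically used. Shown as comments so the module's
# import-time behaviour is unchanged; the import path assumes this file lives
# at tensorflow/python/eager/context.py.
#
#   from tensorflow.python.eager import context
#
#   with context.eager_mode():
#       assert context.in_eager_mode()
#   # Outside the block the thread reverts to its previous (default) mode.
#   assert context.in_graph_mode()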
# TODO(agarwal): get rid of this and use ops.name_scope instead.
@contextlib.contextmanager
def namescope(name):
"""ContextManager for creating hierarchical name scopes."""
ctx = context()
old_name = ctx.scope_name
ctx.scope_name = "%s/%s" % (old_name, name) if old_name else name
try:
yield
finally:
ctx.scope_name = old_name
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tfe.device('gpu:0'):
with tfe.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
return context().device(name)
def list_devices():
"""List the names of the available devices.
Returns:
Names of the available devices, as a `list`.
"""
return context().devices()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
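# Illustrative sketch, not part of the original module: the intended
# enable/export/disable cycle for RunMetadata tracing, written as comments so
# the module's behaviour is unchanged.
#
#   enable_run_metadata()
#   # ... execute some eager operations ...
#   metadata = export_run_metadata()   # a config_pb2.RunMetadata, or None
#   disable_run_metadata()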
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Logging API."""
import os
try:
from google.logging.v2.config_service_v2_api import (
ConfigServiceV2Api as GeneratedSinksAPI)
from google.logging.v2.logging_service_v2_api import (
LoggingServiceV2Api as GeneratedLoggingAPI)
from google.logging.v2.metrics_service_v2_api import (
MetricsServiceV2Api as GeneratedMetricsAPI)
from gcloud.logging._gax import _LoggingAPI as GAXLoggingAPI
from gcloud.logging._gax import _MetricsAPI as GAXMetricsAPI
from gcloud.logging._gax import _SinksAPI as GAXSinksAPI
except ImportError: # pragma: NO COVER
_HAVE_GAX = False
GeneratedLoggingAPI = GAXLoggingAPI = None
GeneratedMetricsAPI = GAXMetricsAPI = None
GeneratedSinksAPI = GAXSinksAPI = None
else:
_HAVE_GAX = True
from gcloud.client import JSONClient
from gcloud.logging.connection import Connection
from gcloud.logging.connection import _LoggingAPI as JSONLoggingAPI
from gcloud.logging.connection import _MetricsAPI as JSONMetricsAPI
from gcloud.logging.connection import _SinksAPI as JSONSinksAPI
from gcloud.logging.entries import ProtobufEntry
from gcloud.logging.entries import StructEntry
from gcloud.logging.entries import TextEntry
from gcloud.logging.logger import Logger
from gcloud.logging.metric import Metric
from gcloud.logging.sink import Sink
_USE_GAX = _HAVE_GAX and (os.environ.get('GCLOUD_ENABLE_GAX') is not None)
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of.
If not passed, falls back to the default inferred
from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
_logging_api = _sinks_api = _metrics_api = None
@property
def logging_api(self):
"""Helper for logging-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs
"""
if self._logging_api is None:
if _USE_GAX:
generated = GeneratedLoggingAPI()
self._logging_api = GAXLoggingAPI(generated)
else:
self._logging_api = JSONLoggingAPI(self.connection)
return self._logging_api
@property
def sinks_api(self):
"""Helper for log sink-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks
"""
if self._sinks_api is None:
if _USE_GAX:
generated = GeneratedSinksAPI()
self._sinks_api = GAXSinksAPI(generated)
else:
self._sinks_api = JSONSinksAPI(self.connection)
return self._sinks_api
@property
def metrics_api(self):
"""Helper for log metric-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics
"""
if self._metrics_api is None:
if _USE_GAX:
generated = GeneratedMetricsAPI()
self._metrics_api = GAXMetricsAPI(generated)
else:
self._metrics_api = JSONMetricsAPI(self.connection)
return self._metrics_api
def logger(self, name):
"""Creates a logger bound to the current client.
:type name: str
:param name: the name of the logger to be constructed.
:rtype: :class:`gcloud.logging.logger.Logger`
:returns: Logger created with the current client.
"""
return Logger(name, client=self)
def _entry_from_resource(self, resource, loggers):
"""Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: one entry resource from API response
:type loggers: dict or None
:param loggers: A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: One of:
:class:`gcloud.logging.entries.TextEntry`,
:class:`gcloud.logging.entries.StructEntry`,
:class:`gcloud.logging.entries.ProtobufEntry`
:returns: the entry instance, constructed via the resource
"""
if 'textPayload' in resource:
return TextEntry.from_api_repr(resource, self, loggers)
elif 'jsonPayload' in resource:
return StructEntry.from_api_repr(resource, self, loggers)
elif 'protoPayload' in resource:
return ProtobufEntry.from_api_repr(resource, self, loggers)
raise ValueError('Cannot parse log entry resource')
def list_entries(self, projects=None, filter_=None, order_by=None,
page_size=None, page_token=None):
"""Return a page of log entries.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the client.
:type filter_: str
:param filter_: a filter expression. See:
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: str
:param order_by: One of :data:`gcloud.logging.ASCENDING` or
:data:`gcloud.logging.DESCENDING`.
:type page_size: int
        :param page_size: maximum number of entries to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of entries. If not
passed, the API will return the first page of
entries.
:rtype: tuple, (list, str)
        :returns: list of :class:`gcloud.logging.entries.TextEntry`, plus a
"next page token" string: if not None, indicates that
more entries can be retrieved with another call (pass that
value as ``page_token``).
"""
if projects is None:
projects = [self.project]
resources, token = self.logging_api.list_entries(
projects=projects, filter_=filter_, order_by=order_by,
page_size=page_size, page_token=page_token)
loggers = {}
entries = [self._entry_from_resource(resource, loggers)
for resource in resources]
return entries, token
def sink(self, name, filter_=None, destination=None):
"""Creates a sink bound to the current client.
:type name: str
:param name: the name of the sink to be constructed.
:type filter_: str
:param filter_: (optional) the advanced logs filter expression
defining the entries exported by the sink. If not
passed, the instance should already exist, to be
refreshed via :meth:`Sink.reload`.
:type destination: str
:param destination: destination URI for the entries exported by
the sink. If not passed, the instance should
already exist, to be refreshed via
:meth:`Sink.reload`.
:rtype: :class:`gcloud.logging.sink.Sink`
:returns: Sink created with the current client.
"""
return Sink(name, filter_, destination, client=self)
def list_sinks(self, page_size=None, page_token=None):
"""List sinks for the project associated with this client.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list
:type page_size: int
        :param page_size: maximum number of sinks to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: tuple, (list, str)
:returns: list of :class:`gcloud.logging.sink.Sink`, plus a
"next page token" string: if not None, indicates that
more sinks can be retrieved with another call (pass that
value as ``page_token``).
"""
resources, token = self.sinks_api.list_sinks(
self.project, page_size, page_token)
sinks = [Sink.from_api_repr(resource, self)
for resource in resources]
return sinks, token
def metric(self, name, filter_=None, description=''):
"""Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`gcloud.logging.metric.Metric`
:returns: Metric created with the current client.
"""
return Metric(name, filter_, client=self, description=description)
def list_metrics(self, page_size=None, page_token=None):
"""List metrics for the project associated with this client.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list
:type page_size: int
        :param page_size: maximum number of metrics to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: tuple, (list, str)
:returns: list of :class:`gcloud.logging.metric.Metric`, plus a
"next page token" string: if not None, indicates that
more metrics can be retrieved with another call (pass that
value as ``page_token``).
"""
resources, token = self.metrics_api.list_metrics(
self.project, page_size, page_token)
metrics = [Metric.from_api_repr(resource, self)
for resource in resources]
return metrics, token
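# Illustrative sketch, not part of the original module: typical use of the
# client defined above. The project id is a hypothetical placeholder and
# credentials fall back to the environment, as described in the class docstring.
#
#   client = Client(project='my-project')
#   logger = client.logger('my-log')
#   entries, token = client.list_entries(page_size=10)
#   sinks, _ = client.list_sinks()
#   metrics, _ = client.list_metrics()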
|
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
import time
class TestFtu(GaiaTestCase):
_activation_section_locator = ('id', 'activation')
_main_title_locator = ('id', 'main_title')
_next_button_locator = ('id', 'forward')
# Step Languages section
_section_languages_locator = ('id', 'languages')
_listed_languages_locator = ('css selector', "#languages ul li input[name='language.current']")
# Step Cell data section
_section_cell_data_locator = ('id', 'data_3g')
_enable_data_checkbox_locator = ('id', 'data-connection-switch')
# Step Wifi
_section_wifi_locator = ('id', 'wifi')
_found_wifi_networks_locator = ('css selector', 'ul#networks li')
_network_state_locator = ('xpath', 'p[2]')
# Step Date & Time
_section_date_time_locator = ('id', 'date_and_time')
_timezone_continent_locator = ('id', 'tz-region')
_timezone_city_locator = ('id', 'tz-city')
_time_zone_title_locator = ('id', 'time-zone-title')
# Section Import contacts
_section_import_contacts_locator = ('id', 'import_contacts')
_import_from_sim_locator = ('id', 'sim_import')
_sim_import_feedback_locator = ('id', 'sim_import_feedback')
# Section About Your rights
_section_ayr_locator = ('id', 'about-your-rights')
# Section Welcome Browser
_section_welcome_browser_locator = ('id', 'welcome_browser')
_enable_statistic_checkbox_locator = ('id', 'share-performance')
# Section Privacy Choices
_section_browser_privacy_locator = ('id', 'browser_privacy')
_email_field_locator = ('css selector', 'input[type="email"]')
# Section Finish
_section_finish_locator = ('id', 'finish-screen')
_skip_tour_button_locator = ('id', 'skip-tutorial-button')
_take_tour_button_locator = ('id', 'lets-go-button')
# Section Tour
_step1_header_locator = ('id', 'step1Header')
_step2_header_locator = ('id', 'step2Header')
_step3_header_locator = ('id', 'step3Header')
_step4_header_locator = ('id', 'step4Header')
_step5_header_locator = ('id', 'step5Header')
_tour_next_button_locator = ('id', 'forwardTutorial')
_tour_back_button_locator = ('id', 'backTutorial')
# Section Tutorial Finish
_section_tutorial_finish_locator = ('id', 'tutorialFinish')
_lets_go_button_locator = ('id', 'tutorialFinished')
def setUp(self):
GaiaTestCase.setUp(self)
# We need WiFi enabled but not connected to a network
self.data_layer.enable_wifi()
self.data_layer.forget_all_networks()
# Cell data must be off so we can switch it on again
self.data_layer.disable_cell_data()
# launch the First Time User app
self.app = self.apps.launch('FTU')
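    # Illustrative note, not part of the original test: the WiFi steps below
    # assume a testvars file with roughly this shape (values hypothetical):
    #   {"wifi": {"ssid": "Mozilla Guest"}}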
def test_ftu_skip_tour(self):
# https://moztrap.mozilla.org/manage/case/3876/
# 3876, 3879
self.wait_for_element_displayed(*self._section_languages_locator)
# FTU is not properly localized yet so let's just check some are listed
# TODO enhance this to include lang selection when FTU is localized
listed_languages = self.marionette.find_elements(*self._listed_languages_locator)
self.assertGreater(len(listed_languages), 0, "No languages listed on screen")
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_cell_data_locator)
# Click enable data
self.marionette.find_element(*self._enable_data_checkbox_locator).click()
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_wifi_locator)
# Wait for some networks to be found
self.wait_for_condition(lambda m: len(m.find_elements(*self._found_wifi_networks_locator)) > 0,
message="No networks listed on screen")
        # TODO This will only work on Mozilla Guest or an unsecured network
wifi_network = self.marionette.find_element('id', self.testvars['wifi']['ssid'])
wifi_network.click()
self.wait_for_condition(
lambda m: wifi_network.find_element(*self._network_state_locator).text == "Connected")
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_date_time_locator)
# Set timezone
continent_select = self.marionette.find_element(*self._timezone_continent_locator)
# Click to activate the b2g select element
continent_select.click()
self._select("Asia")
city_select = self.marionette.find_element(*self._timezone_city_locator)
# Click to activate the b2g select element
city_select.click()
self._select("Almaty")
self.assertEqual(self.marionette.find_element(*self._time_zone_title_locator).text,
"UTC+06:00 Asia/Almaty")
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_import_contacts_locator)
# Commenting out SIM import for now
# # Click import from SIM
# # You can do this as many times as you like without db conflict
# self.marionette.find_element(*self._import_from_sim_locator).click()
#
# # TODO What if Sim has two contacts?
# self.wait_for_condition(lambda m: m.find_element(*self._sim_import_feedback_locator).text ==
# "Imported one contact", message="Contact did not import from sim before timeout")
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_welcome_browser_locator)
# Click the statistics box and check that it sets a setting
# TODO assert via settings API that this is set. Currently it is not used
self.marionette.find_element(*self._enable_statistic_checkbox_locator).click()
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_browser_privacy_locator)
# Enter a dummy email address and check it set inside the os
# TODO assert that this is preserved in the system somewhere. Currently it is not used
self.marionette.find_element(*self._email_field_locator).send_keys("testuser@mozilla.com")
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_finish_locator)
# Skip the tour
self.marionette.find_element(*self._skip_tour_button_locator).click()
# Switch back to top level now that FTU app is gone
self.marionette.switch_to_frame()
self.assertTrue(self.data_layer.get_setting("ril.data.enabled"), "Cell data was not enabled by FTU app")
self.assertTrue(self.data_layer.is_wifi_connected(self.testvars['wifi']), "WiFi was not connected via FTU app")
def test_ftu_with_tour(self):
self.wait_for_element_displayed(*self._section_languages_locator)
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_cell_data_locator)
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_wifi_locator)
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_date_time_locator)
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_import_contacts_locator)
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_welcome_browser_locator)
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_browser_privacy_locator)
# Click next
self.marionette.find_element(*self._next_button_locator).click()
self.wait_for_element_displayed(*self._section_finish_locator)
# Take the tour
self.marionette.find_element(*self._take_tour_button_locator).click()
# Walk through the tour
self.wait_for_element_displayed(*self._step1_header_locator)
self.assertEqual(self.marionette.find_element(*self._step1_header_locator).text,
"Swipe from right to left to browse your apps.")
self.marionette.find_element(*self._tour_next_button_locator).click()
self.wait_for_element_displayed(*self._step2_header_locator)
self.assertEqual(self.marionette.find_element(*self._step2_header_locator).text,
"Swipe from left to right to discover new apps.")
self.marionette.find_element(*self._tour_next_button_locator).click()
self.wait_for_element_displayed(*self._step3_header_locator)
self.assertEqual(self.marionette.find_element(*self._step3_header_locator).text,
"Tap and hold on an icon to delete or move it.")
self.marionette.find_element(*self._tour_next_button_locator).click()
self.wait_for_element_displayed(*self._step4_header_locator)
self.assertEqual(self.marionette.find_element(*self._step4_header_locator).text,
"Swipe down to access recent notifications, credit information and settings.")
self.marionette.find_element(*self._tour_next_button_locator).click()
self.wait_for_element_displayed(*self._step5_header_locator)
self.assertEqual(self.marionette.find_element(*self._step5_header_locator).text,
"Tap and hold the home button to browse and close recent apps.")
# Try going back a step
self.marionette.find_element(*self._tour_back_button_locator).click()
self.wait_for_element_displayed(*self._step4_header_locator)
self.marionette.find_element(*self._tour_next_button_locator).click()
self.wait_for_element_displayed(*self._step5_header_locator)
self.marionette.find_element(*self._tour_next_button_locator).click()
self.wait_for_element_displayed(*self._section_tutorial_finish_locator)
self.marionette.find_element(*self._lets_go_button_locator).click()
# Switch back to top level now that FTU app is gone
self.marionette.switch_to_frame()
def tearDown(self):
# TODO flush any settings set by the FTU app
self.data_layer.disable_cell_data()
self.data_layer.disable_wifi()
GaiaTestCase.tearDown(self)
def _select(self, match_string):
# Cheeky Select wrapper until Marionette has its own
# Due to the way B2G wraps the app's select box we match on text
# Have to go back to top level to get the B2G select box wrapper
self.marionette.switch_to_frame()
options = self.marionette.find_elements('css selector', '#value-selector-container li')
close_button = self.marionette.find_element('css selector', 'button.value-option-confirm')
# Loop options until we find the match
for li in options:
if li.text == match_string:
li.click()
break
close_button.click()
# Now back to app
self.marionette.switch_to_frame(self.app.frame)
|
|
import functools
import os
from flask import Blueprint
from flask import abort
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
from peewee import *
from wtforms import Form
from wtforms import PasswordField
from wtforms import TextField
from wtforms import validators
from flask_turboduck.utils import check_password
from flask_turboduck.utils import get_next
from flask_turboduck.utils import make_password
current_dir = os.path.dirname(__file__)
class LoginForm(Form):
username = TextField('Username', validators=[validators.Required()])
password = PasswordField('Password', validators=[validators.Required()])
class BaseUser(object):
def set_password(self, password):
self.password = make_password(password)
def check_password(self, password):
return check_password(password, self.password)
class Auth(object):
def __init__(self, app, db, user_model=None, prefix='/accounts', name='auth',
clear_session=False, default_next_url='/', db_table='user'):
self.app = app
self.db = db
self.db_table = db_table
self.User = user_model or self.get_user_model()
self.blueprint = self.get_blueprint(name)
self.url_prefix = prefix
self.clear_session = clear_session
self.default_next_url = default_next_url
self.setup()
def get_context_user(self):
return {'user': self.get_logged_in_user()}
def get_user_model(self):
class User(self.db.Model, BaseUser):
username = CharField(unique=True)
password = CharField()
email = CharField(unique=True)
active = BooleanField()
admin = BooleanField(default=False)
def __unicode__(self):
return self.username
class Meta:
db_table = self.db_table # Postgres reserves user as a keyword
return User
def get_model_admin(self, model_admin=None):
if model_admin is None:
from flask_turboduck.admin import ModelAdmin
model_admin = ModelAdmin
class UserAdmin(model_admin):
columns = getattr(model_admin, 'columns') or (
['username', 'email', 'active', 'admin'])
def save_model(self, instance, form, adding=False):
orig_password = instance.password
user = super(UserAdmin, self).save_model(instance, form, adding)
if orig_password != form.password.data:
user.set_password(form.password.data)
user.save()
return user
return UserAdmin
def register_admin(self, admin_site, model_admin=None):
admin_site.register(self.User, self.get_model_admin(model_admin))
def get_blueprint(self, blueprint_name):
return Blueprint(
blueprint_name,
__name__,
static_folder=os.path.join(current_dir, 'static'),
template_folder=os.path.join(current_dir, 'templates'),
)
def get_urls(self):
return (
('/logout/', self.logout),
('/login/', self.login),
)
def get_login_form(self):
return LoginForm
def test_user(self, test_fn):
def decorator(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
user = self.get_logged_in_user()
if not user or not test_fn(user):
login_url = url_for('%s.login' % self.blueprint.name, next=get_next())
return redirect(login_url)
return fn(*args, **kwargs)
return inner
return decorator
def login_required(self, func):
return self.test_user(lambda u: True)(func)
def admin_required(self, func):
return self.test_user(lambda u: u.admin)(func)
def authenticate(self, username, password):
active = self.User.select().where(self.User.active==True)
try:
user = active.where(self.User.username==username).get()
except self.User.DoesNotExist:
return False
else:
if not user.check_password(password):
return False
return user
def login_user(self, user):
session['logged_in'] = True
session['user_pk'] = user.get_id()
session.permanent = True
g.user = user
flash('You are logged in as %s' % user, 'success')
def logout_user(self):
if self.clear_session:
session.clear()
else:
session.pop('logged_in', None)
g.user = None
flash('You are now logged out', 'success')
def get_logged_in_user(self):
if session.get('logged_in'):
if getattr(g, 'user', None):
return g.user
try:
return self.User.select().where(
self.User.active==True,
self.User.id==session.get('user_pk')
).get()
except self.User.DoesNotExist:
pass
def login(self):
error = None
Form = self.get_login_form()
if request.method == 'POST':
form = Form(request.form)
next_url = request.form.get('next') or self.default_next_url
if form.validate():
authenticated_user = self.authenticate(
form.username.data,
form.password.data,
)
if authenticated_user:
self.login_user(authenticated_user)
return redirect(next_url)
else:
flash('Incorrect username or password')
else:
form = Form()
next_url = request.args.get('next')
return render_template(
'auth/login.html',
error=error,
form=form,
login_url=url_for('%s.login' % self.blueprint.name),
next=next_url)
def logout(self):
self.logout_user()
return redirect(request.args.get('next') or self.default_next_url)
def configure_routes(self):
for url, callback in self.get_urls():
self.blueprint.route(url, methods=['GET', 'POST'])(callback)
def register_blueprint(self, **kwargs):
self.app.register_blueprint(self.blueprint, url_prefix=self.url_prefix, **kwargs)
def load_user(self):
g.user = self.get_logged_in_user()
def register_handlers(self):
self.app.before_request(self.load_user)
def register_context_processors(self):
self.app.template_context_processors[None].append(self.get_context_user)
def setup(self):
self.configure_routes()
self.register_blueprint()
self.register_handlers()
self.register_context_processors()
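# Illustrative sketch, not part of the original module: wiring Auth into a
# Flask application with a peewee database wrapper. The Database helper and
# its configuration keys are assumptions; Auth itself registers the
# /accounts/login/ and /accounts/logout/ routes listed in get_urls() above.
#
#   from flask import Flask
#   from flask_turboduck.db import Database
#
#   app = Flask(__name__)
#   app.config['SECRET_KEY'] = 'change-me'
#   app.config['DATABASE'] = {'name': 'example.db', 'engine': 'peewee.SqliteDatabase'}
#
#   db = Database(app)
#   auth = Auth(app, db)
#   User = auth.User  # the default user model created by get_user_model()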
|
|
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------------
# Name: xgrablib.py
# Purpose: Create a File Geo database from a xGRAB-file
# Author: Kay Warrie
#
# Created: 28/05/2017
# Licence: MIT
#-------------------------------------------------------------------------------
import os, sys, codecs, datetime
import xml.etree.cElementTree as etree
try:
import arcpy
except ImportError:
pass
from _helpers import *
etree.register_namespace("gml","http://www.opengis.net/gml")
etree.register_namespace("","http://crab.agiv.be")
class xgrab2geodb:
def __init__(self, xgrabPath , geoDB ):
xmlGrab = etree.parse(xgrabPath)
self.rootgrab = xmlGrab.getroot()
self.components = self.rootgrab.find("{http://crab.agiv.be}COMPONENTEN")
self.geoDB = geoDB
arcpy.env.overwriteOutput = True
arcpy.env.workspace = geoDB
def createAll(self, includeEndDates=True, includeadresPos=True ):
self.STRAATNAMEN(includeEndDates, True, False)
self.STRAATNAAMSTATUSSEN(includeEndDates, True, False)
self.HUISNUMMERS(includeEndDates, True, False)
self.HUISNUMMERSTATUSSEN(includeEndDates, True, False)
self.POSTKANTONCODES(includeEndDates, True, False)
self.RRSTRAATNAAM_STRAATNAAM_RELATIES(includeEndDates, True, False)
self.TERREINOBJECT_HUISNUMMER_RELATIES(includeEndDates, True, False)
self.TERREINOBJECTEN(includeEndDates, [2,3,5,99], True, False)
self.GEBOUWSTATUSSEN(includeEndDates, True, False)
self.GEBOUWGEOMETRIEN(includeEndDates, True, False)
if includeadresPos:
self.ADRESPOSITIES(includeEndDates, True, False)
self.ADRES_RRADRES_RELATIES(includeEndDates, True, False)
self.RRADRESSEN(includeEndDates, True, False)
self.RSTRAATNAAM_STRAATNAAM_RELATIES(includeEndDates, True, False)
self.STRAATKANTEN(includeEndDates, True, False)
self.SUBADRESSEN(includeEndDates, True, False)
self.WEGVERBINDINGGEOMETRIEN(includeEndDates, True, False) #geo
def updateAll(self, includeadresPos=True ):
self.STRAATNAMEN(True, False, True)
self.STRAATNAAMSTATUSSEN(True, False, True)
self.HUISNUMMERS(True, False, True)
self.HUISNUMMERSTATUSSEN(True, False, True)
self.POSTKANTONCODES(True, False,True)
self.RRSTRAATNAAM_STRAATNAAM_RELATIES(True, False, True)
self.TERREINOBJECT_HUISNUMMER_RELATIES(True, False, True)
self.TERREINOBJECTEN(True, [2,3,5,99], False, True)
self.GEBOUWSTATUSSEN(True, False, True)
self.GEBOUWGEOMETRIEN(True, False, True)
if includeadresPos:
self.ADRESPOSITIES(True, False, True)
self.ADRES_RRADRES_RELATIES(True, False, True)
self.RRADRESSEN(True, False, True)
self.RSTRAATNAAM_STRAATNAAM_RELATIES(True, False, True)
self.STRAATKANTEN(True, False, True)
self.SUBADRESSEN(True, False, True)
self.WEGVERBINDINGGEOMETRIEN(True, False, True) #geo
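    # Illustrative sketch, not part of the original module: typical use of this
    # class from a script. Both paths below are hypothetical placeholders.
    #
    #   converter = xgrab2geodb(r"C:\data\gemeente.xgrab", r"C:\data\crab.gdb")
    #   converter.createAll(includeEndDates=False)   # initial load
    #   converter.updateAll()                        # later incremental update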
def STRAATNAMEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}STRAATNAMEN")
if create and not arcpy.Exists('STRAATNAMEN'):
createTbl(self.geoDB, "STRAATNAMEN",
[field("ID","LONG"),
field("STRAATCODE","LONG"),
field("NISGEMEENTECODE","LONG"),
field("STRAATNAAM","TEXT"),
field("TAALCODESTRAATNAAM","TEXT", 10),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('STRAATNAMEN') and not append:
arcpy.management.DeleteRows('STRAATNAMEN')
elif arcpy.Exists('STRAATNAMEN') and append:
            ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()
                   if r.find("{http://crab.agiv.be}ID").text.isdigit()]
with arcpy.da.UpdateCursor(self.geoDB + "\\STRAATNAMEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\STRAATNAMEN",
("ID","STRAATCODE", "NISGEMEENTECODE", "STRAATNAAM", "TAALCODESTRAATNAAM",
"BEGINDATUM","BEGINORGANISATIE","BEGINBEWERKING","EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
STRAATCODE = row.find("{http://crab.agiv.be}STRAATCODE").text
NISGEMEENTECODE = row.find("{http://crab.agiv.be}NISGEMEENTECODE").text
STRAATNAAM = row.find("{http://crab.agiv.be}STRAATNAAM").text
TAALCODESTRAATNAAM = row.find("{http://crab.agiv.be}TAALCODESTRAATNAAM").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID,STRAATCODE, NISGEMEENTECODE, STRAATNAAM, TAALCODESTRAATNAAM,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID,STRAATCODE, NISGEMEENTECODE, STRAATNAAM, TAALCODESTRAATNAAM,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def STRAATNAAMSTATUSSEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}STRAATNAAMSTATUSSEN")
if create and not arcpy.Exists('STRAATNAAMSTATUSSEN'):
createTbl(self.geoDB, "STRAATNAAMSTATUSSEN",
[field("ID","LONG"),
field("STRAATNAAMID","LONG"),
field("STRAATNAAMSTATUS","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('STRAATNAAMSTATUSSEN') and not append:
arcpy.management.DeleteRows('STRAATNAAMSTATUSSEN')
elif arcpy.Exists('STRAATNAAMSTATUSSEN') and append:
            ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()
                   if r.find("{http://crab.agiv.be}ID").text.isdigit()]
with arcpy.da.UpdateCursor(self.geoDB + "\\STRAATNAAMSTATUSSEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\" + "STRAATNAAMSTATUSSEN",
("ID","STRAATNAAMID", "STRAATNAAMSTATUS",
"BEGINDATUM","BEGINORGANISATIE","BEGINBEWERKING","EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
STRAATNAAMID = row.find("{http://crab.agiv.be}STRAATNAAMID").text
STRAATNAAMSTATUS = row.find("{http://crab.agiv.be}STRAATNAAMSTATUS").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID,STRAATNAAMID, STRAATNAAMSTATUS,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID,STRAATNAAMID, STRAATNAAMSTATUS,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def HUISNUMMERS(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}HUISNUMMERS")
if create and not arcpy.Exists('HUISNUMMERS'):
createTbl(self.geoDB, "HUISNUMMERS",
[field("ID","LONG"),
field("STRAATNAAMID","LONG"),
field("HUISNUMMER","TEXT", 64),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('HUISNUMMERS') and not append:
arcpy.management.DeleteRows('HUISNUMMERS')
elif arcpy.Exists('HUISNUMMERS') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\HUISNUMMERS", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "HUISNUMMERS",
("ID","STRAATNAAMID", "HUISNUMMER",
"BEGINDATUM","BEGINORGANISATIE","BEGINBEWERKING","EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
STRAATNAAMID = row.find("{http://crab.agiv.be}STRAATNAAMID").text
HUISNUMMER = row.find("{http://crab.agiv.be}HUISNUMMER").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, STRAATNAAMID, HUISNUMMER,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, STRAATNAAMID, HUISNUMMER,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def HUISNUMMERSTATUSSEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}HUISNUMMERSTATUSSEN")
if create and not arcpy.Exists('HUISNUMMERSTATUSSEN'):
createTbl(self.geoDB, "HUISNUMMERSTATUSSEN",
[field("ID","LONG"),
field("HUISNUMMERID","LONG"),
field("HUISNUMMERSTATUS","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('HUISNUMMERSTATUSSEN') and not append:
arcpy.management.DeleteRows('HUISNUMMERSTATUSSEN')
elif arcpy.Exists('HUISNUMMERSTATUSSEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\HUISNUMMERSTATUSSEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "HUISNUMMERSTATUSSEN",
("ID","HUISNUMMERID", "HUISNUMMERSTATUS",
"BEGINDATUM","BEGINORGANISATIE","BEGINBEWERKING","EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
HUISNUMMERID = row.find("{http://crab.agiv.be}HUISNUMMERID").text
HUISNUMMERSTATUS = row.find("{http://crab.agiv.be}HUISNUMMERSTATUS").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, HUISNUMMERID, HUISNUMMERSTATUS,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, HUISNUMMERID, HUISNUMMERSTATUS,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def POSTKANTONCODES(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}POSTKANTONCODES")
if create and not arcpy.Exists('POSTKANTONCODES'):
createTbl(self.geoDB, "POSTKANTONCODES",
[field("ID","LONG"),
field("HUISNUMMERID","LONG"),
field("POSTKANTONCODE","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('POSTKANTONCODES') and not append:
arcpy.management.DeleteRows('POSTKANTONCODES')
elif arcpy.Exists('POSTKANTONCODES') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\POSTKANTONCODES", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "POSTKANTONCODES",
("ID", "HUISNUMMERID", "POSTKANTONCODE",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
HUISNUMMERID = row.find("{http://crab.agiv.be}HUISNUMMERID").text
POSTKANTONCODE = row.find("{http://crab.agiv.be}POSTKANTONCODE").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, HUISNUMMERID, POSTKANTONCODE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, HUISNUMMERID, POSTKANTONCODE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def RRSTRAATNAAM_STRAATNAAM_RELATIES(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}RRSTRAATNAAM_STRAATNAAM_RELATIES")
if create and not arcpy.Exists('RRSTRAATNAAM_STRAATNAAM_RELATIES'):
createTbl(self.geoDB, "RRSTRAATNAAM_STRAATNAAM_RELATIES",
[field("ID","LONG"),
field("STRAATNAAMID","LONG"),
field("SUBKANTONCODE","TEXT", 24),
field("RRSTRAATCODE","TEXT", 128),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('RRSTRAATNAAM_STRAATNAAM_RELATIES') and not append:
arcpy.management.DeleteRows('RRSTRAATNAAM_STRAATNAAM_RELATIES')
elif arcpy.Exists('RRSTRAATNAAM_STRAATNAAM_RELATIES') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\RRSTRAATNAAM_STRAATNAAM_RELATIES", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "RRSTRAATNAAM_STRAATNAAM_RELATIES",
("ID", "STRAATNAAMID", "SUBKANTONCODE", "RRSTRAATCODE", "BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
STRAATNAAMID = row.find("{http://crab.agiv.be}STRAATNAAMID").text
SUBKANTONCODE = row.find("{http://crab.agiv.be}SUBKANTONCODE").text
RRSTRAATCODE = row.find("{http://crab.agiv.be}RRSTRAATCODE").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, STRAATNAAMID, SUBKANTONCODE, RRSTRAATCODE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, STRAATNAAMID, SUBKANTONCODE, RRSTRAATCODE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def TERREINOBJECT_HUISNUMMER_RELATIES(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}TERREINOBJECT_HUISNUMMER_RELATIES")
if create and not arcpy.Exists('TERREINOBJECT_HUISNUMMER_RELATIES'):
createTbl(self.geoDB, "TERREINOBJECT_HUISNUMMER_RELATIES",
[field("ID","LONG"),
field("TERREINOBJECTID","LONG"),
field("HUISNUMMERID","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('TERREINOBJECT_HUISNUMMER_RELATIES') and not append:
arcpy.management.DeleteRows('TERREINOBJECT_HUISNUMMER_RELATIES')
elif arcpy.Exists('TERREINOBJECT_HUISNUMMER_RELATIES') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\TERREINOBJECT_HUISNUMMER_RELATIES", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "TERREINOBJECT_HUISNUMMER_RELATIES",
("ID", "TERREINOBJECTID", "HUISNUMMERID",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
TERREINOBJECTID = row.find("{http://crab.agiv.be}TERREINOBJECTID").text
HUISNUMMERID = row.find("{http://crab.agiv.be}HUISNUMMERID").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, TERREINOBJECTID, HUISNUMMERID,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, TERREINOBJECTID, HUISNUMMERID,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def TERREINOBJECTEN(self, includeEndDates=True, aardAllowed=[1,2,3,4,5,99], create=True, append=False):
""" aardAllowed=1: kadastraal perceel
2: GRB gebouw
3: GRB kunstwerk
4: GRB administratief perceel
5: gebouw volgens de gemeente
99: andere """
rows = self.components.find("{http://crab.agiv.be}TERREINOBJECTEN")
if create and not arcpy.Exists('TERREINOBJECTEN'):
createTbl(self.geoDB, "TERREINOBJECTEN",
[field("ID","LONG"),
field("IDENTIFICATORTERREINOBJECT","TEXT"),
field("AARDTERREINOBJECT","LONG"),
field("AARDGEBOUW","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('TERREINOBJECTEN') and not append:
arcpy.management.DeleteRows('TERREINOBJECTEN')
elif arcpy.Exists('TERREINOBJECTEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\TERREINOBJECTEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "TERREINOBJECTEN",
("ID", "IDENTIFICATORTERREINOBJECT", "AARDTERREINOBJECT", "AARDGEBOUW",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
IDENTIFICATORTERREINOBJECT = row.find("{http://crab.agiv.be}IDENTIFICATORTERREINOBJECT").text
AARDTERREINOBJECT = row.find("{http://crab.agiv.be}AARDTERREINOBJECT").text
        # AARDGEBOUW is a nested node (GEBOUW/AARDGEBOUW) and is sometimes missing (NULL)
AARDGEBOUWnode = row.find( "{http://crab.agiv.be}GEBOUW/{http://crab.agiv.be}AARDGEBOUW" )
if AARDGEBOUWnode != None: AARDGEBOUW = AARDGEBOUWnode.text
else: AARDGEBOUW = None
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None and int(AARDTERREINOBJECT) in aardAllowed:
curs.insertRow((ID, IDENTIFICATORTERREINOBJECT, AARDTERREINOBJECT, AARDGEBOUW,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
        elif includeEndDates and int(AARDTERREINOBJECT) in aardAllowed:
curs.insertRow((ID, IDENTIFICATORTERREINOBJECT, AARDTERREINOBJECT, AARDGEBOUW,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def GEBOUWSTATUSSEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}GEBOUWSTATUSSEN")
if create and not arcpy.Exists('GEBOUWSTATUSSEN'):
createTbl(self.geoDB, "GEBOUWSTATUSSEN",
[field("ID","LONG"),
field("TERREINOBJECTID","LONG"),
field("GEBOUWSTATUS","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
])
elif arcpy.Exists('GEBOUWSTATUSSEN') and not append:
arcpy.management.DeleteRows('GEBOUWSTATUSSEN')
elif arcpy.Exists('GEBOUWSTATUSSEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\GEBOUWSTATUSSEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "GEBOUWSTATUSSEN",
("ID", "TERREINOBJECTID", "GEBOUWSTATUS",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
TERREINOBJECTID = row.find("{http://crab.agiv.be}TERREINOBJECTID").text
GEBOUWSTATUS = row.find("{http://crab.agiv.be}GEBOUWSTATUS").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, TERREINOBJECTID, GEBOUWSTATUS,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, TERREINOBJECTID, GEBOUWSTATUS,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
# geo: polygon feature class (GEBOUWGEOMETRIEN)
def GEBOUWGEOMETRIEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}GEBOUWGEOMETRIEN")
if create and not arcpy.Exists('GEBOUWGEOMETRIEN'):
createTbl(self.geoDB, "GEBOUWGEOMETRIEN",
[field("ID","LONG"),
field("TERREINOBJECTID","LONG"),
field("METHODEGEBOUWGEOMETRIE","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
], "POLYGON")
elif arcpy.Exists('GEBOUWGEOMETRIEN') and not append:
arcpy.management.DeleteFeatures('GEBOUWGEOMETRIEN')
elif arcpy.Exists('GEBOUWGEOMETRIEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\GEBOUWGEOMETRIEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "GEBOUWGEOMETRIEN",
("ID", "TERREINOBJECTID", "METHODEGEBOUWGEOMETRIE",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM", "SHAPE@") )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
TERREINOBJECTID = row.find("{http://crab.agiv.be}TERREINOBJECTID").text
GEBOUWGEOMETRIE = row.find("{http://crab.agiv.be}GEBOUWGEOMETRIE")
SHAPE = gmlPoly2esri( GEBOUWGEOMETRIE )
METHODEGEBOUWGEOMETRIE = row.find("{http://crab.agiv.be}METHODEGEBOUWGEOMETRIE").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, TERREINOBJECTID, METHODEGEBOUWGEOMETRIE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM, SHAPE ))
elif includeEndDates:
curs.insertRow((ID, TERREINOBJECTID, METHODEGEBOUWGEOMETRIE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM, SHAPE ))
del curs
# geo: point feature class (ADRESPOSITIES)
def ADRESPOSITIES(self, includeEndDates=True, create=True, append=False):
rows = self.rootgrab[0].find("{http://crab.agiv.be}ADRESPOSITIES")
if create and not arcpy.Exists('ADRESPOSITIES'):
createTbl(self.geoDB, "ADRESPOSITIES",
[field("ID","LONG"),
field("ADRESID","LONG"),
field("AARDADRES","LONG"),
field("HERKOMSTADRESPOSITIE","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
], "POINT")
elif arcpy.Exists('ADRESPOSITIES') and not append:
arcpy.management.DeleteFeatures('ADRESPOSITIES')
elif arcpy.Exists('ADRESPOSITIES') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\ADRESPOSITIES", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "ADRESPOSITIES",
("ID", "ADRESID", "AARDADRES", "HERKOMSTADRESPOSITIE",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM", "SHAPE@XY") )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
ADRESID = row.find("{http://crab.agiv.be}ADRESID").text
AARDADRES = row.find("{http://crab.agiv.be}AARDADRES").text
X, Y = row.find("{http://crab.agiv.be}ADRESPOSITIE")[0][0].text.split()
HERKOMSTADRESPOSITIE = row.find("{http://crab.agiv.be}HERKOMSTADRESPOSITIE").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, ADRESID, AARDADRES, HERKOMSTADRESPOSITIE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM, [float(X),float(Y)] ))
elif includeEndDates:
curs.insertRow((ID, ADRESID, AARDADRES, HERKOMSTADRESPOSITIE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM, [float(X),float(Y)] ))
del curs
def ADRES_RRADRES_RELATIES(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}ADRES_RRADRES_RELATIES")
if create and not arcpy.Exists('ADRES_RRADRES_RELATIES'):
createTbl(self.geoDB, "ADRES_RRADRES_RELATIES",
[field("ID","LONG"),
field("ADRESID","LONG"),
field("AARDADRES","LONG"),
field("RRADRESID","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"), field("BEGINBEWERKING","LONG"), field("EINDDATUM","DATE")
])
elif arcpy.Exists('ADRES_RRADRES_RELATIES') and not append:
arcpy.management.DeleteRows('ADRES_RRADRES_RELATIES')
elif arcpy.Exists('ADRES_RRADRES_RELATIES') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\ADRES_RRADRES_RELATIES", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "ADRES_RRADRES_RELATIES",
("ID", "ADRESID", "AARDADRES", "RRADRESID", "BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row[0].text
ADRESID = row.find("{http://crab.agiv.be}ADRESID").text
AARDADRES = row.find("{http://crab.agiv.be}AARDADRES").text
RRADRESID = row.find("{http://crab.agiv.be}RRADRESID").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, ADRESID, AARDADRES, RRADRESID, BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, ADRESID, AARDADRES, RRADRESID, BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def RRADRESSEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}RRADRESSEN")
if create and not arcpy.Exists('RRADRESSEN'):
createTbl(self.geoDB, "RRADRESSEN",
[field("ID","LONG"),
field("RRHUISNUMMER","TEXT"),
field("SUBKANTONCODE","TEXT"),
field("RRSTRAATCODE","TEXT"),
field("RRINDEX","TEXT"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"), field("BEGINBEWERKING","LONG"), field("EINDDATUM","DATE")
])
elif arcpy.Exists('RRADRESSEN') and not append:
arcpy.management.DeleteRows('RRADRESSEN')
elif arcpy.Exists('RRADRESSEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\RRADRESSEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "RRADRESSEN",
("ID", "RRHUISNUMMER", "SUBKANTONCODE", "RRSTRAATCODE", "RRINDEX", "BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row[0].text
RRHUISNUMMER = row.find("{http://crab.agiv.be}RRHUISNUMMER").text
SUBKANTONCODE = row.find("{http://crab.agiv.be}SUBKANTONCODE").text
RRSTRAATCODE = row.find("{http://crab.agiv.be}RRSTRAATCODE").text
INDEXnode = row.find("{http://crab.agiv.be}INDEX")
if INDEXnode != None: RRINDEX = INDEXnode.text
else: RRINDEX = None
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, RRHUISNUMMER, SUBKANTONCODE, RRSTRAATCODE, RRINDEX,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, RRHUISNUMMER, SUBKANTONCODE, RRSTRAATCODE, RRINDEX,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def RSTRAATNAAM_STRAATNAAM_RELATIES(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}RRSTRAATNAAM_STRAATNAAM_RELATIES")
    if create and not arcpy.Exists('RSTRAATNAAM_STRAATNAAM_RELATIES'):
createTbl(self.geoDB, "RSTRAATNAAM_STRAATNAAM_RELATIES",
[field("ID","LONG"),
field("STRAATNAAMID", "LONG"),
field("SUBKANTONCODE","TEXT"),
field("RRSTRAATCODE","TEXT"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"), field("BEGINBEWERKING","LONG"), field("EINDDATUM","DATE")
])
elif arcpy.Exists('RSTRAATNAAM_STRAATNAAM_RELATIES') and not append:
arcpy.management.DeleteRows('RSTRAATNAAM_STRAATNAAM_RELATIES')
elif arcpy.Exists('RSTRAATNAAM_STRAATNAAM_RELATIES') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\RSTRAATNAAM_STRAATNAAM_RELATIES", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
    curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "RSTRAATNAAM_STRAATNAAM_RELATIES",
("ID", "STRAATNAAMID", "SUBKANTONCODE", "RRSTRAATCODE", "BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row[0].text
STRAATNAAMID = row.find("{http://crab.agiv.be}STRAATNAAMID").text
SUBKANTONCODE = row.find("{http://crab.agiv.be}SUBKANTONCODE").text
RRSTRAATCODE = row.find("{http://crab.agiv.be}RRSTRAATCODE").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, STRAATNAAMID, SUBKANTONCODE, RRSTRAATCODE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, STRAATNAAMID, SUBKANTONCODE, RRSTRAATCODE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def STRAATKANTEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}STRAATKANTEN")
if create and not arcpy.Exists('STRAATKANTEN'):
createTbl(self.geoDB, "STRAATKANTEN",
[field("ID","LONG"),
field("STRAATNAAMID", "LONG"),
field("WEGOBJECTID", "LONG"),
field("KANT", "LONG"),
field("PARITEIT", "LONG"),
field("EERSTEHUISNUMMER","TEXT"),
field("LAATSTEHUISNUMMER","TEXT"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"), field("BEGINBEWERKING","LONG"), field("EINDDATUM","DATE")
])
elif arcpy.Exists('STRAATKANTEN') and not append:
arcpy.management.DeleteRows('STRAATKANTEN')
elif arcpy.Exists('STRAATKANTEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\STRAATKANTEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "STRAATKANTEN",
("ID", "STRAATNAAMID", "WEGOBJECTID", "KANT", "PARITEIT", "EERSTEHUISNUMMER", "LAATSTEHUISNUMMER",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row[0].text
STRAATNAAMID = row.find("{http://crab.agiv.be}STRAATNAAMID").text
WEGOBJECTID = row.find("{http://crab.agiv.be}WEGOBJECTID").text
KANT = row.find("{http://crab.agiv.be}KANT").text
PARITEITnode = row.find("{http://crab.agiv.be}PARITEIT")
if PARITEITnode != None: PARITEIT = PARITEITnode.text
else: PARITEIT = None
EERSTEHUISNUMMERnode = row.find("{http://crab.agiv.be}EERSTEHUISNUMMER")
if EERSTEHUISNUMMERnode != None: EERSTEHUISNUMMER = EERSTEHUISNUMMERnode.text
else: EERSTEHUISNUMMER = ""
LAATSTEHUISNUMMERnode = row.find("{http://crab.agiv.be}LAATSTEHUISNUMMER")
if LAATSTEHUISNUMMERnode != None: LAATSTEHUISNUMMER = LAATSTEHUISNUMMERnode.text
else: LAATSTEHUISNUMMER = ""
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, STRAATNAAMID, WEGOBJECTID, KANT, PARITEIT, EERSTEHUISNUMMER, LAATSTEHUISNUMMER,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, STRAATNAAMID, WEGOBJECTID, KANT, PARITEIT, EERSTEHUISNUMMER, LAATSTEHUISNUMMER,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
def SUBADRESSEN(self, includeEndDates=True, create=True, append=False):
rows = self.components.find("{http://crab.agiv.be}SUBADRESSEN")
if create and not arcpy.Exists('SUBADRESSEN'):
createTbl(self.geoDB, "SUBADRESSEN",
[field("ID","LONG"),
field("HUISNUMMERID", "LONG"),
field("SUBADRES","TEXT"),
field("AARDSUBADRES","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"), field("BEGINBEWERKING","LONG"), field("EINDDATUM","DATE")
])
elif arcpy.Exists('SUBADRESSEN') and not append:
arcpy.management.DeleteRows('SUBADRESSEN')
elif arcpy.Exists('SUBADRESSEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\SUBADRESSEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "SUBADRESSEN",
("ID", "HUISNUMMERID", "SUBADRES", "AARDSUBADRES",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM" ) )
for row in rows.getchildren():
ID = row[0].text
HUISNUMMERID = row.find("{http://crab.agiv.be}HUISNUMMERID").text
SUBADRES = row.find("{http://crab.agiv.be}SUBADRES").text
AARDSUBADRES = row.find("{http://crab.agiv.be}AARDSUBADRES").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, HUISNUMMERID, SUBADRES, AARDSUBADRES,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
elif includeEndDates:
curs.insertRow((ID, HUISNUMMERID, SUBADRES, AARDSUBADRES,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM ))
del curs
# geo: polyline feature class (WEGVERBINDINGGEOMETRIEN)
def WEGVERBINDINGGEOMETRIEN(self, includeEndDates=True, create=True, append=False):
rows = self.rootgrab[0].find("{http://crab.agiv.be}WEGVERBINDINGGEOMETRIEN")
if create and not arcpy.Exists('WEGVERBINDINGGEOMETRIEN'):
createTbl(self.geoDB, "WEGVERBINDINGGEOMETRIEN",
[field("ID","LONG"),
field("WEGOBJECTID","LONG"),
field("METHODEWEGVERBINDINGGEOMETRIE","LONG"),
field("BEGINDATUM","DATE"),
field("BEGINORGANISATIE","LONG"),
field("BEGINBEWERKING","LONG"),
field("EINDDATUM","DATE")
], "POLYLINE")
elif arcpy.Exists('WEGVERBINDINGGEOMETRIEN') and not append:
arcpy.management.DeleteFeatures('WEGVERBINDINGGEOMETRIEN')
elif arcpy.Exists('WEGVERBINDINGGEOMETRIEN') and append:
ids = [int(r.find("{http://crab.agiv.be}ID").text) for r in rows.getchildren()]
with arcpy.da.UpdateCursor(self.geoDB + "\\WEGVERBINDINGGEOMETRIEN", ["ID"]) as cursor:
for row in cursor:
if row[0] in ids: cursor.deleteRow()
curs = arcpy.da.InsertCursor( self.geoDB + "\\"+ "WEGVERBINDINGGEOMETRIEN",
("ID", "WEGOBJECTID", "METHODEWEGVERBINDINGGEOMETRIE",
"BEGINDATUM", "BEGINORGANISATIE", "BEGINBEWERKING", "EINDDATUM", "SHAPE@") )
for row in rows.getchildren():
ID = row.find("{http://crab.agiv.be}ID").text
WEGVERBINDINGGEOMETRIE = row.find("{http://crab.agiv.be}WEGVERBINDINGGEOMETRIE")[0]
SHAPE = gmlLineEsri( WEGVERBINDINGGEOMETRIE )
WEGOBJECTID = row.find("{http://crab.agiv.be}WEGOBJECTID").text
METHODEWEGVERBINDINGGEOMETRIE = row.find("{http://crab.agiv.be}METHODEWEGVERBINDINGGEOMETRIE").text
BEGINDATUM = row.find("{http://crab.agiv.be}BEGINDATUM").text
BEGINORGANISATIE = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}ORGANISATIE").text
BEGINBEWERKING = row.find("{http://crab.agiv.be}BEGINMETADATA/{http://crab.agiv.be}BEWERKING").text
EINDnode = row.find("{http://crab.agiv.be}EINDDATUM")
if EINDnode != None: EINDDATUM = EINDnode.text
else: EINDDATUM = None
if not includeEndDates and EINDDATUM == None:
curs.insertRow((ID, WEGOBJECTID, METHODEWEGVERBINDINGGEOMETRIE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM, SHAPE ))
elif includeEndDates:
curs.insertRow((ID, WEGOBJECTID, METHODEWEGVERBINDINGGEOMETRIE,
BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, EINDDATUM, SHAPE ))
del curs
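# --- Editor's addition: a hedged, self-contained sketch (not called by the loaders) ---
# Every loader above follows the same ElementTree pattern: namespace-qualified
# find() calls against the {http://crab.agiv.be} namespace plus a None check for
# optional child nodes such as EINDDATUM. The helper below illustrates that
# pattern on a made-up, CRAB-like XML snippet.
def _illustrate_optional_node_lookup():
    import xml.etree.ElementTree as ET
    ns = "{http://crab.agiv.be}"
    sample = (
        '<ITEMS xmlns="http://crab.agiv.be">'
        '<ITEM><ID>1</ID><BEGINDATUM>2001-01-01</BEGINDATUM></ITEM>'
        '<ITEM><ID>2</ID><BEGINDATUM>2002-02-02</BEGINDATUM>'
        '<EINDDATUM>2010-01-01</EINDDATUM></ITEM>'
        '</ITEMS>'
    )
    root = ET.fromstring(sample)
    for item in root:
        # optional node: only items with an EINDDATUM child yield a value
        eind_node = item.find(ns + "EINDDATUM")
        einddatum = eind_node.text if eind_node is not None else None
        print("%s %s" % (item.find(ns + "ID").text, einddatum))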
|
|
from os.path import join as ojoin
def ground(data_path):
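    """Ground the paper-review model from whitespace-separated text files in data_path.

    Returns (rules, hard_rules, counts, atoms): rules are (weight, body, head)
    triples (hard_rules carry None as the weight); every literal in a body or
    head is a 3-tuple that appears to encode
    (is_observed, truth_value_or_variable_id, is_negated); counts holds one
    (highRank, student, presents_literal) entry per author; atoms maps relation
    names to their (variable_id, truth) dictionaries.
    """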
# authors
authors = []
author_fname = ojoin(data_path, 'author.txt')
with open(author_fname) as f:
for line in f:
line = line.strip()
if not line: continue
authors.append(line.split()[0])
# papers
papers = []
papers_fname = ojoin(data_path, 'paper.txt')
with open(papers_fname) as f:
for line in f:
line = line.strip()
if not line: continue
papers.append(line.split()[0])
var_id = 0
# PositiveReview
paper_to_reviwer = dict()
reviewer_to_paper = dict()
positive_review_rel = dict()
positive_review_truth = dict()
positive_review_fname = ojoin(data_path, 'positiveReview.txt')
with open(positive_review_fname) as f:
for line in f:
line = line.strip()
if not line: continue
[reviewer, paper, truth] = line.split()
if paper in paper_to_reviwer:
paper_to_reviwer[paper].append(reviewer)
else:
paper_to_reviwer[paper] = [reviewer]
if reviewer in reviewer_to_paper:
reviewer_to_paper[reviewer].append(paper)
else:
reviewer_to_paper[reviewer] = [paper]
positive_review_rel[(reviewer, paper)] = (False, var_id)
positive_review_truth[(reviewer, paper)] = (var_id, truth)
var_id += 1
# PositiveSummary
positive_summary_rel = dict()
positive_summary_fname = ojoin(data_path, 'positiveSummary.txt')
with open(positive_summary_fname) as f:
for line in f:
line = line.strip()
if not line: continue
[paper, truth] = line.split()
positive_summary_rel[paper]= (True, float(truth))
# Acceptable
acceptable_rel = dict()
acceptable_truth = dict()
acceptable_fname = ojoin(data_path, 'acceptable.txt')
with open(acceptable_fname) as f:
for line in f:
line = line.strip()
if not line: continue
[paper, truth] = line.split()
acceptable_rel[paper] = (False, var_id)
acceptable_truth[paper] = (var_id, truth)
var_id += 1
# Submits
submits_rel = dict()
submits_fname = ojoin(data_path, 'submits.txt')
with open(submits_fname) as f:
for line in f:
line = line.strip()
if not line: continue
[author, paper, truth] = line.split()
submits_rel[(author, paper)] = (True, float(truth))
# Presents
presents_rel = dict()
presents_truth = dict()
for a in authors:
presents_rel[a] = (False, var_id)
presents_truth[a] = (var_id, None)
var_id += 1
rules = []
# 1: PositiveSummary(P) & Reviews(R1,P) & Reviews(R2,P) &
# PositiveReview(R1,P) -> PositiveReview(R2,P)
for p in papers:
for r1 in paper_to_reviwer[p]:
for r2 in paper_to_reviwer[p]:
body = [positive_summary_rel[p] + (False,),
positive_review_rel[(r1, p)] + (False,)]
head = [positive_review_rel[(r2, p)] + (False,)]
rules.append((1, body, head))
# 1: !PositiveSummary(P) & Reviews(R1,P) & Reviews(R2,P) &
# PositiveReview(R1,P) -> !PositiveReview(R2,P)
for p in papers:
for r1 in paper_to_reviwer[p]:
for r2 in paper_to_reviwer[p]:
body = [positive_summary_rel[p] + (True,),
positive_review_rel[(r1, p)] + (False,)]
head = [positive_review_rel[(r2, p)] + (True,)]
rules.append((1, body, head))
# 1: !PositiveSummary(P) & Reviews(R1,P) & Reviews(R2,P) &
# !PositiveReview(R1,P) -> !PositiveReview(R2,P)
for p in papers:
for r1 in paper_to_reviwer[p]:
for r2 in paper_to_reviwer[p]:
body = [positive_summary_rel[p] + (True,),
positive_review_rel[(r1, p)] + (True,)]
head = [positive_review_rel[(r2, p)] + (True,)]
rules.append((1, body, head))
# 1: PositiveSummary(P) & Reviews(R,P) -> PositiveReview(R,P)
for p in papers:
for r in paper_to_reviwer[p]:
body = [positive_summary_rel[p] + (False, )]
head = [positive_review_rel[(r, p)] + (False,)]
rules.append((1, body, head))
# 1: !PositiveSummary(P) & Reviews(R,P) -> !PositiveReview(R,P)
for p in papers:
for r in paper_to_reviwer[p]:
body = [positive_summary_rel[p] + (True, )]
head = [positive_review_rel[(r, p)] + (True,)]
rules.append((1, body, head))
# 1: PositiveReview(R,P) & Reviews(R,P) -> Acceptable(P)
for p in papers:
for r in paper_to_reviwer[p]:
body = [positive_review_rel[(r, p)] + (False,)]
head = [acceptable_rel[p] + (False,)]
rules.append((1, body, head))
# 1: !PositiveReview(R,P) & Reviews(R,P) -> !Acceptable(P)
for p in papers:
for r in paper_to_reviwer[p]:
body = [positive_review_rel[(r, p)] + (True,)]
head = [acceptable_rel[p] + (True,)]
rules.append((1, body, head))
# 1: Reviews(R,P1) & Reviews(R,P2) & PositiveReview(R,P1) &
# Acceptable(P1) & Acceptable(P2) & (P1!=P2) -> !PositiveReview(R,P1)
for r in reviewer_to_paper:
for p1 in reviewer_to_paper[r]:
for p2 in reviewer_to_paper[r]:
if p1 == p2: continue
body = [positive_review_rel[(r, p1)] + (False,),
acceptable_rel[p1] + (False,),
acceptable_rel[p2] + (False,)]
head = [positive_review_rel[(r, p2)] + (True,)]
rules.append((1, body, head))
# 1: Reviews(R,P1) & Reviews(R,P2) & !PositiveReview(R,P1) &
# Acceptable(P1) & Acceptable(P2) & (P1!=P2) -> PositiveReview(R,P1)
for r in reviewer_to_paper:
for p1 in reviewer_to_paper[r]:
for p2 in reviewer_to_paper[r]:
if p1 == p2: continue
body = [positive_review_rel[(r, p1)] + (True,),
acceptable_rel[p1] + (False,),
acceptable_rel[p2] + (False,)]
head = [positive_review_rel[(r, p2)] + (False,)]
rules.append((1, body, head))
# 1: !Acceptable(P)
for p in papers:
body = []
head = [acceptable_rel[p] + (True,)]
rules.append((1, body, head))
hard_rules = []
# Acceptable(P) & Submits(A, P) -> Presents(A)
for a, p in submits_rel:
body = [acceptable_rel[p] + (False,),
(True, 1.0, False)]
head = [presents_rel[a] + (False,)]
hard_rules.append((None, body, head))
# !Acceptable(P) & Submits(A,P) -> !Presents(A)
for a, p in submits_rel:
body = [acceptable_rel[p] + (True,),
(True, 1.0, False)]
head = [presents_rel[a] + (True,)]
hard_rules.append((None, body, head))
affiliation_dict = dict()
affiliation_fname = ojoin(data_path, 'affiliation.txt')
with open(affiliation_fname) as f:
for line in f:
line = line.strip()
if not line: continue
[author, institute, truth] = line.split()
assert(float(truth)==1)
affiliation_dict[author] = institute
high_rank_rel = dict()
high_rank_fname = ojoin(data_path, 'highRank.txt')
with open(high_rank_fname) as f:
for line in f:
line = line.strip()
if not line: continue
[institute, truth] = line.split()
high_rank_rel[institute] = float(truth)
student_rel = dict()
student_fname = ojoin(data_path, 'student.txt')
with open(student_fname) as f:
for line in f:
line = line.strip()
if not line: continue
[author, truth] = line.split()
student_rel[author] = float(truth)
# F1:Affiliation(v, u) & highRank(u)
# F2: student(u)
# d: presents(A)
counts = []
for a in authors:
F1 = high_rank_rel[affiliation_dict[a]]
F2 = student_rel[a]
d = presents_rel[a]
counts.append((F1, F2, d))
atoms = {}
atoms['review'] = positive_review_truth
atoms['acceptable'] = acceptable_truth
atoms['presents'] = presents_truth
return rules, hard_rules, counts, atoms
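# Hedged usage sketch (editor's addition): ground() expects data_path to contain
# whitespace-separated author.txt, paper.txt, positiveReview.txt, positiveSummary.txt,
# acceptable.txt, submits.txt, affiliation.txt, highRank.txt and student.txt files.
# The directory name below is hypothetical.
if __name__ == "__main__":
    rules, hard_rules, counts, atoms = ground("./review_data")
    print("%d soft rules, %d hard rules, %d authors" %
          (len(rules), len(hard_rules), len(counts)))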
|
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetNwSrcAction,
SetNwDstAction,
SetTpSrcAction,
SetTpDstAction,
SetFieldAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_TCP,
IP_PROTO_UDP)
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def of_demo_40():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 40 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify IP packet example1"
priority = 900
cookie = 1300
match_in_port = 10
match_eth_type = ETH_TYPE_IPv4
match_ip_proto = IP_PROTO_TCP
match_ipv4_src_addr = "192.1.2.0/24"
match_ipv4_dst_addr = "173.194.123.40/32"
match_tcp_dst_port = 8080
act_mod_ipv4_src_addr = "212.16.1.8/32"
act_mod_ipv4_dst_addr = "52.87.12.11/32"
act_mod_tcp_src_port = 8888
act_mod_tcp_dst_port = 9999
act_out_port = 119
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IP Protocol (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" TCP Destination Port (%s)" %
(match_in_port,
hex(match_eth_type),
match_ip_proto,
match_ipv4_src_addr,
match_ipv4_dst_addr,
match_tcp_dst_port))
print (" Actions: Modify IPv4 Source Address (%s)\n"
" Modify IPv4 Destination Address (%s)\n"
" Modify TCP Source Port (%s)\n"
" Modify TCP Destination Port (%s)\n"
" Output (%s)" %
(act_mod_ipv4_src_addr,
act_mod_ipv4_dst_addr,
act_mod_tcp_src_port,
act_mod_tcp_dst_port,
act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetNwSrcAction(action_order)
action.set_nw_src(act_mod_ipv4_src_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetNwDstAction(action_order)
action.set_nw_dst(act_mod_ipv4_dst_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetTpSrcAction(action_order)
action.set_tp_src(act_mod_tcp_src_port)
instruction.add_apply_action(action)
action_order += 1
action = SetTpDstAction(action_order)
action.set_tp_dst(act_mod_tcp_dst_port)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ip_proto(match_ip_proto)
match.set_ipv4_src(match_ipv4_src_addr)
match.set_ipv4_dst(match_ipv4_dst_addr)
match.set_tcp_dst_port(match_tcp_dst_port)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
    print (flow_entry1.get_payload())
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify IP packet example2"
priority = 900
cookie = 1300
match_in_port = 110
match_eth_type = ETH_TYPE_IPv4
match_ip_proto = IP_PROTO_UDP
match_ipv4_src_addr = "10.1.0.0/16"
match_ipv4_dst_addr = "168.1.1.101/32"
match_udp_dst_port = 1812
act_mod_ipv4_src_addr = "172.101.1.9/32"
act_mod_ipv4_dst_addr = "172.101.1.1/32"
act_mod_udp_src_port = 5555
act_mod_udp_dst_port = 7777
act_out_port = 120
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IP Protocol (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" UDP Destination Port (%s)" %
(match_in_port,
hex(match_eth_type),
match_ip_proto,
match_ipv4_src_addr,
match_ipv4_dst_addr,
match_udp_dst_port))
print (" Actions: Set Field (IPv4 Source Address %s)\n"
" Set Field (IPv4 Destination Address %s)\n"
" Set Field (UDP Source Port %s)\n"
" Set Field (UDP Destination Port %s)\n"
" Output (%s)" %
(act_mod_ipv4_src_addr,
act_mod_ipv4_dst_addr,
act_mod_udp_src_port,
act_mod_udp_dst_port,
act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetFieldAction(action_order)
action.set_ipv4_src(act_mod_ipv4_src_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetFieldAction(action_order)
action.set_ipv4_dst(act_mod_ipv4_dst_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetFieldAction(action_order)
action.set_udp_src_port(act_mod_udp_src_port)
instruction.add_apply_action(action)
action_order += 1
action = SetFieldAction(action_order)
action.set_udp_dst_port(act_mod_udp_dst_port)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ip_proto(match_ip_proto)
match.set_ipv4_src(match_ipv4_src_addr)
match.set_ipv4_dst(match_ipv4_dst_addr)
match.set_udp_dst_port(match_udp_dst_port)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
    print (flow_entry2.get_payload())
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_40()
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
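# Hedged note (editor's addition): in a 'git archive' export with the
# export-subst attribute set, the placeholders above expand to values roughly like
#   git_refnames = " (HEAD -> master, tag: v1.2.3)"
#   git_full = "1076c978a8d3cfc70f2f9d986e3e4b6fd3dca287"
#   git_date = "2020-01-01 12:00:00 +0000"
# while an ordinary checkout keeps the literal "$Format:...$" strings, which is
# how git_versions_from_keywords() below detects that it is not in an archive.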
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "rapidtide/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
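# Hedged worked example (editor's addition, assumed values): for a describe
# output of "v1.2.3-4-gabc1234-dirty" with tag_prefix "v", the parsing above
# yields pieces roughly like
#   {"closest-tag": "1.2.3", "distance": 4, "short": "abc1234", "dirty": True,
#    "long": "<full revision id>", "error": None, "date": "...T...+0000"}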
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
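# Hedged illustration (editor's addition, assumed values): with closest-tag
# "1.2.3", distance 4, short "abc1234" and dirty True, render_pep440() returns
# "1.2.3+4.gabc1234.dirty"; with distance 0 and a clean tree it returns "1.2.3".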
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
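# Illustrative sketch added for clarity; it is not part of the original
# module and is never called. It shows how one hypothetical set of pieces
# renders under a few of the styles above (all values are made up).
def _render_example():
    """Never called; the pieces dict below is hypothetical."""
    pieces = {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234",
              "long": "abc1234deadbeefabc1234deadbeefabc1234dea",
              "dirty": False, "error": None,
              "date": "2019-05-04T10:00:00+0000"}
    assert render(pieces, "pep440")["version"] == "1.2.0+3.gabc1234"
    assert render(pieces, "pep440-post")["version"] == "1.2.0.post3+gabc1234"
    assert render(pieces, "git-describe")["version"] == "1.2.0-3-gabc1234"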
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import os
import re
import subprocess
import sys
import threading
try:
import git_cache
except ImportError:
for p in os.environ['PATH'].split(os.pathsep):
if (os.path.basename(p) == 'depot_tools' and
os.path.exists(os.path.join(p, 'git_cache.py'))):
sys.path.append(p)
import git_cache
# Show more information about the commands being executed.
VERBOSE = False
# The longest any single subprocess will be allowed to run.
TIMEOUT = 40 * 60
class AbnormalExit(Exception):
pass
class StdioBuffer(object):
def __init__(self, name, out_queue):
self.closed = False
self.line_buffer = cStringIO.StringIO()
self.name = name
self.out_q = out_queue
def write(self, msg):
"""Write into the buffer. Only one thread should call write() at a time."""
assert not self.closed
self.line_buffer.write(msg)
# We can use '\n' instead of os.linesep because universal newlines is
# set to true below.
if '\n' in msg:
# We can assert that lines has at least 2 items if '\n' is present.
lines = self.line_buffer.getvalue().split('\n')
for line in lines[:-1]:
self.out_q.put('%s> %s' % (self.name, line))
self.line_buffer.close()
self.line_buffer = cStringIO.StringIO()
self.line_buffer.write(lines[-1])
def close(self):
# Empty out the line buffer.
self.write('\n')
self.out_q.put(None)
self.closed = True
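# Illustrative sketch added for clarity; it is not part of the original
# script and is never called. StdioBuffer only emits complete, name-prefixed
# lines to the queue-like object it is given, so a plain list-backed stub is
# enough to observe its behaviour.
def _StdioBufferExample():
  class _ListQueue(object):
    def __init__(self):
      self.items = []
    def put(self, item):
      self.items.append(item)
  q = _ListQueue()
  buf = StdioBuffer('src', q)
  buf.write('fetching ori')   # no newline yet, so nothing is emitted
  buf.write('gin\ndone\n')    # two complete lines are emitted
  buf.close()                 # flushes the remainder, then puts None
  assert q.items[:2] == ['src> fetching origin', 'src> done']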
def GetStatusOutput(cmd, cwd=None, out_buffer=None):
"""Return (status, output) of executing cmd in a shell."""
if VERBOSE:
print >> sys.stderr, ''
print >> sys.stderr, '[DEBUG] Running "%s"' % cmd
def _thread_main():
thr = threading.current_thread()
thr.status = -1
thr.stdout = ''
thr.stderr = '<timeout>'
try:
if out_buffer:
proc = subprocess.Popen(cmd, shell=True,
cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
buf = proc.stdout.read(1)
if buf == '\r': # We want carriage returns in Linux to be newlines.
buf = '\n'
if not buf:
break
out_buffer.write(buf)
stdout = ''
proc.wait()
out_buffer.close()
else:
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True,
cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout, _) = proc.communicate()
except Exception, e:
thr.status = -1
thr.stdout = ''
thr.stderr = repr(e)
else:
thr.status = proc.returncode
thr.stdout = stdout
thr.stderr = ''
thr = threading.Thread(target=_thread_main)
thr.daemon = True
thr.start()
thr.join(TIMEOUT)
# pylint: disable=E1101
if VERBOSE:
short_output = ' '.join(thr.stdout.splitlines())
short_output = short_output.strip(' \t\n\r')
print >> sys.stderr, (
'[DEBUG] Output: %d, %-60s' % (thr.status, short_output))
return (thr.status, thr.stdout)
def Git(git_repo, command, is_mirror=False, out_buffer=None):
"""Execute a git command within a local git repo."""
if is_mirror:
if git_repo:
cmd = 'git --git-dir=%s %s' % (git_repo, command)
else:
cmd = 'git %s' % command
cwd = None
else:
cmd = 'git %s' % command
cwd = git_repo
(status, output) = GetStatusOutput(cmd, cwd, out_buffer)
# For Abnormal Exit, Windows returns -1, Posix returns 128.
if status in [-1, 128]:
raise AbnormalExit('Failed to run %s. Exited Abnormally. output %s' %
(cmd, output))
elif status != 0:
raise Exception('Failed to run %s. error %d. output %s' % (cmd, status,
output))
return (status, output)
def Clone(git_url, git_repo, is_mirror, out_buffer=None):
"""Clone a repository."""
cmd = 'clone'
if is_mirror:
cmd += ' --mirror'
cmd += ' %s %s' % (git_url, git_repo)
if not is_mirror and not os.path.exists(git_repo):
os.makedirs(git_repo)
return Git(None, cmd, is_mirror=is_mirror, out_buffer=out_buffer)
def PopulateCache(git_url, shallow=False):
# --shallow by default checks out 10000 revisions, but for really large
# repos like adobe ones, we want significantly less than 10000.
depth = None
if shallow and 'adobe' in git_url:
depth = 10
mirror = git_cache.Mirror(git_url, print_func=lambda *args: None)
mirror.populate(depth=depth, shallow=shallow)
return mirror.mirror_path
def Fetch(git_repo, git_url, is_mirror):
"""Fetch the latest objects for a given git repository."""
# Always update the upstream url
Git(git_repo, 'config remote.origin.url %s' % git_url)
Git(git_repo, 'fetch origin', is_mirror)
def Ping(git_repo, verbose=False):
"""Confirm that a remote repository URL is valid."""
status, stdout = GetStatusOutput('git ls-remote ' + git_repo)
if status != 0 and verbose:
print >> sys.stderr, stdout
return status == 0
def CreateLessThanOrEqualRegex(number):
""" Return a regular expression to test whether an integer less than or equal
to 'number' is present in a given string.
"""
# In three parts, build a regular expression that matches any number smaller
# than or equal to 'number'.
# For example, 78356 would give a regular expression that looks like:
# Part 1
# (78356| # 78356
# Part 2
# 7835[0-5]| # 78350-78355
# 783[0-4][0-9]| # 78300-78349
# 78[0-2][0-9][0-9]| # 78000-78299
# 7[0-7][0-9][0-9][0-9]| # 70000-77999
# [0-6][0-9][0-9][0-9][0-9]| # 10000-69999
# Part 3
# [0-9][0-9][0-9][0-9]| # 1000-9999
# [0-9][0-9][0-9]| # 100-999
# [0-9][0-9]| # 10-99
# [0-9]) # 0-9
# Part 1: Create an array with all the regexes, as described above.
# Prepopulate it with the number itself.
number = str(number)
expressions = [number]
# Convert the number to a list, so we can translate digits in it to
# expressions.
num_list = list(number)
num_len = len(num_list)
# Part 2: Go through all the digits in the number, starting from the end.
# Each iteration appends a line to 'expressions'.
for index in range (num_len - 1, -1, -1):
# Convert this digit back to an integer.
digit = int(num_list[index])
# Part 2.1: No processing if this digit is a zero.
if digit == 0:
continue
# Part 2.2: We switch the current digit X by a range "[0-(X-1)]".
num_list[index] = '[0-%d]' % (digit - 1)
# Part 2.3: We set all following digits to be "[0-9]".
# Since we just decremented a digit in a more significant position, all
# following digits don't matter. The possible numbers will always be smaller
# than before we decremented.
for next_digit in range(index + 1, num_len):
num_list[next_digit] = '[0-9]'
# Part 2.4: Add this new sub-expression to the list.
expressions.append(''.join(num_list))
# Part 3: We add all the full ranges to match all numbers that are at least
# one order of magnitude smaller than the original numbers.
for index in range(1, num_len):
expressions.append('[0-9]'*index)
# All done. We now have our final regular expression.
regex = '(%s)' % ('|'.join(expressions))
return regex
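# Illustrative sketch added for clarity; it is not part of the original
# script and is never called. The sample git-svn-id lines are made up; the
# point is that the generated pattern matches any revision number less than
# or equal to the one given, which is what the fuzzy Search() below relies on.
def _CreateLessThanOrEqualRegexExample():
  pattern = re.compile('@%s ' % CreateLessThanOrEqualRegex(123))
  assert pattern.search('git-svn-id: svn://example@123 uuid')
  assert pattern.search('git-svn-id: svn://example@99 uuid')
  assert not pattern.search('git-svn-id: svn://example@124 uuid')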
class SearchError(Exception):
pass
def _SearchImpl(git_repo, svn_rev, is_mirror, refspec, fetch_url, regex):
def _FindRevForCommitish(git_repo, commitish, is_mirror):
_, output = Git(git_repo, 'cat-file commit %s' % commitish, is_mirror)
match = re.match(r'git-svn-id: [^\s@]+@(\d+) \S+$', output.splitlines()[-1])
if match:
return int(match.group(1))
else:
# The last commit isn't from svn, but maybe the repo was converted to pure
# git at some point, so the last svn commit is somewhere farther back.
_, output = Git(
git_repo, ('log -E --grep="^git-svn-id: [^@]*@[0-9]* [A-Za-z0-9-]*$" '
'-1 --format="%%H" %s') % commitish, is_mirror)
assert output, 'no match on %s' % commitish
# Check if svn_rev is newer than the current refspec revision.
try:
found_rev = _FindRevForCommitish(git_repo, refspec, is_mirror)
# Sometimes this fails because it's looking in a branch that hasn't been
# fetched from upstream yet. Let it fetch and try again.
except AbnormalExit:
found_rev = None
if (not found_rev or found_rev < int(svn_rev)) and fetch_url:
if VERBOSE:
print >> sys.stderr, (
'Fetching %s %s [%s < %s]' % (git_repo, refspec, found_rev, svn_rev))
Fetch(git_repo, fetch_url, is_mirror)
found_rev = _FindRevForCommitish(git_repo, refspec, is_mirror)
# Find the first commit matching the given git-svn-id regex.
_, output = Git(
git_repo,
('log -E --grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" '
'-1 --format="%%H" %s') % (regex, refspec),
is_mirror)
output = output.strip()
if not re.match('^[0-9a-fA-F]{40}$', output):
raise SearchError('Cannot find revision %s in %s:%s' % (svn_rev, git_repo,
refspec))
# Check if it actually matched the svn_rev that was requested.
found_rev = _FindRevForCommitish(git_repo, output, is_mirror)
found_msg = svn_rev
if found_rev != int(svn_rev):
found_msg = '%s [actual: %s]' % (svn_rev, found_rev)
print >> sys.stderr, '%s: %s <-> %s' % (git_repo, output, found_msg)
return output
def SearchExact(git_repo, svn_rev, is_mirror, refspec='FETCH_HEAD',
fetch_url=None):
"""Return the Git commit id exactly matching the given SVN revision.
If fetch_url is not None, will update repo if revision is newer."""
regex = str(svn_rev)
return _SearchImpl(git_repo, svn_rev, is_mirror, refspec, fetch_url, regex)
def Search(git_repo, svn_rev, is_mirror, refspec='FETCH_HEAD', fetch_url=None):
"""Return the Git commit id fuzzy matching the given SVN revision.
If fetch_url is not None, will update repo if revision is newer."""
regex = CreateLessThanOrEqualRegex(svn_rev)
return _SearchImpl(git_repo, svn_rev, is_mirror, refspec, fetch_url, regex)
|
|
"""
Tests for splitter objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar, Aneesh Pappu"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tempfile
import unittest
import numpy as np
import deepchem as dc
class TestSplitters(unittest.TestCase):
"""
Test some basic splitters.
"""
def test_singletask_random_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_index_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
# TODO(rbharath): The IndexSplitter() had a bug with splitting sharded
# data. Add a test for properly splitting sharded data. Perhaps using
# reshard() to handle this?
def test_singletask_scaffold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_singletask_stratified_split(self):
"""
Test singletask SingletaskStratifiedSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
stratified_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
stratified_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_butina_split(self):
"""
Test singletask ButinaSplitter class.
"""
solubility_dataset = dc.data.tests.load_butina_data()
scaffold_splitter = dc.splits.ButinaSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset)
print(len(train_data), len(valid_data))
assert len(train_data) == 7
assert len(valid_data) == 3
assert len(test_data) == 0
def test_singletask_random_k_fold_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = random_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold]
# Verify fold length is 10/K == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(fold_datasets)
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_index_k_fold_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
index_splitter = dc.splits.IndexSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = index_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold]
# Verify fold length is 10/K == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(fold_datasets)
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_scaffold_k_fold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = scaffold_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold]
# Verify fold length is 10/K == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(fold_datasets)
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_stratified_column_indices(self):
"""
Test RandomStratifiedSplitter's split method on a simple singletask dataset.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
column_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
split_index = column_indices[0]
# The split index should partition dataset in half.
assert np.count_nonzero(y[:split_index]) == 10
def test_singletask_stratified_column_indices_mask(self):
"""
Test RandomStratifiedSplitter's split method on dataset with mask.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
# Test case where some weights are zero (i.e. masked)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
# Set half the positives to have zero weight
w[:n_positives // 2] = 0
ids = np.arange(n_samples)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
column_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
split_index = column_indices[0]
# There are 10 nonzero actives.
# The split index should partition this into half, so expect 5
w_present = (w != 0)
y_present = y * w_present
assert np.count_nonzero(y_present[:split_index]) == 5
def test_multitask_stratified_column_indices(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 100
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
stratified_splitter = dc.splits.RandomStratifiedSplitter()
split_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
for task in range(n_tasks):
split_index = split_indices[task]
task_actives = np.count_nonzero(y[:, task])
# The split index should partition dataset in half.
assert np.count_nonzero(y[:split_index, task]) == int(task_actives / 2)
def test_multitask_stratified_column_indices_masked(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 200
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Mask half the examples
w[:n_samples // 2] = 0
stratified_splitter = dc.splits.RandomStratifiedSplitter()
split_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
w_present = (w != 0)
y_present = y * w_present
for task in range(n_tasks):
split_index = split_indices[task]
task_actives = np.count_nonzero(y_present[:, task])
# The split index should partition dataset in half.
assert np.count_nonzero(y_present[:split_index, task]) == int(
task_actives / 2)
def test_singletask_stratified_split(self):
"""
Test RandomStratifiedSplitter on a singletask split.
"""
np.random.seed(2314)
# Test singletask case.
n_samples = 20
n_positives = 10
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
dataset_1, dataset_2 = stratified_splitter.split(dataset, frac_split=.5)
# Should have split cleanly in half (picked random seed to ensure this)
assert len(dataset_1) == 10
assert len(dataset_2) == 10
# Check positives are correctly distributed
y_1 = dataset_1.y
assert np.count_nonzero(y_1) == n_positives / 2
y_2 = dataset_2.y
assert np.count_nonzero(y_2) == n_positives / 2
def test_singletask_stratified_k_fold_split(self):
"""
Test RandomStratifiedSplitter k-fold class.
"""
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros(n_samples)
y[:n_positives] = 1
w = np.ones(n_samples)
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
ids_set = set(dataset.ids)
K = 5
fold_datasets = stratified_splitter.k_fold_split(dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold]
# Verify fold length is 100/K == 20
# Note: This wouldn't work for multitask stratification.
# assert len(fold_dataset) == n_samples/K
fold_labels = fold_dataset.y
# Verify that each fold has n_positives/K = 4 positive examples.
assert np.count_nonzero(fold_labels == 1) == n_positives / K
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(fold_datasets)
assert len(merged_dataset) == len(dataset)
assert sorted(merged_dataset.ids) == (sorted(dataset.ids))
def test_multitask_random_split(self):
"""
Test multitask RandomSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_index_split(self):
"""
Test multitask IndexSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
index_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
index_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_scaffold_split(self):
"""
Test multitask ScaffoldSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_stratified_multitask_split(self):
"""
Test multitask RandomStratifiedSplitter class
"""
# Sparsity is determined by the number of w weights that are 0 for a given
# task. The structure of the w array is such that each row corresponds to a
# sample. The loaded sparse dataset has many rows containing only zeros.
sparse_dataset = dc.data.tests.load_sparse_multitask_dataset()
stratified_splitter = dc.splits.RandomStratifiedSplitter()
datasets = stratified_splitter.train_valid_test_split(
sparse_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
train_data, valid_data, test_data = datasets
for dataset_index, dataset in enumerate(datasets):
w = dataset.w
# verify that there are no rows (samples) in weights matrix w
# that have no hits.
assert len(np.where(~w.any(axis=1))[0]) == 0
if __name__ == "__main__":
import nose
nose.run(defaultTest=__name__)
|
|
# encoding: utf-8
import contextlib
import os
from xml.dom import minidom
from config import CREATE_OUTPUT, TEAMBEAM_EXE, UPLOAD_FOLDER
from engine.datastore.models.paper import Paper
from engine.datastore.models.reference import ReferenceType
from engine.datastore.models.text import TextType
from engine.importer.importer_base import ImporterBase
from engine.utils.exceptions.import_exceptions import WrongReferenceError
EXTENSION_TEXT = ".txt"
EXTENSION_STRUCTURE = ".xml"
OUTPUT_FILENAME = "output.txt"
OUTPUT = open(OUTPUT_FILENAME, "w") if CREATE_OUTPUT else None
IGNORE_CASES = ["heading", "<table border=\"1\" summary=\"\""]
class ImporterTeambeam(ImporterBase):
def __init__(self, run_exe=True):
self.run_exe = run_exe
@staticmethod
def __add_values_to_references(paper, reference_values):
full_round = False
i = j = 0
while j < len(reference_values):
value, data = reference_values[j]
try:
if value == 'ref-authorGivenName':
last_value, last_data = reference_values[j - 1]
surname = last_data if last_value == 'ref-authorSurname' else ''
paper.references[i].add_author(ReferenceType.AUTHOR, data, surname)
elif value == 'ref-authorOther':
name = data.split(',')
if len(name) < 2:
name.append('')
paper.references[i].add_author(ReferenceType.AUTHOR_OTHER, name[1], name[0])
elif value == 'ref-title':
paper.references[i].add_title(data)
elif value == 'ref-other':
paper.references[i].add_reference_info(ReferenceType.OTHER, data)
elif value == 'ref-source':
paper.references[i].add_reference_info(ReferenceType.SOURCE, data)
elif value == 'ref-date':
paper.references[i].add_reference_info(ReferenceType.DATE, data)
elif value == 'ref-note':
paper.references[i].add_reference_info(ReferenceType.NOTE, data)
elif value == 'ref-location':
paper.references[i].add_reference_info(ReferenceType.LOCATION, data)
elif value == 'ref-publisher':
paper.references[i].add_reference_info(ReferenceType.PUBLISHER, data)
elif value == 'ref-volume':
paper.references[i].add_reference_info(ReferenceType.VOLUME, data)
elif value == 'ref-editor':
paper.references[i].add_reference_info(ReferenceType.EDITOR, data)
elif value == 'ref-issue':
paper.references[i].add_reference_info(ReferenceType.ISSUE, data)
elif value == 'ref-pages':
paper.references[i].add_reference_info(ReferenceType.PAGES, data)
elif value == 'ref-conference':
paper.references[i].add_reference_info(ReferenceType.CONFERENCE, data)
elif CREATE_OUTPUT and (len(value.split()) == 1) and (value != 'ref-authorSurname'):
OUTPUT.write("REFERENCE NOT IN LIST!\n")
OUTPUT.write("Filename: " + paper.filename + " value: " + value + "\ntext: " + data + "\n")
OUTPUT.write("\n")
full_round = False
j += 1
except WrongReferenceError as error:
i += 1
if full_round:
OUTPUT.write("CAN'T FIND CORRECT REFERENCE FOR:\n")
OUTPUT.write(error.value + ":\n")
OUTPUT.write(error.data + "\n\n")
j += 1
full_round = False
elif i == len(paper.references):
full_round = True
i = 0
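# Note on __add_values_to_references above (descriptive comment only, no
# behaviour change): reference_values arrive in document order and each entry
# is offered to paper.references[i]. When that reference rejects the value
# (WrongReferenceError) the next reference is tried; once the index wraps
# past the last reference (full_round) a further rejection writes the value
# to OUTPUT and skips it, and matching continues with the next value.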
@staticmethod
def __delete_files(filename):
path_to_file = UPLOAD_FOLDER + filename
with contextlib.suppress(FileNotFoundError):
os.remove(path_to_file)
os.remove(path_to_file + EXTENSION_TEXT)
os.remove(path_to_file + EXTENSION_STRUCTURE)
@staticmethod
def __add_values_to_authors(paper, author_values):
i = 0
while i < len(author_values):
value, data = author_values[i]
if value == 'authors':
paper.add_authors_text(data)
elif value == 'surname':
prename = ""
middle_name = None
last_value, last_data = author_values[i - 1]
sec_last_value, sec_last_data = author_values[i - 2]
if last_value == 'given-name':
prename = last_data
elif last_value == 'middle-name':
middle_name = last_data
if sec_last_value == 'given-name':
prename = sec_last_data
elif sec_last_value == 'middle-name':
middle_name = sec_last_data
if not len(paper.authors):
paper.add_authors_text('')
paper.authors[-1].add_author(prename, data, middle_name)
elif value == 'emails':
if len(paper.authors):
paper.authors[-1].emails_text = data
elif value == 'email':
if len(paper.authors):
paper.authors[-1].add_email(data)
elif value == 'affiliations' or \
value == 'affiliation':
if len(paper.authors):
paper.authors[-1].add_affiliation(data)
i += 1
def import_paper(self, filename):
paper = Paper({'filename': filename})
path_to_file = UPLOAD_FOLDER + filename
if self.run_exe:
os.system('cd ' + TEAMBEAM_EXE + ' && sh pdf-to-xml -a \"' + path_to_file + '\"')
with open(path_to_file + EXTENSION_TEXT, "r", encoding="utf8") as textfile:
data = textfile.read()
tree = minidom.parse(path_to_file + EXTENSION_STRUCTURE)
features = tree.getElementsByTagName("feature")
reference_values = []
author_values = []
for feature in features:
value = feature.getAttribute("value").rstrip()
parent = feature.parentNode
start = int(parent.getAttribute("start"))
end = int(parent.getAttribute("end"))
text = data[start:end].rstrip('\n')
if value == 'section':
paper.add_section(text)
elif value == 'abstract':
paper.add_abstract(text)
elif value == 'title':
paper.set_title(text)
elif value == 'subsection':
paper.add_subsection(text)
elif value == 'subsubsection':
paper.add_subsubsection(text)
elif value == 'main':
paper.add_text_to_current_section(TextType.MAIN, text)
elif value == 'table':
paper.add_text_to_current_section(TextType.TABLE, text)
elif value == 'sparse':
paper.add_text_to_current_section(TextType.SPARSE, text)
elif value == 'caption':
paper.add_text_to_current_section(TextType.CAPTION, text)
elif value == 'paragraph':
paper.add_text_to_current_section(TextType.PARAGRAPH, text)
elif value == 'citation':
paper.add_text_to_current_section(TextType.CITATION, text)
elif value == 'reference':
paper.add_reference(text)
elif 'ref-' in value:
reference_values.append([value, text])
elif value == 'authors' or \
value == 'given-name' or \
value == 'middle-name' or \
value == 'surname' or \
value == 'email' or \
value == 'emails' or \
value == 'affiliations' or \
value == 'affiliation':
author_values.append([value, text])
else:
if CREATE_OUTPUT and (len(value.split()) == 1) and (not any(s in value for s in IGNORE_CASES)):
OUTPUT.write("VALUE NOT IN LIST!\n")
OUTPUT.write("Filename: " + filename + " value: " + value + "\ntext: " + text + "\n")
OUTPUT.write("\n")
self.__add_values_to_references(paper, reference_values)
self.__add_values_to_authors(paper, author_values)
self.__delete_files(filename)
return paper
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, missing-docstring, no-init, old-style-class, multiple-statements
# pylint: disable=arguments-differ, too-many-arguments, no-member
"""Visualization callback function
"""
try:
import datetime
except ImportError:
class Datetime_Failed_To_Import: pass
datetime = Datetime_Failed_To_Import
try:
import bokeh.plotting
except ImportError:
pass
try:
from collections import defaultdict
except ImportError:
class Defaultdict_Failed_To_Import: pass
defaultdict = Defaultdict_Failed_To_Import
try:
import pandas as pd
except ImportError:
class Pandas_Failed_To_Import: pass
pd = Pandas_Failed_To_Import
import time
# pylint: enable=missing-docstring, no-init, old-style-class, multiple-statements
def _add_new_columns(dataframe, metrics):
"""Add new metrics as new columns to selected pandas dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Selected dataframe needs to be modified.
metrics : metric.EvalMetric
New metrics to be added.
"""
#TODO(leodirac): we don't really need to do this on every update. Optimize
new_columns = set(metrics.keys()) - set(dataframe.columns)
for col in new_columns:
dataframe[col] = None
def _extend(baseData, newData):
"""Assuming a is shorter than b, copy the end of b onto a
"""
baseData.extend(newData[len(baseData):])
class PandasLogger(object):
"""Logs statistics about training run into Pandas dataframes.
Records three separate dataframes: train, eval, epoch.
Parameters
----------
batch_size: int
batch_size of data
frequent: int
How many training mini-batches between calculations.
Defaults to calculating every 50 batches.
(Eval data is stored once per epoch over the entire
eval data set.)
"""
def __init__(self, batch_size, frequent=50):
self.batch_size = batch_size
self.frequent = frequent
self._dataframes = {
'train': pd.DataFrame(),
'eval': pd.DataFrame(),
'epoch': pd.DataFrame(),
}
self.last_time = time.time()
self.start_time = datetime.datetime.now()
self.last_epoch_time = datetime.datetime.now()
@property
def train_df(self):
"""The dataframe with training data.
This has metrics for training minibatches, logged every
"frequent" batches. (frequent is a constructor param)
"""
return self._dataframes['train']
@property
def eval_df(self):
"""The dataframe with evaluation data.
This has validation scores calculated at the end of each epoch.
"""
return self._dataframes['eval']
@property
def epoch_df(self):
"""The dataframe with epoch data.
This has timing information.
"""
return self._dataframes['epoch']
@property
def all_dataframes(self):
"""Return a dict of dataframes
"""
return self._dataframes
def elapsed(self):
"""Calcaulate the elapsed time from training starting.
"""
return datetime.datetime.now() - self.start_time
def append_metrics(self, metrics, df_name):
"""Append new metrics to selected dataframes.
Parameters
----------
metrics : metric.EvalMetric
New metrics to be added.
df_name : str
Name of the dataframe to be modified.
"""
dataframe = self._dataframes[df_name]
_add_new_columns(dataframe, metrics)
dataframe.loc[len(dataframe)] = metrics
def train_cb(self, param):
"""Callback funtion for training.
"""
if param.nbatch % self.frequent == 0:
self._process_batch(param, 'train')
def eval_cb(self, param):
"""Callback function for evaluation
"""
self._process_batch(param, 'eval')
def _process_batch(self, param, dataframe):
"""Update parameters for selected dataframe after a completed batch
Parameters
----------
dataframe : str
Name of the dataframe ('train' or 'eval') to be updated.
"""
now = time.time()
if param.eval_metric is not None:
metrics = dict(param.eval_metric.get_name_value())
param.eval_metric.reset()
else:
metrics = {}
speed = self.frequent / (now - self.last_time)
metrics['batches_per_sec'] = speed * self.batch_size
metrics['records_per_sec'] = speed
metrics['elapsed'] = self.elapsed()
metrics['minibatch_count'] = param.nbatch
metrics['epoch'] = param.epoch
self.append_metrics(metrics, dataframe)
self.last_time = now
def epoch_cb(self):
"""Callback function after each epoch. Now it records each epoch time
and append it to epoch dataframe.
"""
metrics = {}
metrics['elapsed'] = self.elapsed()
now = datetime.datetime.now()
metrics['epoch_time'] = now - self.last_epoch_time
self.append_metrics(metrics, 'epoch')
self.last_epoch_time = now
def callback_args(self):
"""returns **kwargs parameters for model.fit()
to enable all callbacks. e.g.
model.fit(X=train, eval_data=test, **pdlogger.callback_args())
"""
return {
'batch_end_callback': self.train_cb,
'eval_end_callback': self.eval_cb,
'epoch_end_callback': self.epoch_cb,
}
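# Illustrative sketch added for clarity; it is not part of the original
# module and is never called. It shows how PandasLogger accumulates one
# dataframe row per append_metrics() call (metric values are made up;
# normally train_cb/eval_cb call append_metrics during model.fit()).
def _pandas_logger_example():
    """Never called; assumes pandas is importable."""
    pdlogger = PandasLogger(batch_size=128, frequent=50)
    pdlogger.append_metrics({'accuracy': 0.71, 'nbatch': 50}, 'train')
    pdlogger.append_metrics({'accuracy': 0.74, 'nbatch': 100}, 'train')
    # train_df now has two rows and one column per metric key.
    return pdlogger.train_df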
class LiveBokehChart(object):
"""Callback object that renders a bokeh chart in a jupyter notebook
that gets updated as the training run proceeds.
Requires a PandasLogger to collect the data it will render.
This is an abstract base-class. Sub-classes define the specific chart.
"""
def __init__(self, pandas_logger, metric_name, display_freq=10,
batch_size=None, frequent=50):
if pandas_logger:
self.pandas_logger = pandas_logger
else:
self.pandas_logger = PandasLogger(batch_size=batch_size, frequent=frequent)
self.display_freq = display_freq
self.last_update = time.time()
#NOTE: would be nice to auto-detect the metric_name if there's only one.
self.metric_name = metric_name
bokeh.io.output_notebook()
self.handle = self.setup_chart()
def setup_chart(self):
"""Render a bokeh object and return a handle to it.
"""
raise NotImplementedError("Incomplete base class: LiveBokehChart must be sub-classed")
def update_chart_data(self):
"""Update the bokeh object with new data.
"""
raise NotImplementedError("Incomplete base class: LiveBokehChart must be sub-classed")
def interval_elapsed(self):
"""Check whether it is time to update plot.
Returns
-------
Boolean value of whether to update now.
"""
return time.time() - self.last_update > self.display_freq
def _push_render(self):
"""Render the plot with bokeh.io and push to notebook.
"""
bokeh.io.push_notebook(handle=self.handle)
self.last_update = time.time()
def _do_update(self):
"""Update the plot chart data and render the updates.
"""
self.update_chart_data()
self._push_render()
def batch_cb(self, param):
"""Callback function after a completed batch.
"""
if self.interval_elapsed():
self._do_update()
def eval_cb(self, param):
"""Callback function after an evaluation.
"""
# After eval results, force an update.
self._do_update()
def callback_args(self):
"""returns **kwargs parameters for model.fit()
to enable all callbacks. e.g.
model.fit(X=train, eval_data=test, **pdlogger.callback_args())
"""
return {
'batch_end_callback': self.batch_cb,
'eval_end_callback': self.eval_cb,
}
class LiveTimeSeries(LiveBokehChart):
"""Plot the elasped time during live learning.
"""
def __init__(self, **fig_params):
self.fig = bokeh.plotting.Figure(x_axis_type='datetime',
x_axis_label='Elapsed time', **fig_params)
super(LiveTimeSeries, self).__init__(None, None) # TODO: clean up this class hierarchy
def setup_chart(self):
self.start_time = datetime.datetime.now()
self.x_axis_val = []
self.y_axis_val = []
self.fig.line(self.x_axis_val, self.y_axis_val)
return bokeh.plotting.show(self.fig, notebook_handle=True)
def elapsed(self):
"""Calculate elasped time from starting
"""
return datetime.datetime.now() - self.start_time
def update_chart_data(self, value):
self.x_axis_val.append(self.elapsed())
self.y_axis_val.append(value)
self._push_render()
class LiveLearningCurve(LiveBokehChart):
"""Draws a learning curve with training & validation metrics
over time as the network trains.
"""
def __init__(self, metric_name, display_freq=10, frequent=50):
self.frequent = frequent
self.start_time = datetime.datetime.now()
self._data = {
'train': {'elapsed': [],},
'eval': {'elapsed': [],},
}
super(LiveLearningCurve, self).__init__(None, metric_name, display_freq, frequent)
def setup_chart(self):
self.fig = bokeh.plotting.Figure(x_axis_type='datetime',
x_axis_label='Training time')
#TODO(leodirac): There's got to be a better way to
# get a bokeh plot to dynamically update as a pandas dataframe changes,
# instead of copying into a list.
# I can't figure it out though. Ask a pyData expert.
self.x_axis_val1 = []
self.y_axis_val1 = []
self.train1 = self.fig.line(self.x_axis_val1, self.y_axis_val1, line_dash='dotted',
alpha=0.3, legend="train")
self.train2 = self.fig.circle(self.x_axis_val1, self.y_axis_val1, size=1.5,
line_alpha=0.3, fill_alpha=0.3, legend="train")
self.train2.visible = False # Turn this on later.
self.x_axis_val2 = []
self.y_axis_val2 = []
self.valid1 = self.fig.line(self.x_axis_val2, self.y_axis_val2,
line_color='green',
line_width=2,
legend="validation")
self.valid2 = self.fig.circle(self.x_axis_val2,
self.y_axis_val2,
line_color='green',
line_width=2, legend=None)
self.fig.legend.location = "bottom_right"
self.fig.yaxis.axis_label = self.metric_name
return bokeh.plotting.show(self.fig, notebook_handle=True)
def _do_update(self):
self.update_chart_data()
self._push_render()
def batch_cb(self, param):
if param.nbatch % self.frequent == 0:
self._process_batch(param, 'train')
if self.interval_elapsed():
self._do_update()
def eval_cb(self, param):
# After eval results, force an update.
self._process_batch(param, 'eval')
self._do_update()
def _process_batch(self, param, df_name):
"""Update selected dataframe after a completed batch
Parameters
----------
df_name : str
Name of the dataframe to be updated.
"""
if param.eval_metric is not None:
metrics = dict(param.eval_metric.get_name_value())
param.eval_metric.reset()
else:
metrics = {}
metrics['elapsed'] = datetime.datetime.now() - self.start_time
for key, value in metrics.items():
if key not in self._data[df_name]:
self._data[df_name][key] = []
self._data[df_name][key].append(value)
def update_chart_data(self):
dataframe = self._data['train']
if len(dataframe['elapsed']):
_extend(self.x_axis_val1, dataframe['elapsed'])
_extend(self.y_axis_val1, dataframe[self.metric_name])
dataframe = self._data['eval']
if len(dataframe['elapsed']):
_extend(self.x_axis_val2, dataframe['elapsed'])
_extend(self.y_axis_val2, dataframe[self.metric_name])
if len(dataframe) > 10:
self.train1.visible = False
self.train2.visible = True
def args_wrapper(*args):
"""Generates callback arguments for model.fit()
for a set of callback objects.
Callback objects like PandasLogger(), LiveLearningCurve()
get passed in. This assembles all their callback arguments.
"""
out = defaultdict(list)
for callback in args:
callback_args = callback.callback_args()
for k, v in callback_args.items():
out[k].append(v)
return dict(out)
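# Illustrative sketch added for clarity; it is not part of the original
# module and is never called. It shows how args_wrapper() merges the
# callback arguments of several callback objects; 'model', 'train' and
# 'test' in the comment are hypothetical placeholders, and constructing
# LiveLearningCurve assumes bokeh and a Jupyter notebook are available.
def _args_wrapper_example():
    """Never called; for illustration only."""
    pdlogger = PandasLogger(batch_size=128)
    curve = LiveLearningCurve(metric_name='accuracy')
    callback_kwargs = args_wrapper(pdlogger, curve)
    # callback_kwargs maps each hook name to a list of callbacks, e.g.
    # model.fit(X=train, eval_data=test, **callback_kwargs)
    return callback_kwargs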
|
|
# Nimble Storage, Inc. (c) 2013-2014
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Nimble Storage.
This driver supports Nimble Storage controller CS-Series.
"""
import functools
import random
import re
import six
import string
import urllib2
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from suds import client
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.san import san
DRIVER_VERSION = '1.0'
VOL_EDIT_MASK = 4 + 16 + 32 + 64 + 512
SOAP_PORT = 5391
SM_ACL_APPLY_TO_BOTH = 3
SM_ACL_CHAP_USER_ANY = '*'
SM_SUBNET_DATA = 3
SM_SUBNET_MGMT_PLUS_DATA = 4
LUN_ID = '0'
WARN_LEVEL = 0.8
LOG = logging.getLogger(__name__)
nimble_opts = [
cfg.StrOpt('nimble_pool_name',
default='default',
help='Nimble Controller pool name'),
cfg.StrOpt('nimble_subnet_label',
default='*',
help='Nimble Subnet Label'), ]
CONF = cfg.CONF
CONF.register_opts(nimble_opts)
class NimbleDriverException(exception.VolumeDriverException):
message = _("Nimble Cinder Driver exception")
class NimbleAPIException(exception.VolumeBackendAPIException):
message = _("Unexpected response from Nimble API")
class NimbleISCSIDriver(san.SanISCSIDriver):
"""OpenStack driver to enable Nimble Controller.
Version history:
1.0 - Initial driver
"""
def __init__(self, *args, **kwargs):
super(NimbleISCSIDriver, self).__init__(*args, **kwargs)
self.APIExecutor = None
self.group_stats = {}
self.configuration.append_config_values(nimble_opts)
def _check_config(self):
"""Ensure that the flags we care about are set."""
required_config = ['san_ip', 'san_login', 'san_password']
for attr in required_config:
if not getattr(self.configuration, attr, None):
raise exception.InvalidInput(reason=_('%s is not set.') %
attr)
def _get_discovery_ip(self, netconfig):
"""Get discovery ip."""
subnet_label = self.configuration.nimble_subnet_label
LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
{'netlabel': subnet_label, 'netconf': netconfig})
ret_discovery_ip = ''
for subnet in netconfig['subnet-list']:
LOG.info(_LI('Exploring array subnet label %s'), subnet['label'])
if subnet_label == '*':
# Use the first data subnet, save mgmt+data for later
if subnet['subnet-id']['type'] == SM_SUBNET_DATA:
LOG.info(_LI('Discovery ip %(disc_ip)s is used '
'on data subnet %(net_label)s'),
{'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
return subnet['discovery-ip']
elif (subnet['subnet-id']['type'] ==
SM_SUBNET_MGMT_PLUS_DATA):
LOG.info(_LI('Discovery ip %(disc_ip)s is found'
' on mgmt+data subnet %(net_label)s'),
{'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
ret_discovery_ip = subnet['discovery-ip']
# If subnet is specified and found, use the subnet
elif subnet_label == subnet['label']:
LOG.info(_LI('Discovery ip %(disc_ip)s is used'
' on subnet %(net_label)s'),
{'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
return subnet['discovery-ip']
if ret_discovery_ip:
LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet'),
ret_discovery_ip)
return ret_discovery_ip
else:
raise NimbleDriverException(_('No suitable discovery ip found'))
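# Illustrative sketch (hypothetical data, using only the fields this method
# reads): with nimble_subnet_label left at its default '*', the first pure
# data subnet wins and a mgmt+data subnet is only kept as a fallback.
#
#   netconfig = {'subnet-list': [
#       {'label': 'mgmt-data', 'discovery-ip': '10.0.0.5',
#        'subnet-id': {'type': SM_SUBNET_MGMT_PLUS_DATA}},
#       {'label': 'data1', 'discovery-ip': '10.0.1.5',
#        'subnet-id': {'type': SM_SUBNET_DATA}},
#   ]}
#   # _get_discovery_ip(netconfig) -> '10.0.1.5' (the data subnet); with
#   # nimble_subnet_label = 'mgmt-data' it would return '10.0.0.5'.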
def do_setup(self, context):
"""Setup the Nimble Cinder volume driver."""
self._check_config()
# Setup API Executor
try:
self.APIExecutor = NimbleAPIExecutor(
username=self.configuration.san_login,
password=self.configuration.san_password,
ip=self.configuration.san_ip)
except Exception:
LOG.error(_LE('Failed to create SOAP client.'
' Check san_ip, username, password'
' and make sure the array version is compatible'))
raise
def _get_provider_location(self, volume_name):
"""Get volume iqn for initiator access."""
vol_info = self.APIExecutor.get_vol_info(volume_name)
iqn = vol_info['target-name']
netconfig = self.APIExecutor.get_netconfig('active')
target_ipaddr = self._get_discovery_ip(netconfig)
iscsi_portal = target_ipaddr + ':3260'
provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID)
LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s'),
{'name': volume_name, 'loc': provider_location})
return provider_location
def _get_model_info(self, volume_name):
"""Get model info for the volume."""
return (
{'provider_location': self._get_provider_location(volume_name),
'provider_auth': None})
def create_volume(self, volume):
"""Create a new volume."""
reserve = not self.configuration.san_thin_provision
self.APIExecutor.create_vol(
volume,
self.configuration.nimble_pool_name, reserve)
return self._get_model_info(volume['name'])
def delete_volume(self, volume):
"""Delete the specified volume."""
self.APIExecutor.online_vol(volume['name'], False,
ignore_list=['SM-enoent'])
self.APIExecutor.dissociate_volcoll(volume['name'],
ignore_list=['SM-enoent'])
self.APIExecutor.delete_vol(volume['name'], ignore_list=['SM-enoent'])
def _generate_random_string(self, length):
"""Generates random_string."""
char_set = string.ascii_lowercase
return ''.join(random.sample(char_set, length))
def _clone_volume_from_snapshot(self, volume, snapshot):
"""Clonevolume from snapshot. Extend the volume if the
size of the volume is more than the snapshot
"""
reserve = not self.configuration.san_thin_provision
self.APIExecutor.clone_vol(volume, snapshot, reserve)
if(volume['size'] > snapshot['volume_size']):
vol_size = volume['size'] * units.Gi
reserve_size = vol_size if reserve else 0
self.APIExecutor.edit_vol(
volume['name'],
VOL_EDIT_MASK, # mask for vol attributes
{'size': vol_size,
'reserve': reserve_size,
'warn-level': int(vol_size * WARN_LEVEL),
'quota': vol_size,
'snap-quota': vol_size})
return self._get_model_info(volume['name'])
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
snapshot_name = ('openstack-clone-' +
volume['name'] + '-' +
self._generate_random_string(12))
snapshot = {'volume_name': src_vref['name'],
'name': snapshot_name,
'volume_size': src_vref['size'],
'display_name': '',
'display_description': ''}
self.APIExecutor.snap_vol(snapshot)
self._clone_volume_from_snapshot(volume, snapshot)
return self._get_model_info(volume['name'])
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
return self._get_model_info(volume['name'])
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
return self._get_model_info(volume['name'])
def create_snapshot(self, snapshot):
"""Create a snapshot."""
self.APIExecutor.snap_vol(snapshot)
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
self.APIExecutor.online_snap(
snapshot['volume_name'],
False,
snapshot['name'],
ignore_list=['SM-ealready', 'SM-enoent'])
self.APIExecutor.delete_snap(snapshot['volume_name'],
snapshot['name'],
ignore_list=['SM-enoent'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
self._clone_volume_from_snapshot(volume, snapshot)
return self._get_model_info(volume['name'])
def get_volume_stats(self, refresh=False):
"""Get volume stats. This is more of getting group stats."""
if refresh:
group_info = self.APIExecutor.get_group_config()
if not group_info['spaceInfoValid']:
raise NimbleDriverException(_('SpaceInfo returned by'
' array is invalid'))
total_capacity = (group_info['usableCapacity'] /
float(units.Gi))
used_space = ((group_info['volUsageCompressed'] +
group_info['snapUsageCompressed'] +
group_info['unusedReserve']) /
float(units.Gi))
free_space = total_capacity - used_space
LOG.debug('total_capacity=%(capacity)f '
'used_space=%(used)f free_space=%(free)f',
{'capacity': total_capacity,
'used': used_space,
'free': free_space})
backend_name = self.configuration.safe_get(
'volume_backend_name') or self.__class__.__name__
self.group_stats = {'volume_backend_name': backend_name,
'vendor_name': 'Nimble',
'driver_version': DRIVER_VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': total_capacity,
'free_capacity_gb': free_space,
'reserved_percentage': 0,
'QoS_support': False}
return self.group_stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
volume_name = volume['name']
LOG.info(_LI('Entering extend_volume volume=%(vol)s '
'new_size=%(size)s'),
{'vol': volume_name, 'size': new_size})
vol_size = int(new_size) * units.Gi
reserve = not self.configuration.san_thin_provision
reserve_size = vol_size if reserve else 0
self.APIExecutor.edit_vol(
volume_name,
VOL_EDIT_MASK, # mask for vol attributes
{'size': vol_size,
'reserve': reserve_size,
'warn-level': int(vol_size * WARN_LEVEL),
'quota': vol_size,
'snap-quota': vol_size})
def _create_igroup_for_initiator(self, initiator_name):
"""Creates igroup for an initiator and returns the igroup name."""
igrp_name = 'openstack-' + self._generate_random_string(12)
LOG.info(_LI('Creating initiator group %(grp)s '
'with initiator %(iname)s'),
{'grp': igrp_name, 'iname': initiator_name})
self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
return igrp_name
def _get_igroupname_for_initiator(self, initiator_name):
initiator_groups = self.APIExecutor.get_initiator_grp_list()
for initiator_group in initiator_groups:
if 'initiator-list' in initiator_group:
if (len(initiator_group['initiator-list']) == 1 and
initiator_group['initiator-list'][0]['name'] ==
initiator_name):
LOG.info(_LI('igroup %(grp)s found for '
'initiator %(iname)s'),
{'grp': initiator_group['name'],
'iname': initiator_name})
return initiator_group['name']
LOG.info(_LI('No igroup found for initiator %s'), initiator_name)
return ''
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance."""
LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
' connector=%(conn)s location=%(loc)s'),
{'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
initiator_name = connector['initiator']
initiator_group_name = self._get_igroupname_for_initiator(
initiator_name)
if not initiator_group_name:
initiator_group_name = self._create_igroup_for_initiator(
initiator_name)
LOG.info(_LI('Initiator group name is %(grp)s for initiator '
'%(iname)s'),
{'grp': initiator_group_name, 'iname': initiator_name})
self.APIExecutor.add_acl(volume, initiator_group_name)
(iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
properties = {}
properties['target_discovered'] = False # whether discovery was used
properties['target_portal'] = iscsi_portal
properties['target_iqn'] = iqn
properties['target_lun'] = lun_num
properties['volume_id'] = volume['id'] # used by xen currently
return {
'driver_volume_type': 'iscsi',
'data': properties,
}
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
' connector=%(conn)s location=%(loc)s.'),
{'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
initiator_name = connector['initiator']
initiator_group_name = self._get_igroupname_for_initiator(
initiator_name)
if not initiator_group_name:
raise NimbleDriverException(
_('No initiator group found for initiator %s') %
initiator_name)
self.APIExecutor.remove_acl(volume, initiator_group_name)
def _response_checker(func):
"""Decorator function to check if the response
of an API is positive
"""
@functools.wraps(func)
def inner_response_checker(self, *args, **kwargs):
response = func(self, *args, **kwargs)
ignore_list = (kwargs['ignore_list']
if 'ignore_list' in kwargs else [])
for err in response['err-list']['err-list']:
err_str = self._get_err_str(err['code'])
if err_str != 'SM-ok' and err_str not in ignore_list:
msg = (_('API %(name)s failed with error string %(err)s')
% {'name': func.__name__, 'err': err_str})
LOG.error(msg)
raise NimbleAPIException(msg)
return response
return inner_response_checker
def _connection_checker(func):
"""Decorator to re-establish and
re-run the api if session has expired.
"""
@functools.wraps(func)
def inner_connection_checker(self, *args, **kwargs):
for attempts in range(2):
try:
return func(self, *args, **kwargs)
except NimbleAPIException as e:
if attempts < 1 and (re.search('SM-eaccess',
six.text_type(e))):
LOG.info(_LI('Session might have expired.'
' Trying to relogin'))
self.login()
continue
else:
LOG.error(_LE('Re-throwing Exception %s'), e)
raise
return inner_connection_checker
class NimbleAPIExecutor(object):
"""Makes Nimble API calls."""
def __init__(self, *args, **kwargs):
self.sid = None
self.username = kwargs['username']
self.password = kwargs['password']
wsdl_url = 'https://%s/wsdl/NsGroupManagement.wsdl' % (kwargs['ip'])
LOG.debug('Using Nimble wsdl_url: %s', wsdl_url)
self.err_string_dict = self._create_err_code_to_str_mapper(wsdl_url)
self.client = client.Client(wsdl_url,
username=self.username,
password=self.password)
soap_url = ('https://%(ip)s:%(port)s/soap' % {'ip': kwargs['ip'],
'port': SOAP_PORT})
LOG.debug('Using Nimble soap_url: %s', soap_url)
self.client.set_options(location=soap_url)
self.login()
def _create_err_code_to_str_mapper(self, wsdl_url):
f = urllib2.urlopen(wsdl_url)
wsdl_file = f.read()
err_enums = re.findall(
r'<simpleType name="SmErrorType">(.*?)</simpleType>',
wsdl_file,
re.DOTALL)
err_enums = ''.join(err_enums).split('\n')
ret_dict = {}
for enum in err_enums:
m = re.search(r'"(.*?)"(.*?)= (\d+) ', enum)
if m:
ret_dict[int(m.group(3))] = m.group(1)
return ret_dict
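# Illustrative note (hypothetical format, inferred only from the regex
# above; the numeric code is made up): each enumeration line inside the
# SmErrorType <simpleType> block is expected to contain a quoted error
# string and its numeric code, shaped roughly like
#   ... "SM-eaccess" ... = 5 ...
# which _create_err_code_to_str_mapper turns into {5: 'SM-eaccess'} so that
# _get_err_str() can translate numeric codes found in API responses.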
def _get_err_str(self, code):
if code in self.err_string_dict:
return self.err_string_dict[code]
else:
return 'Unknown error Code: %s' % code
@_response_checker
def _execute_login(self):
return self.client.service.login(req={
'username': self.username,
'password': self.password
})
def login(self):
"""Execute Https Login API."""
response = self._execute_login()
LOG.info(_LI('Successful login by user %s'), self.username)
self.sid = response['authInfo']['sid']
@_connection_checker
@_response_checker
def _execute_get_netconfig(self, name):
return self.client.service.getNetConfig(request={'sid': self.sid,
'name': name})
def get_netconfig(self, name):
"""Execute getNetConfig API."""
response = self._execute_get_netconfig(name)
return response['config']
@_connection_checker
@_response_checker
def _execute_create_vol(self, volume, pool_name, reserve):
# Set volume size, display name and description
volume_size = volume['size'] * units.Gi
reserve_size = volume_size if reserve else 0
# Set volume description
display_list = [getattr(volume, 'display_name', ''),
getattr(volume, 'display_description', '')]
description = ':'.join(filter(None, display_list))
# Limit description size to 254 characters
description = description[:254]
LOG.info(_LI('Creating a new volume=%(vol)s size=%(size)s'
' reserve=%(reserve)s in pool=%(pool)s'
' description=%(description)s'),
{'vol': volume['name'],
'size': volume_size,
'reserve': reserve,
'pool': pool_name,
'description': description})
return self.client.service.createVol(
request={'sid': self.sid,
'attr': {'name': volume['name'],
'description': description,
'size': volume_size,
'perfpol-name': 'default',
'reserve': reserve_size,
'warn-level': int(volume_size * WARN_LEVEL),
'quota': volume_size,
'snap-quota': volume_size,
'online': True,
'pool-name': pool_name}})
def create_vol(self, volume, pool_name, reserve):
"""Execute createVol API."""
response = self._execute_create_vol(volume, pool_name, reserve)
        LOG.info(_LI('Successfully created volume %s'), response['name'])
return response['name']
@_connection_checker
@_response_checker
def _execute_get_group_config(self):
LOG.debug('Getting group config information')
return self.client.service.getGroupConfig(request={'sid': self.sid})
def get_group_config(self):
"""Execute getGroupConfig API."""
response = self._execute_get_group_config()
LOG.debug('Successfully retrieved group config information')
return response['info']
@_connection_checker
@_response_checker
def add_acl(self, volume, initiator_group_name):
"""Execute addAcl API."""
LOG.info(_LI('Adding ACL to volume=%(vol)s with'
' initiator group name %(igrp)s'),
{'vol': volume['name'],
'igrp': initiator_group_name})
return self.client.service.addVolAcl(
request={'sid': self.sid,
'volname': volume['name'],
'apply-to': SM_ACL_APPLY_TO_BOTH,
'chapuser': SM_ACL_CHAP_USER_ANY,
'initiatorgrp': initiator_group_name})
@_connection_checker
@_response_checker
def remove_acl(self, volume, initiator_group_name):
"""Execute removeVolAcl API."""
LOG.info(_LI('Removing ACL from volume=%(vol)s'
' for initiator group %(igrp)s'),
{'vol': volume['name'],
'igrp': initiator_group_name})
return self.client.service.removeVolAcl(
request={'sid': self.sid,
'volname': volume['name'],
'apply-to': SM_ACL_APPLY_TO_BOTH,
'chapuser': SM_ACL_CHAP_USER_ANY,
'initiatorgrp': initiator_group_name})
@_connection_checker
@_response_checker
def _execute_get_vol_info(self, vol_name):
LOG.info(_LI('Getting volume information '
'for vol_name=%s'), vol_name)
return self.client.service.getVolInfo(request={'sid': self.sid,
'name': vol_name})
def get_vol_info(self, vol_name):
"""Execute getVolInfo API."""
response = self._execute_get_vol_info(vol_name)
LOG.info(_LI('Successfully got volume information for volume %s'),
vol_name)
return response['vol']
@_connection_checker
@_response_checker
def online_vol(self, vol_name, online_flag, *args, **kwargs):
"""Execute onlineVol API."""
LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s'),
{'vol': vol_name, 'flag': online_flag})
return self.client.service.onlineVol(request={'sid': self.sid,
'name': vol_name,
'online': online_flag})
@_connection_checker
@_response_checker
def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
"""Execute onlineSnap API."""
LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s'),
{'snap': snap_name, 'flag': online_flag})
return self.client.service.onlineSnap(request={'sid': self.sid,
'vol': vol_name,
'name': snap_name,
'online': online_flag})
@_connection_checker
@_response_checker
def dissociate_volcoll(self, vol_name, *args, **kwargs):
"""Execute dissocProtPol API."""
LOG.info(_LI('Dissociating volume %s '), vol_name)
return self.client.service.dissocProtPol(
request={'sid': self.sid,
'vol-name': vol_name})
@_connection_checker
@_response_checker
def delete_vol(self, vol_name, *args, **kwargs):
"""Execute deleteVol API."""
LOG.info(_LI('Deleting volume %s '), vol_name)
return self.client.service.deleteVol(request={'sid': self.sid,
'name': vol_name})
@_connection_checker
@_response_checker
def snap_vol(self, snapshot):
"""Execute snapVol API."""
volume_name = snapshot['volume_name']
snap_name = snapshot['name']
# Set snapshot description
display_list = [getattr(snapshot, 'display_name', ''),
getattr(snapshot, 'display_description', '')]
snap_description = ':'.join(filter(None, display_list))
# Limit to 254 characters
snap_description = snap_description[:254]
LOG.info(_LI('Creating snapshot for volume_name=%(vol)s'
' snap_name=%(name)s snap_description=%(desc)s'),
{'vol': volume_name,
'name': snap_name,
'desc': snap_description})
return self.client.service.snapVol(
request={'sid': self.sid,
'vol': volume_name,
'snapAttr': {'name': snap_name,
'description': snap_description}})
@_connection_checker
@_response_checker
def delete_snap(self, vol_name, snap_name, *args, **kwargs):
"""Execute deleteSnap API."""
LOG.info(_LI('Deleting snapshot %s '), snap_name)
return self.client.service.deleteSnap(request={'sid': self.sid,
'vol': vol_name,
'name': snap_name})
@_connection_checker
@_response_checker
def clone_vol(self, volume, snapshot, reserve):
"""Execute cloneVol API."""
volume_name = snapshot['volume_name']
snap_name = snapshot['name']
clone_name = volume['name']
snap_size = snapshot['volume_size']
reserve_size = snap_size * units.Gi if reserve else 0
LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s '
'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s'
                     ' reserve=%(reserve)s'),
{'vol': volume_name,
'snap': snap_name,
'clone': clone_name,
'size': snap_size,
'reserve': reserve})
clone_size = snap_size * units.Gi
return self.client.service.cloneVol(
request={'sid': self.sid,
'name': volume_name,
'attr': {'name': clone_name,
'perfpol-name': 'default',
'reserve': reserve_size,
'warn-level': int(clone_size * WARN_LEVEL),
'quota': clone_size,
'snap-quota': clone_size,
'online': True},
'snap-name': snap_name})
@_connection_checker
@_response_checker
def edit_vol(self, vol_name, mask, attr):
"""Execute editVol API."""
LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s'),
{'vol': vol_name, 'mask': str(mask)})
return self.client.service.editVol(request={'sid': self.sid,
'name': vol_name,
'mask': mask,
'attr': attr})
@_connection_checker
@_response_checker
def _execute_get_initiator_grp_list(self):
        LOG.info(_LI('Calling getInitiatorGrpList'))
return (self.client.service.getInitiatorGrpList(
request={'sid': self.sid}))
def get_initiator_grp_list(self):
"""Execute getInitiatorGrpList API."""
response = self._execute_get_initiator_grp_list()
LOG.info(_LI('Successfully retrieved InitiatorGrpList'))
return (response['initiatorgrp-list']
if 'initiatorgrp-list' in response else [])
@_connection_checker
@_response_checker
def create_initiator_group(self, initiator_group_name, initiator_name):
"""Execute createInitiatorGrp API."""
LOG.info(_LI('Creating initiator group %(igrp)s'
' with one initiator %(iname)s'),
{'igrp': initiator_group_name, 'iname': initiator_name})
return self.client.service.createInitiatorGrp(
request={'sid': self.sid,
'attr': {'name': initiator_group_name,
'initiator-list': [{'label': initiator_name,
'name': initiator_name}]}})
@_connection_checker
@_response_checker
def delete_initiator_group(self, initiator_group_name, *args, **kwargs):
"""Execute deleteInitiatorGrp API."""
        LOG.info(_LI('Deleting initiator group %s'), initiator_group_name)
return self.client.service.deleteInitiatorGrp(
request={'sid': self.sid,
'name': initiator_group_name})
|
|
"""
This file contains models for the Quiz module of the e-learning platform.
It defines models for Quiz, QuestionModule, Question and Submission,
as well as models for saving the history of the above.
"""
import json
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import pre_save, post_save, pre_delete
from django.dispatch.dispatcher import receiver
from util.models import HitCounter, TimeKeeper
from util.methods import receiver_subclasses
from model_utils.managers import InheritanceManager
import courseware
class Quiz(models.Model):
"""
This class encapsulates the model of a quiz.
"""
title = models.TextField() # title of the quiz
# Number of question modules in Quiz, Auto
question_modules = models.IntegerField(default=0)
# Number of questions in Quiz, Auto
questions = models.IntegerField(default=0)
marks = models.FloatField(default=0.0) # max marks for this quiz
# JSON Object used to maintain the order of question
# modules in the Quiz
playlist = models.TextField(default='[]')
def update_score(self, difference):
self.marks += difference
self.save()
try:
concept = courseware.models.Concept.objects.get(quizzes__pk=self.id)
concept.update_score(difference)
        except Exception:
            # e.g. the quiz may not be attached to a concept yet; ignore.
            return
class QuestionModule(models.Model):
"""
Each quiz is composed of several QuestionModules where each QuestionModule
can have 1 or more questions. When the Question Module contains
only one question (thereby serving no purpose other than encapsulating
the question), and you do not want to display/use the content of the
module object itself, set the dummy flag to True
"""
#course = models.ForeignKey(
# Course,
# related_name="QuestionModule_Course",
# db_index=True
#)
quiz = models.ForeignKey(Quiz, related_name='QuestionModule_Quiz')
# title/description of the question module
title = models.TextField()
# Ordering of questions in the module
playlist = models.TextField(default='[]')
# Number of questions in Model, Auto
questions = models.IntegerField(default=0)
# flag whether the module is a dummy (for a single question)
dummy = models.BooleanField(default=False)
class QuizHistory(models.Model):
"""
This class captures the quiz history of a user
"""
quiz = models.ForeignKey(Quiz, related_name='QuizHistory_Quiz')
user = models.ForeignKey(User, related_name='QuizHistory_User')
current_question_module = models.ForeignKey(
QuestionModule, related_name='QuizHistory_QuestionModule', null=True)
marks = models.FloatField(default=0.0)
solved = models.IntegerField(default=0) # Number of questions solved
is_graded = models.BooleanField(default=False)
def progress(self):
data = {}
data['title'] = self.quiz.title
data['score'] = self.marks
data['max_score'] = self.quiz.marks
data['questions'] = self.quiz.questions
data['solved'] = self.solved
return data
def update_score(self, difference):
self.marks += difference
self.save()
try:
concept = courseware.models.Concept.objects.get(quizzes__pk=self.quiz.id)
ch, created = courseware.models.ConceptHistory.objects.get_or_create(
concept=concept, user=self.user)
ch.update_score(difference)
        except Exception:
            # e.g. the quiz may not be attached to a concept yet; ignore.
            return
class Meta:
"""
question and student combined should be unique
"""
unique_together = ("quiz", "user")
class Question(HitCounter):
"""
This is the basic gradable unit - a question. It can be of multiple types
"""
MANUAL_GRADING = 'M'
DIRECT_GRADING = 'D'
GRADER_TYPES = (
(MANUAL_GRADING, 'Manual Grading'),
(DIRECT_GRADING, 'Direct Grading')
)
SINGLE_CHOICE_QUESTION = 'S'
MULTIPLE_CHOICE_QUESTION = 'M'
FIXED_ANSWER_QUESTION = 'F'
DESCRIPTIVE_ANSWER_QUESTION = 'D'
PROGRAMMING_QUESTION = 'P'
QUESTION_TYPES = (
(SINGLE_CHOICE_QUESTION, 'Single Choice Correct'),
(MULTIPLE_CHOICE_QUESTION, 'Multiple Choice Correct'),
(FIXED_ANSWER_QUESTION, 'Fixed Answer'),
(DESCRIPTIVE_ANSWER_QUESTION, 'Descriptive Answer'),
(PROGRAMMING_QUESTION, 'Programming Question')
)
#course = models.ForeignKey(
# Course,
# related_name="Question_Course",
# db_index=True
#)
quiz = models.ForeignKey(Quiz, related_name="Question_Quiz")
question_module = models.ForeignKey(
QuestionModule,
related_name='Question_QuestionModule',
db_index=True) # index this field
description = models.TextField()
# hint_available = models.BooleanField(default=False)
hint = models.TextField(
help_text="Hint you want to give if any",
blank=True,
null=True)
grader_type = models.CharField(
max_length=1,
help_text="Which grader to use for grading this question",
choices=GRADER_TYPES,
default='D')
#order = models.IntegerField()
answer_description = models.TextField(
help_text="Description of the answer",
blank=True)
marks = models.FloatField(default=0)
gradable = models.BooleanField(default=True)
granularity = models.TextField(
help_text="Enter a string with marks separated by commas. \
            Last entry will be repeated for all further attempts")
# granularity after the hint is given
granularity_hint = models.TextField(
help_text="Enter a string with marks separated by commas. \
            Last entry will be repeated for all further attempts",
blank=True,
null=True)
    # type: this field exists so that the question type can be accessed
# directly. This need not be set explicitly. It is set automatically
# in the child classes
type = models.CharField(
max_length=1,
choices=QUESTION_TYPES,
help_text="Type of question",
)
attempts = models.IntegerField(default=1)
def is_hint_available(self):
""" Whether a hint is available """
if self.hint is not None:
return True
else:
return False
def get_data(self):
""" Get the data of the model as a dict """
return {
'id': self.pk,
'quiz': self.quiz,
'question_module': self.question_module,
'description': self.description,
'hint': self.hint,
'grader_type': self.grader_type,
'type': self.type,
'gradable': self.gradable,
'granularity': self.granularity,
'granularity_hint': self.granularity_hint,
'marks': self.marks,
'attempts': self.attempts,
'answer_description': self.answer_description
}
def get_default_granularity(self, hint=False):
"""
When courseware module is completed, uncomment the below code to
define the default granularity for a course
course_info = self.course.course_info
if hint:
if (course_info.granularity_hint is not None and
course_info.granularity_hint.strip() != ''):
return course_info.granularity_hint
else:
if (course_info.granularity is not None and
course_info.granularity.strip() != ''):
return course_info.granularity
"""
        # NOTE on the implementation of default granularity:
        # This is a very naive implementation, since I couldn't get the
        # serializer to accept a blank value for granularity. So the UI sends
        # "undefined" to the backend, and the backend treats this as a keyword
        # and assigns the default value.
        # In future we may need granularity in a different format, since the
        # current one seems inefficient for a large number of attempts.
        # Need to comment out lines from courseware models to add granularity
        # to course info.
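        # Worked example (illustrative values): with marks=10.0 and attempts=4,
        # factor = int(10.0 / 4) = 2, so the loop below builds
        # "10.0,8.0,6.0,4.0," and the trailing "0" yields "10.0,8.0,6.0,4.0,0".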
granularity = ""
marks = self.marks
factor = int(marks/self.attempts)
for i in range(self.attempts):
granularity = granularity + str(marks) + ","
marks = marks - factor
granularity = granularity + "0"
#granularity = ((str(self.marks) + ',') * self.attempts) + "0"
print granularity
return granularity
def save(self, *args, **kwargs):
""" Process some fields before save """
if self.hint is not None and self.hint.strip() == '':
self.hint = None
if (self.granularity is None or
self.granularity.strip() == '' or
self.granularity == 'undefined'):
self.granularity = self.get_default_granularity()
if (self.granularity_hint is None or
self.granularity_hint.strip() == '' or
self.granularity_hint == 'undefined'):
self.granularity_hint = self.get_default_granularity(hint=True)
if self.answer_description is None:
self.answer_description = ''
super(Question, self).save(*args, **kwargs)
objects = InheritanceManager()
class Meta:
"""
This is not an abstract class
"""
abstract = False
class DescriptiveQuestion(Question):
"""
A question with a descriptive answer.
Ideally, this will be graded manually and the answer field will
contain a model answer/guidelines.
"""
answer = models.TextField()
# set the type field of the question
def save(self, *args, **kwargs):
self.type = self.DESCRIPTIVE_ANSWER_QUESTION
super(DescriptiveQuestion, self).save(*args, **kwargs)
class SingleChoiceQuestion(Question):
"""
A question which has only 1 of the possible choices as the correct answer
"""
# JSON Array containing the options
# E.g.: "['Django', 'Ruby', 'Scala']"
options = models.TextField(
help_text='Enter choices one by one')
answer = models.IntegerField(
help_text="Answer will be the (0-based) index of the choice above")
# set the type field of the question
def save(self, *args, **kwargs):
self.type = self.SINGLE_CHOICE_QUESTION
super(SingleChoiceQuestion, self).save(*args, **kwargs)
def get_answer(self, showToUser=False):
"""
Return the answer to this question.
"""
# print self.answer
# print self.options
if showToUser:
options = json.loads(self.options)
return options[self.answer]
else:
return self.answer
def get_answer_data(self):
"""
Return answer data packaged up
"""
selected = [False] * len(json.loads(self.options))
selected[self.answer] = True
data = {
'options': self.options,
'selected': json.dumps(selected)
}
return data
class MultipleChoiceQuestion(Question):
"""
A question which may have 1 or more correct answers from
the possible choices
"""
# JSON Array containing the options
# E.g.: "['Django', 'Ruby', 'Scala']"
options = models.TextField(
        help_text='Enter choices separated by commas (no comma at the end): \
            e.g.: choice 1, choice 2, choice 3')
# JSON Array of Booleans
# E.g.: "[true, false, true]"
answer = models.TextField(
        help_text='Answer will be in the form of "[true, false, true]" etc')
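    # Illustrative example (hypothetical data): with
    #   options = '["Django", "Ruby", "Scala"]' and answer = '[true, false, true]',
    # get_answer(showToUser=True) below returns '["Django", "Scala"]'.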
# set the type field of the question
def save(self, *args, **kwargs):
self.type = self.MULTIPLE_CHOICE_QUESTION
super(MultipleChoiceQuestion, self).save(*args, **kwargs)
def get_answer(self, showToUser=False):
"""
Return answer for grading if showToUSer not supplied
"""
if showToUser:
i = 0
selected = []
options = json.loads(self.options)
print options
answer = json.loads(self.answer)
print answer
for opt in answer:
if opt:
selected.append(options[i])
i = i + 1
print selected
print json.dumps(selected)
return json.dumps(selected)
else:
return self.answer
def get_answer_data(self):
"""
Return answer data packaged up
"""
data = {
'options': self.options,
'selected': self.get_answer()
}
return data
class FixedAnswerQuestion(Question):
"""
A question which has a fixed answer to be input in a text field
"""
    # JSON array of acceptable answers
answer = models.CharField(max_length=128)
def get_answer(self, showToUser=False):
"""
Return the answer to this question.
        Ideally we would set answer_shown in QuestionHistory whenever this is
        called, but that is expensive. So, wherever get_answer is called, set
        answer_shown = True and save.
"""
print self.answer
return json.loads(self.answer)
# TODO : replace the function below to work with a json string
#if showToUser:
# answer = self.answer
# if len(self.answer.split(',')) > 1:
# answer = string.replace(answer, ',', ', ')
# return answer
#else:
# return self.answer.split(',')
def get_answer_data(self):
"""
Return answer data packaged up
"""
data = {
'answer': self.get_answer()
}
return data
# set the type field of the question
def save(self, *args, **kwargs):
self.type = self.FIXED_ANSWER_QUESTION
super(FixedAnswerQuestion, self).save(*args, **kwargs)
class ProgrammingQuestion(Question):
"""
A question which requires the submission of a file to be graded
according to the command given with it
"""
num_testcases = models.IntegerField()
command = models.TextField() # command to compile and run the submission
# string of file extensions separated by comma
acceptable_languages = models.TextField()
# set the type field of the question
def save(self, *args, **kwargs):
self.type = self.PROGRAMMING_QUESTION
super(ProgrammingQuestion, self).save(*args, **kwargs)
class Testcase(models.Model):
"""
A testcase is one of the many inputs against which a ProgrammingQuestion is
to be evaluated
"""
question = models.ForeignKey(
ProgrammingQuestion,
related_name='Testcase_ProgrammingQuestion')
input_text = models.TextField()
correct_output = models.TextField()
class QuestionHistory(models.Model):
"""
This class captures the history of a question associated with each student
"""
question = models.ForeignKey(
Question,
related_name='QuestionHistory_Question')
student = models.ForeignKey(User, related_name='QuestionHistory_User')
attempts = models.IntegerField(default=0)
marks = models.FloatField(default=0.0)
NOT_ATTEMPTED = 'N'
ATTEMPTED_ONCE = 'O'
AWAITING_RESULTS = 'A'
SOLVED = 'S'
status_codes = (
(NOT_ATTEMPTED, 'Not Attempted'),
        (ATTEMPTED_ONCE, 'Attempted at least once'),
(AWAITING_RESULTS, 'Awaiting Results'),
(SOLVED, 'Solved')
)
status = models.CharField(max_length=1, choices=status_codes, default='N')
hint_taken = models.BooleanField(default=False)
answer_shown = models.BooleanField(default=False)
class Meta:
"""
question and student combined should be unique
"""
unique_together = ("question", "student")
class Queue(TimeKeeper):
"""
This is a utility class to store objects that we need to perform actions
    on asynchronously - email, notification, grading of programming questions
"""
object_id = models.TextField() # id of notification or email or submission
is_processed = models.BooleanField(default=False)
EMAIL = 'E'
NOTIFICATION = 'N'
SUBMISSION = 'S'
object_types = (
(EMAIL, 'Email'),
(NOTIFICATION, 'Notification'),
(SUBMISSION, 'Submission')
)
object_type = models.CharField(
max_length=1,
choices=object_types,
default='E')
info = models.TextField() # extra information
class Submission(TimeKeeper):
"""
A submission is added when a student answers the question. Depending on the
type of grader being used, the evaluation may be instant or waiting
"""
#course = models.ForeignKey(
# Course,
# related_name="Submission_Course",
# db_index=True
#)
question = models.ForeignKey(Question, related_name='Submission_Question')
student = models.ForeignKey(User, related_name='Submission_User')
grader_type = models.CharField(
max_length=1,
choices=Question.GRADER_TYPES,
        default='D')  # so it's easy to know without going to the question
answer = models.TextField()
# No queue for the time being
#queue_id = models.ForeignKey(Queue, related_name='Submission_Queue')
AWAITING_RESULTS = 'A'
DONE = 'D'
status_codes = (
(AWAITING_RESULTS, 'Awaiting Results'),
(DONE, 'Done')
)
status = models.CharField(
max_length=1,
choices=status_codes,
default=AWAITING_RESULTS)
feedback = models.TextField(default='') # feedback from the grader
result = models.FloatField(default=0.0) # marks given to this submission
is_correct = models.BooleanField(default=False)
is_plagiarised = models.BooleanField(default=False) # plagiarism checking
# has been checked for plagiarism or not
has_been_checked = models.BooleanField(default=False)
@receiver_subclasses(pre_save, Question, "question_pre_save")
def update_question_stats_pre_save(sender, **kwargs):
""" Increase question count by 1 and max_marks"""
instance = kwargs['instance']
if instance.pk is not None: # update
instance.quiz.update_score(-1*(instance.marks))
@receiver_subclasses(post_save, Question, "question_post_save")
def update_question_stats_post_save(sender, **kwargs):
""" Increase question count by 1 and max_marks"""
instance = kwargs['instance']
if kwargs['created']: # create
instance.quiz.questions += 1
instance.question_module.questions += 1
instance.quiz.save()
instance.quiz.update_score(instance.marks)
instance.question_module.save()
@receiver_subclasses(pre_delete, Question, "question_pre_delete")
def update_question_stats_on_delete(sender, **kwargs):
""" Decrease question count by 1 and max_marks"""
instance = kwargs['instance']
if type(instance) != Question:
        # This check is necessary, otherwise the handler is called twice:
        # once for the parent class and once for the subclass
instance.quiz.questions -= 1
instance.quiz.save()
instance.quiz.update_score(-1*(instance.marks))
instance.question_module.questions -= 1
instance.question_module.save()
@receiver(post_save, sender=QuestionModule)
def update_question_module_stats_post_save(sender, **kwargs):
""" Increase question module count by 1"""
instance = kwargs['instance']
if kwargs['created']: # create
instance.quiz.question_modules += 1
instance.quiz.save()
@receiver(pre_delete, sender=QuestionModule)
def update_question_module_stats_on_delete(sender, **kwargs):
""" Decrease question module count by 1"""
instance = kwargs['instance']
instance.quiz.question_modules -= 1
instance.quiz.save()
#TODO add the above classes for question modules
@receiver(pre_delete, sender=QuestionHistory)
def update_concept_history_on_delete(sender, **kwargs):
"""Update quizhistory marks"""
instance = kwargs['instance']
quiz_history, created = QuizHistory.objects.get_or_create(
quiz=instance.question.quiz, user=instance.student)
quiz_history.update_score(-1*(instance.marks))
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import CScript, SignatureHash, SIGHASH_ALL, OP_TRUE, OP_FALSE
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
'''
This reimplements tests from the bitcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
'''
class FullBlockTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
def __init__(self):
super().__init__()
self.num_nodes = 1
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.block_time = int(time.time())+1
self.tip = None
self.blocks = {}
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
return block
# Create a block on top of self.tip, and advance self.tip to point to the new block
# if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
    # and the rest will go to fees.
def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
        if self.tip is None:
base_block_hash = self.genesis_hash
else:
base_block_hash = self.tip.sha256
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
        if spend is not None:
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, self.block_time)
        if spend is not None:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet
# This copies the java comparison tool testing behavior: the first
# txout has a garbage scriptPubKey, "to make sure we're not
# pre-verifying too much" (?)
tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
            if script is None:
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
else:
tx.vout.append(CTxOut(1, script))
# Now sign it if necessary
scriptSig = b""
scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend
scriptSig = CScript([OP_TRUE])
else:
# We have to actually sign it
(sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
tx.vin[0].scriptSig = scriptSig
# Now add the transaction to the block
block = self.add_transactions_to_block(block, [tx])
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
self.block_time += 1
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
        # get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# add transactions to a block produced by next_block
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
old_hash = block.sha256
self.add_transactions_to_block(block, new_transactions)
block.solve()
# Update the internal state just like in next_block
self.tip = block
self.block_heights[block.sha256] = self.block_heights[old_hash]
del self.block_heights[old_hash]
self.blocks[block_number] = block
return block
# creates a new block and advances the tip to that block
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(1000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
out0 = get_spendable_output()
block(1, spend=out0)
save_spendable_output()
yield accepted()
out1 = get_spendable_output()
b2 = block(2, spend=out1)
yield accepted()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out1)
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
out2 = get_spendable_output()
block(4, spend=out2)
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out2)
save_spendable_output()
yield rejected()
out3 = get_spendable_output()
block(6, spend=out3)
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out2)
yield rejected()
out4 = get_spendable_output()
block(8, spend=out4)
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out3)
yield rejected()
block(11, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out3)
save_spendable_output()
#yield TestInstance([[b12, False]])
b13 = block(13, spend=out4)
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
out5 = get_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out5, additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
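        # MAX_BLOCK_SIGOPS is presumably MAX_BLOCK_SIZE // 50 = 20000; the
        # "- 1" below likely leaves room for the single sigop counted in the
        # pay-to-pubkey coinbase output, so b15 sits exactly at the limit.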
lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
tip(13)
block(15, spend=out5, script=lots_of_checksigs)
yield accepted()
# Test that a block with too many checksigs is rejected
out6 = get_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
block(16, spend=out6, script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out6)
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
out7 = get_spendable_output()
block(20, spend=out7)
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out6)
yield rejected()
block(22, spend=out5)
yield rejected()
        # Create a block on either side of MAX_BLOCK_SIZE and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out6)
old_hash = b23.sha256
tx = CTransaction()
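        # The 69 bytes subtracted below presumably account for the fixed
        # serialization overhead of this spending transaction (version, input,
        # output framing, locktime and the script length/push prefixes), so
        # that the updated block serializes to exactly MAX_BLOCK_SIZE, as
        # asserted further down.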
script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
yield accepted()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out6)
script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
b25 = block(25, spend=out7)
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out6)
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = block(27, spend=out7)
yield rejected()
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out6)
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
        # Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = block(29, spend=out7)
# TODO: Should get a reject message back with "bad-prevblk", except
# there's a bug that prevents this from being detected. Just note
# failure for now, and add the reject result later.
yield rejected()
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
if __name__ == '__main__':
FullBlockTest().main()
|
|
# Copyright 2013 SolidFire Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import math
import random
import socket
import string
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
import requests
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume.targets import iscsi as iscsi_driver
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
sf_opts = [
cfg.BoolOpt('sf_emulate_512',
default=True,
                help='Set 512 byte emulation on volume creation.'),
cfg.BoolOpt('sf_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('sf_account_prefix',
default=None,
help='Create SolidFire accounts with this prefix. Any string '
'can be used here, but the string \"hostname\" is special '
'and will create a prefix using the cinder node hostname '
'(previous default behavior). The default is NO prefix.'),
cfg.StrOpt('sf_template_account_name',
default='openstack-vtemplate',
help='Account name on the SolidFire Cluster to use as owner of '
                    'template/cache volumes (created if it does not exist).'),
cfg.BoolOpt('sf_allow_template_caching',
default=True,
                help='Create an internal cache of copies of images when '
'a bootable volume is created to eliminate fetch from '
'glance and qemu-conversion on subsequent calls.'),
cfg.IntOpt('sf_api_port',
default=443,
help='SolidFire API port. Useful if the device api is behind '
'a proxy on a different port.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts)
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@six.wraps(f)
def func_retry(*args, **kwargs):
_tries, _delay = tries, delay
while _tries > 1:
try:
return f(*args, **kwargs)
except exc_tuple:
time.sleep(_delay)
_tries -= 1
_delay *= backoff
LOG.debug('Retrying %(args)s, %(tries)s attempts '
'remaining...',
{'args': args, 'tries': _tries})
# NOTE(jdg): Don't log the params passed here
# some cmds like createAccount will have sensitive
# info in the params, grab only the second tuple
# which should be the Method
msg = (_('Retry count exceeded for command: %s') %
(args[1],))
LOG.error(msg)
raise exception.SolidFireAPIException(message=msg)
return func_retry
return retry_dec
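# Note on the retry decorator above, as applied below with
# "@retry(retry_exc_tuple, tries=6)": the wrapped call is attempted five times
# inside the loop, sleeping 1, 2, 4, 8 and 16 seconds after each failure,
# before exception.SolidFireAPIException is raised.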
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
Version history:
1.0 - Initial driver
1.1 - Refactor, clone support, qos by type and minor bug fixes
1.2 - Add xfr and retype support
1.2.1 - Add export/import support
1.2.2 - Catch VolumeNotFound on accept xfr
2.0.0 - Move from httplib to requests
2.0.1 - Implement SolidFire Snapshots
2.0.2 - Implement secondary account
"""
VERSION = '2.0.2'
sf_qos_dict = {'slow': {'minIOPS': 100,
'maxIOPS': 200,
'burstIOPS': 200},
'medium': {'minIOPS': 200,
'maxIOPS': 400,
'burstIOPS': 400},
'fast': {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000},
'performant': {'minIOPS': 2000,
'maxIOPS': 4000,
'burstIOPS': 4000},
'off': None}
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
cluster_stats = {}
retry_exc_tuple = (exception.SolidFireRetryableException,
requests.exceptions.ConnectionError)
retryable_errors = ['xDBVersionMismatch',
'xMaxSnapshotsPerVolumeExceeded',
'xMaxClonesPerVolumeExceeded',
'xMaxSnapshotsPerNodeExceeded',
'xMaxClonesPerNodeExceeded',
'xNotReadyForIO']
def __init__(self, *args, **kwargs):
super(SolidFireDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(sf_opts)
self._endpoint = self._build_endpoint_info()
self.template_account_id = None
self.max_volumes_per_account = 1990
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
if self.configuration.sf_allow_template_caching:
account = self.configuration.sf_template_account_name
self.template_account_id = self._create_template_account(account)
self.target_driver = (
importutils.import_object(
'cinder.volume.drivers.solidfire.SolidFireISCSI',
solidfire_driver=self,
configuration=self.configuration))
def _create_template_account(self, account_name):
        # GetAccountByName raises an API exception if the account doesn't
        # exist, in which case we create it below.
        # We need to take account_prefix settings into consideration, so this
        # uses the same method for template account creation as we use for
        # any other OpenStack account.
account_name = self._get_sf_account_name(account_name)
try:
id = self._issue_api_request(
'GetAccountByName',
{'username': account_name})['result']['account']['accountID']
except exception.SolidFireAPIException:
chap_secret = self._generate_random_string(12)
params = {'username': account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
id = self._issue_api_request('AddAccount',
params)['result']['accountID']
return id
def _build_endpoint_info(self, **kwargs):
endpoint = {}
endpoint['mvip'] =\
kwargs.get('mvip', self.configuration.san_ip)
endpoint['login'] =\
kwargs.get('login', self.configuration.san_login)
endpoint['passwd'] =\
kwargs.get('passwd', self.configuration.san_password)
endpoint['port'] =\
kwargs.get('port', self.configuration.sf_api_port)
endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'],
endpoint['port'])
# TODO(jdg): consider a call to GetAPI and setting version
return endpoint
@retry(retry_exc_tuple, tries=6)
def _issue_api_request(self, method, params, version='1.0', endpoint=None):
if params is None:
params = {}
if endpoint is None:
endpoint = self._endpoint
payload = {'method': method, 'params': params}
url = '%s/json-rpc/%s/' % (endpoint['url'], version)
req = requests.post(url,
data=json.dumps(payload),
auth=(endpoint['login'], endpoint['passwd']),
verify=False,
timeout=30)
response = req.json()
req.close()
if (('error' in response) and
(response['error']['name'] in self.retryable_errors)):
msg = ('Retryable error (%s) encountered during '
'SolidFire API call.' % response['error']['name'])
LOG.debug(msg)
raise exception.SolidFireRetryableException(message=msg)
if 'error' in response:
msg = _('API response: %s') % response
raise exception.SolidFireAPIException(msg)
return response
def _get_volumes_by_sfaccount(self, account_id):
"""Get all volumes on cluster for specified account."""
params = {'accountID': account_id}
data = self._issue_api_request('ListVolumesForAccount', params)
if 'result' in data:
return data['result']['volumes']
def _get_sfaccount_by_name(self, sf_account_name):
"""Get SolidFire account object by name."""
sfaccount = None
params = {'username': sf_account_name}
try:
data = self._issue_api_request('GetAccountByName', params)
if 'result' in data and 'account' in data['result']:
LOG.debug('Found solidfire account: %s', sf_account_name)
sfaccount = data['result']['account']
except exception.SolidFireAPIException as ex:
if 'xUnknownAccount' in ex.msg:
return sfaccount
else:
raise
return sfaccount
def _get_sf_account_name(self, project_id):
"""Build the SolidFire account name to use."""
prefix = self.configuration.sf_account_prefix or ''
if prefix == 'hostname':
prefix = socket.gethostname()
return '%s%s%s' % (prefix, '-' if prefix else '', project_id)
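        # Illustrative examples (hypothetical values): with no prefix
        # configured, project 'abc123' maps to account name 'abc123'; with
        # sf_account_prefix = 'hostname' on a node named 'cinder01', it maps
        # to 'cinder01-abc123'.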
def _get_sfaccount(self, project_id):
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
raise exception.SolidFireAccountNotFound(
account_name=sf_account_name)
return sfaccount
def _create_sfaccount(self, project_id):
"""Create account on SolidFire device if it doesn't already exist.
We're first going to check if the account already exists, if it does
just return it. If not, then create it.
"""
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
data = self._issue_api_request('AddAccount', params)
if 'result' in data:
sfaccount = self._get_sfaccount_by_name(sf_account_name)
return sfaccount
def _get_cluster_info(self):
"""Query the SolidFire cluster for some property info."""
params = {}
data = self._issue_api_request('GetClusterInfo', params)
if 'result' not in data:
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
return data['result']
def _generate_random_string(self, length):
"""Generates random_string to use for CHAP password."""
char_set = string.ascii_uppercase + string.digits
return ''.join(random.sample(char_set, length))
def _get_model_info(self, sfaccount, sf_volume_id):
"""Gets the connection info for specified account and volume."""
cluster_info = self._get_cluster_info()
iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260'
chap_secret = sfaccount['targetSecret']
found_volume = False
iteration_count = 0
while not found_volume and iteration_count < 600:
volume_list = self._get_volumes_by_sfaccount(
sfaccount['accountID'])
iqn = None
for v in volume_list:
if v['volumeID'] == sf_volume_id:
iqn = v['iqn']
found_volume = True
break
if not found_volume:
time.sleep(2)
iteration_count += 1
if not found_volume:
LOG.error(_LE('Failed to retrieve volume SolidFire-'
'ID: %s in get_by_account!'), sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = {}
# NOTE(john-griffith): SF volumes are always at lun 0
model_update['provider_location'] = ('%s %s %s'
% (iscsi_portal, iqn, 0))
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
chap_secret))
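        # Illustrative result (hypothetical values):
        #   provider_location = '172.17.1.5:3260 iqn.2010-01.com.solidfire:uuid-x 0'
        #   provider_auth = 'CHAP abc123 K7Q2RM9ZJX58'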
if not self.configuration.sf_emulate_512:
model_update['provider_geometry'] = ('%s %s' % (4096, 4096))
model_update['provider_id'] = ('%s' % sf_volume_id)
return model_update
def _do_clone_volume(self, src_uuid,
src_project_id,
vref):
"""Create a clone of an existing volume or snapshot. """
attributes = {}
qos = {}
sf_accounts = self._get_sfaccounts_for_tenant(vref['project_id'])
if not sf_accounts:
sf_account = self._create_sfaccount(vref['project_id'])
else:
# Check availability for creates
sf_account = self._get_account_create_availability(sf_accounts)
if not sf_account:
# TODO(jdg): We're not doing tertiaries, so fail
msg = _('volumes/account exceeded on both primary '
'and secondary SolidFire accounts')
raise exception.SolidFireDriverException(msg)
params = {'name': 'UUID-%s' % vref['id'],
'newAccountID': sf_account['accountID']}
# NOTE(jdg): First check the SF snapshots
# if we don't find a snap by the given name, just move on to check
# volumes. This may be a running system that was updated from
# before we did snapshots, so need to check both
is_clone = False
snap_name = 'UUID-%s' % src_uuid
snaps = self._get_sf_snapshots()
snap = next((s for s in snaps if s["name"] == snap_name), None)
if snap:
params['snapshotID'] = int(snap['snapshotID'])
params['volumeID'] = int(snap['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
else:
sf_vol = self._get_sf_volume(
src_uuid, {'accountID': sf_account['accountID']})
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=src_uuid)
params['volumeID'] = int(sf_vol['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
is_clone = True
data = self._issue_api_request('CloneVolume', params, version='6.0')
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
sf_volume_id = data['result']['volumeID']
if (self.configuration.sf_allow_tenant_qos and
                vref.get('volume_metadata') is not None):
qos = self._set_qos_presets(vref)
ctxt = context.get_admin_context()
type_id = vref.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
# NOTE(jdg): all attributes are copied via clone, need to do an update
# to set any that were provided
params = {'volumeID': sf_volume_id}
create_time = vref['created_at'].isoformat()
attributes = {'uuid': vref['id'],
'is_clone': 'True',
'src_uuid': src_uuid,
'created_at': create_time}
if qos:
params['qos'] = qos
for k, v in qos.items():
attributes[k] = str(v)
params['attributes'] = attributes
data = self._issue_api_request('ModifyVolume', params)
model_update = self._get_model_info(sf_account, sf_volume_id)
if model_update is None:
mesg = _('Failed to get model update from clone')
raise exception.SolidFireAPIException(mesg)
# Increment the usage count, just for data collection
# We're only doing this for clones, not create_from snaps
if is_clone:
data = self._update_attributes(sf_vol)
return (data, sf_account, model_update)
def _update_attributes(self, sf_vol):
cloned_count = sf_vol['attributes'].get('cloned_count', 0)
cloned_count += 1
attributes = sf_vol['attributes']
attributes['cloned_count'] = cloned_count
params = {'volumeID': int(sf_vol['volumeID'])}
params['attributes'] = attributes
return self._issue_api_request('ModifyVolume', params)
def _do_volume_create(self, sf_account, params):
data = self._issue_api_request('CreateVolume', params)
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("Failed volume create: %s") % data
raise exception.SolidFireAPIException(msg)
sf_volume_id = data['result']['volumeID']
return self._get_model_info(sf_account, sf_volume_id)
def _do_snapshot_create(self, params):
data = self._issue_api_request('CreateSnapshot', params, version='6.0')
if (('result' not in data) or ('snapshotID' not in data['result'])):
msg = _("Failed snapshot create: %s") % data
raise exception.SolidFireAPIException(msg)
return data['result']['snapshotID']
def _set_qos_presets(self, volume):
qos = {}
valid_presets = self.sf_qos_dict.keys()
# First look to see if they included a preset
presets = [i.value for i in volume.get('volume_metadata')
if i.key == 'sf-qos' and i.value in valid_presets]
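        # Illustrative example (hypothetical metadata): a volume metadata
        # entry {'sf-qos': 'fast'} selects the 'fast' preset from sf_qos_dict,
        # i.e. {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}.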
if len(presets) > 0:
if len(presets) > 1:
LOG.warning(_LW('More than one valid preset was '
'detected, using %s'), presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
for i in volume.get('volume_metadata'):
if i.key in self.sf_qos_keys:
qos[i.key] = int(i.value)
return qos
def _set_qos_by_volume_type(self, ctxt, type_id):
qos = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
specs = volume_type.get('extra_specs')
# NOTE(jdg): We prefer the qos_specs association
        # and override any existing
# extra-specs settings if present
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
else:
kvs = specs
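        # Illustrative example (hypothetical spec): an entry such as
        # 'qos:minIOPS' = '300' is split on ':' below, leaving the key
        # 'minIOPS', so qos becomes {'minIOPS': 300}.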
for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.sf_qos_keys:
qos[key] = int(value)
return qos
def _get_sf_volume(self, uuid, params):
data = self._issue_api_request('ListVolumesForAccount', params)
if 'result' not in data:
msg = _("Failed to get SolidFire Volume: %s") % data
raise exception.SolidFireAPIException(msg)
found_count = 0
sf_volref = None
for v in data['result']['volumes']:
# NOTE(jdg): In the case of "name" we can't
# update that on manage/import, so we use
# the uuid attribute
meta = v.get('attributes')
alt_id = meta.get('uuid', 'empty')
if uuid in v['name'] or uuid in alt_id:
found_count += 1
sf_volref = v
LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
"to cinder ID %(uuid)s.",
{'volume_id': v['volumeID'], 'uuid': uuid})
if found_count == 0:
# NOTE(jdg): Previously we would raise here, but there are cases
# where this might be a cleanup for a failed delete.
# Until we get better states we'll just log an error
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
if found_count > 1:
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
{'count': found_count,
'uuid': uuid})
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
return sf_volref
def _get_sf_snapshots(self, sf_volid=None):
params = {}
if sf_volid:
params = {'volumeID': sf_volid}
data = self._issue_api_request('ListSnapshots', params, version='6.0')
if 'result' not in data:
msg = _("Failed to get SolidFire Snapshot: %s") % data
raise exception.SolidFireAPIException(msg)
return data['result']['snapshots']
def _create_image_volume(self, context,
image_meta, image_service,
image_id):
        # NOTE(jdg): It's the caller's responsibility to ensure that
# the optional properties.virtual_size is set on the image
# before we get here
virt_size = int(image_meta['properties'].get('virtual_size'))
min_sz_in_bytes =\
math.ceil(virt_size / float(units.Gi)) * float(units.Gi)
min_sz_in_gig = math.ceil(min_sz_in_bytes / float(units.Gi))
attributes = {}
attributes['image_info'] = {}
attributes['image_info']['image_updated_at'] =\
image_meta['updated_at'].isoformat()
attributes['image_info']['image_name'] =\
image_meta['name']
attributes['image_info']['image_created_at'] =\
image_meta['created_at'].isoformat()
attributes['image_info']['image_id'] = image_meta['id']
params = {'name': 'OpenStackIMG-%s' % image_id,
'accountID': self.template_account_id,
'sliceCount': 1,
'totalSize': int(min_sz_in_bytes),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': {}}
sf_account = self._issue_api_request(
'GetAccountByID',
{'accountID': self.template_account_id})
template_vol = self._do_volume_create(sf_account, params)
tvol = {}
tvol['id'] = image_id
tvol['provider_location'] = template_vol['provider_location']
tvol['provider_auth'] = template_vol['provider_auth']
connector = 'na'
conn = self.initialize_connection(tvol, connector)
attach_info = super(SolidFireDriver, self)._connect_device(conn)
properties = 'na'
try:
image_utils.fetch_to_raw(context,
image_service,
image_id,
attach_info['device']['path'],
self.configuration.volume_dd_blocksize,
size=min_sz_in_gig)
except Exception as exc:
params['volumeID'] = template_vol['volumeID']
LOG.error(_LE('Failed image conversion during cache creation: %s'),
exc)
LOG.debug('Removing SolidFire Cache Volume (SF ID): %s',
template_vol['volumeID'])
self._detach_volume(context, attach_info, tvol, properties)
self._issue_api_request('DeleteVolume', params)
return
self._detach_volume(context, attach_info, tvol, properties)
sf_vol = self._get_sf_volume(image_id, params)
LOG.debug('Successfully created SolidFire Image Template '
'for image-id: %s', image_id)
return sf_vol
def _verify_image_volume(self, context, image_meta, image_service):
        # This method just verifies that IF we have a cache volume it is
        # still up to date with the image in Glance, i.e. an image-update
        # hasn't occurred since we grabbed it.
        # If it's out of date, just delete it and we'll create a new one.
        # In any other case we don't care and just return without doing
        # anything.
params = {'accountID': self.template_account_id}
sf_vol = self._get_sf_volume(image_meta['id'], params)
if sf_vol is None:
return
# Check updated_at field, delete copy and update if needed
if sf_vol['attributes']['image_info']['image_updated_at'] ==\
image_meta['updated_at'].isoformat():
return
else:
# Bummer, it's been updated, delete it
params = {'accountID': self.template_account_id}
params['volumeID'] = sf_vol['volumeID']
data = self._issue_api_request('DeleteVolume', params)
if 'result' not in data:
msg = _("Failed to delete SolidFire Image-Volume: %s") % data
raise exception.SolidFireAPIException(msg)
if not self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']):
msg = _("Failed to create SolidFire Image-Volume")
raise exception.SolidFireAPIException(msg)
def _get_sfaccounts_for_tenant(self, cinder_project_id):
data = self._issue_api_request('ListAccounts', {})
if 'result' not in data:
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
# Note(jdg): On SF we map account-name to OpenStack's tenant ID
# we use tenantID in here to get secondaries that might exist
# Also: we expect this to be sorted, so we get the primary first
# in the list
return sorted([acc for acc in data['result']['accounts'] if
cinder_project_id in acc['username']])
def _get_all_active_volumes(self, cinder_uuid=None):
params = {}
data = self._issue_api_request('ListActiveVolumes',
params)
if 'result' not in data:
msg = _("Failed get active SolidFire volumes: %s") % data
raise exception.SolidFireAPIException(msg)
if cinder_uuid:
deleted_vols = ([v for v in data['result']['volumes'] if
                             cinder_uuid in v['name']])
else:
deleted_vols = [v for v in data['result']['volumes']]
return deleted_vols
def _get_all_deleted_volumes(self, cinder_uuid=None):
params = {}
data = self._issue_api_request('ListDeletedVolumes',
params)
if 'result' not in data:
msg = _("Failed get Deleted SolidFire volumes: %s") % data
raise exception.SolidFireAPIException(msg)
if cinder_uuid:
deleted_vols = ([v for v in data['result']['volumes'] if
cinder_uuid in v['name']])
else:
deleted_vols = [v for v in data['result']['volumes']]
return deleted_vols
def _get_account_create_availability(self, accounts):
# we'll check both the primary and the secondary
# if it exists and return whichever one has count
# available.
        for acc in accounts:
            # Use the first account that still has room for another volume.
            if len(self._get_volumes_for_account(
                    acc['accountID'])) < self.max_volumes_per_account:
                return acc
        if len(accounts) == 1:
            sfaccount = self._create_sfaccount(accounts[0]['username'] + '_')
            return sfaccount
return None
def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None):
# ListVolumesForAccount gives both Active and Deleted
# we require the solidfire accountID, uuid of volume
# is optional
params = {'accountID': sf_account_id}
response = self._issue_api_request('ListVolumesForAccount',
params)
if cinder_uuid:
vlist = [v for v in response['result']['volumes'] if
cinder_uuid in v['name']]
else:
vlist = [v for v in response['result']['volumes']]
vlist = sorted(vlist, key=lambda k: k['volumeID'])
return vlist
def clone_image(self, context,
volume, image_location,
image_meta, image_service):
# Check out pre-requisites:
# Is template caching enabled?
if not self.configuration.sf_allow_template_caching:
return None, False
# Is the image owned by this tenant or public?
if ((not image_meta.get('is_public', False)) and
(image_meta['owner'] != volume['project_id'])):
LOG.warning(_LW("Requested image is not "
"accessible by current Tenant."))
return None, False
# Is virtual_size property set on the image?
if ((not image_meta.get('properties', None)) or
(not image_meta['properties'].get('virtual_size', None))):
LOG.info(_LI('Unable to create cache volume because image: %s '
'does not include properties.virtual_size'),
image_meta['id'])
return None, False
try:
self._verify_image_volume(context,
image_meta,
image_service)
except exception.SolidFireAPIException:
return None, False
account = self.configuration.sf_template_account_name
try:
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
account,
volume)
except exception.VolumeNotFound:
if self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']) is None:
# We failed, dump out
return None, False
# Ok, should be good to go now, try it again
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
account,
volume)
return model, True
def create_volume(self, volume):
"""Create volume on SolidFire device.
        The account is where CHAP settings are derived from; the volume is
        created and exported. Note that the new volume is immediately ready
        for use.
        One caveat here is that an existing user account must be specified
        in the API call to create a new volume. We use a fixed algorithm to
        determine account info based on the passed-in cinder volume object:
        first we check whether the account already exists (and use it); if it
        does not, we go ahead and create it.
"""
slice_count = 1
attributes = {}
qos = {}
if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
create_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'created_at': create_time}
if qos:
for k, v in qos.items():
attributes[k] = str(v)
sf_accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
if not sf_accounts:
sf_account = self._create_sfaccount(volume['project_id'])
else:
sf_account = self._get_account_create_availability(sf_accounts)
params = {'name': 'UUID-%s' % volume['id'],
'accountID': sf_account['accountID'],
'sliceCount': slice_count,
'totalSize': int(volume['size'] * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
# NOTE(jdg): Check if we're a migration tgt, if so
# use the old volume-id here for the SF Name
migration_status = volume.get('migration_status', None)
if migration_status and 'target' in migration_status:
k, v = migration_status.split(':')
params['name'] = 'UUID-%s' % v
params['attributes']['migration_uuid'] = volume['id']
params['attributes']['uuid'] = v
return self._do_volume_create(sf_account, params)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of an existing volume."""
(_data, _sfaccount, model) = self._do_clone_volume(
src_vref['id'],
src_vref['project_id'],
volume)
return model
def delete_volume(self, volume):
"""Delete SolidFire Volume from device.
        SolidFire allows multiple volumes with the same name;
        volumeID is what's guaranteed unique.
"""
accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
        if not accounts:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
LOG.error(_LE("This usually means the volume was never "
"successfully created."))
return
        sf_vol = None
        for acc in accounts:
            vols = self._get_volumes_for_account(acc['accountID'],
                                                 volume['id'])
            if vols:
                sf_vol = vols[0]
                break
if sf_vol is not None:
params = {'volumeID': sf_vol['volumeID']}
data = self._issue_api_request('DeleteVolume', params)
if 'result' not in data:
msg = _("Failed to delete SolidFire Volume: %s") % data
raise exception.SolidFireAPIException(msg)
else:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
def delete_snapshot(self, snapshot):
"""Delete the specified snapshot from the SolidFire cluster."""
sf_snap_name = 'UUID-%s' % snapshot['id']
accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
snap = None
for a in accounts:
params = {'accountID': a['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
None)
if snap:
params = {'snapshotID': snap['snapshotID']}
data = self._issue_api_request('DeleteSnapshot',
params,
version='6.0')
if 'result' not in data:
msg = (_("Failed to delete SolidFire Snapshot: %s") %
data)
raise exception.SolidFireAPIException(msg)
return
# Make sure it's not "old style" using clones as snaps
LOG.debug("Snapshot not found, checking old style clones.")
self.delete_volume(snapshot)
def create_snapshot(self, snapshot):
sfaccount = self._get_sfaccount(snapshot['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"create_snapshot operation!"), snapshot['volume_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
params = {'volumeID': sf_vol['volumeID'],
'name': 'UUID-%s' % snapshot['id']}
self._do_snapshot_create(params)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from the specified snapshot."""
(_data, _sfaccount, model) = self._do_clone_volume(
snapshot['id'],
snapshot['project_id'],
volume)
return model
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
        data.
"""
if refresh:
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
return self.cluster_stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"extend_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
params = {
'volumeID': sf_vol['volumeID'],
'totalSize': int(new_size * units.Gi)
}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
params = {}
# NOTE(jdg): The SF api provides an UNBELIEVABLE amount
# of stats data, this is just one of the calls
results = self._issue_api_request('GetClusterCapacity', params)
        if 'result' not in results:
            LOG.error(_LE('Failed to get updated stats'))
            return
        results = results['result']['clusterCapacity']
free_capacity =\
results['maxProvisionedSpace'] - results['usedSpace']
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'SolidFire Inc'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['total_capacity_gb'] =\
float(results['maxProvisionedSpace'] / units.Gi)
data['free_capacity_gb'] = float(free_capacity / units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['compression_percent'] =\
results['compressionPercent']
        data['deduplication_percent'] =\
results['deDuplicationPercent']
data['thin_provision_percent'] =\
results['thinProvisioningPercent']
self.cluster_stats = data
def attach_volume(self, context, volume,
instance_uuid, host_name,
mountpoint):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"attach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = volume.get('attach_time', None)
attributes['attached_to'] = instance_uuid
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
data = self._issue_api_request('ModifyVolume', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
def detach_volume(self, context, volume, attachment=None):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"detach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = None
attributes['attached_to'] = None
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
data = self._issue_api_request('ModifyVolume', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
def accept_transfer(self, context, volume,
new_user, new_project):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"accept_transfer operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
if new_project != volume['project_id']:
# do a create_sfaccount here as this tenant
# may not exist on the cluster yet
sfaccount = self._create_sfaccount(new_project)
params = {
'volumeID': sf_vol['volumeID'],
'accountID': sfaccount['accountID']
}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
volume['project_id'] = new_project
volume['user_id'] = new_user
return self.target_driver.ensure_export(context, volume, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
qos = {}
attributes = {}
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['retyped_at'] = timeutils.utcnow().isoformat()
params = {'volumeID': sf_vol['volumeID']}
qos = self._set_qos_by_volume_type(ctxt, new_type['id'])
if qos:
params['qos'] = qos
for k, v in qos.items():
attributes[k] = str(v)
params['attributes'] = attributes
self._issue_api_request('ModifyVolume', params)
return True
def manage_existing(self, volume, external_ref):
"""Manages an existing SolidFire Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant.
"""
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
if sfid is None:
raise exception.SolidFireAPIException(_("Manage existing volume "
"requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
'limit': 1}
data = self._issue_api_request('ListActiveVolumes', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
sf_ref = data['result']['volumes'][0]
sfaccount = self._create_sfaccount(volume['project_id'])
attributes = {}
qos = {}
if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
import_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'os_imported_at': import_time,
'old_name': sfname}
if qos:
for k, v in qos.items():
attributes[k] = str(v)
params = {'name': volume['name'],
'volumeID': sf_ref['volumeID'],
'accountID': sfaccount['accountID'],
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
return self._get_model_info(sfaccount, sf_ref['volumeID'])
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'name': <name of existing volume on SF Cluster>}
"""
sfid = external_ref.get('source-id', None)
if sfid is None:
raise exception.SolidFireAPIException(_("Manage existing get size "
"requires 'id'."))
params = {'startVolumeID': int(sfid),
'limit': 1}
data = self._issue_api_request('ListActiveVolumes', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
sf_ref = data['result']['volumes'][0]
return int(sf_ref['totalSize']) / int(units.Gi)
def unmanage(self, volume):
"""Mark SolidFire Volume as unmanaged (export from Cinder)."""
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"unmanage operation!"), volume['id'])
raise exception.SolidFireAPIException(_("Failed to find account "
"for volume."))
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
export_time = timeutils.utcnow().isoformat()
attributes = sf_vol['attributes']
attributes['os_exported_at'] = export_time
params = {'volumeID': int(sf_vol['volumeID']),
'attributes': attributes}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
# #### Interface methods for transport layer #### #
# TODO(jdg): SolidFire can mix and do iSCSI and FC on the
# same cluster, we'll modify these later to check based on
# the volume info if we need an FC target driver or an
# iSCSI target driver
def ensure_export(self, context, volume):
return self.target_driver.ensure_export(context, volume, None)
def create_export(self, context, volume):
return self.target_driver.create_export(
context,
volume,
None)
def remove_export(self, context, volume):
return self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class SolidFireISCSI(iscsi_driver.SanISCSITarget):
def __init__(self, *args, **kwargs):
super(SolidFireISCSI, self).__init__(*args, **kwargs)
self.sf_driver = kwargs.get('solidfire_driver')
def _do_iscsi_export(self, volume):
sfaccount = self.sf_driver._get_sfaccount(volume['project_id'])
model_update = {}
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
sfaccount['targetSecret']))
return model_update
def create_export(self, context, volume, volume_path):
return self._do_iscsi_export(volume)
def ensure_export(self, context, volume, volume_path):
try:
return self._do_iscsi_export(volume)
except exception.SolidFireAPIException:
return None
# Following are abc's that we make sure are caught and
# paid attention to. In our case we don't use them
# so just stub them out here.
def remove_export(self, context, volume):
pass
def terminate_connection(self, volume, connector, **kwargs):
pass
|
|
"""
kombu.serialization
===================
Serialization utilities.
"""
from __future__ import absolute_import
import codecs
import os
import sys
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError: # pragma: no cover
cpickle = None # noqa
from collections import namedtuple
from contextlib import contextmanager
from .exceptions import (
ContentDisallowed, DecodeError, EncodeError, SerializerNotInstalled
)
from .five import BytesIO, reraise, text_t
from .utils import entrypoints
from .utils.encoding import str_to_bytes, bytes_t
__all__ = ['pickle', 'loads', 'dumps', 'register', 'unregister']
SKIP_DECODE = frozenset(['binary', 'ascii-8bit'])
TRUSTED_CONTENT = frozenset(['application/data', 'application/text'])
if sys.platform.startswith('java'): # pragma: no cover
def _decode(t, coding):
return codecs.getdecoder(coding)(t)[0]
else:
_decode = codecs.decode
pickle = cpickle or pypickle
pickle_load = pickle.load
#: Kombu requires Python 2.5 or later so we use protocol 2 by default.
#: There's a new protocol (3) but this is only supported by Python 3.
pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 2))
codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder'))
@contextmanager
def _reraise_errors(wrapper,
include=(Exception, ), exclude=(SerializerNotInstalled, )):
try:
yield
except exclude:
raise
except include as exc:
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
def pickle_loads(s, load=pickle_load):
# used to support buffer objects
return load(BytesIO(s))
def parenthesize_alias(first, second):
return '%s (%s)' % (first, second) if first else second
class SerializerRegistry(object):
"""The registry keeps track of serialization methods."""
def __init__(self):
self._encoders = {}
self._decoders = {}
self._default_encode = None
self._default_content_type = None
self._default_content_encoding = None
self._disabled_content_types = set()
self.type_to_name = {}
self.name_to_type = {}
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
if encoder:
self._encoders[name] = codec(
content_type, content_encoding, encoder,
)
if decoder:
self._decoders[content_type] = decoder
self.type_to_name[content_type] = name
self.name_to_type[name] = content_type
def enable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.discard(name)
def disable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.add(name)
def unregister(self, name):
try:
content_type = self.name_to_type[name]
self._decoders.pop(content_type, None)
self._encoders.pop(name, None)
self.type_to_name.pop(content_type, None)
self.name_to_type.pop(name, None)
except KeyError:
raise SerializerNotInstalled(
'No encoder/decoder installed for {0}'.format(name))
def _set_default_serializer(self, name):
"""
Set the default serialization method used by this library.
:param name: The name of the registered serialization method.
For example, `json` (default), `pickle`, `yaml`, `msgpack`,
or any custom methods registered using :meth:`register`.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
'No encoder installed for {0}'.format(name))
def dumps(self, data, serializer=None):
if serializer == 'raw':
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
'No encoder installed for {0}'.format(serializer))
# If a raw string was sent, assume binary encoding
# (it's likely either ASCII or a raw binary file, and a character
        # set of 'binary' will encompass both, even if not ideal).
if not serializer and isinstance(data, bytes_t):
# In Python 3+, this would be "bytes"; allow binary data to be
# sent as a message without getting encoder errors
return 'application/data', 'binary', data
# For Unicode objects, force it into a string
if not serializer and isinstance(data, text_t):
with _reraise_errors(EncodeError, exclude=()):
payload = data.encode('utf-8')
return 'text/plain', 'utf-8', payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
with _reraise_errors(EncodeError):
payload = encoder(data)
return content_type, content_encoding, payload
encode = dumps # XXX compat
def loads(self, data, content_type, content_encoding,
accept=None, force=False, _trusted_content=TRUSTED_CONTENT):
content_type = content_type or 'application/data'
if accept is not None:
if content_type not in _trusted_content \
and content_type not in accept:
raise self._for_untrusted_content(content_type, 'untrusted')
else:
if content_type in self._disabled_content_types and not force:
raise self._for_untrusted_content(content_type, 'disabled')
content_encoding = (content_encoding or 'utf-8').lower()
if data:
decode = self._decoders.get(content_type)
if decode:
with _reraise_errors(DecodeError):
return decode(data)
if content_encoding not in SKIP_DECODE and \
not isinstance(data, text_t):
with _reraise_errors(DecodeError):
return _decode(data, content_encoding)
return data
decode = loads # XXX compat
def _for_untrusted_content(self, ctype, why):
return ContentDisallowed(
'Refusing to deserialize {0} content of type {1}'.format(
why,
parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype),
),
)
#: Global registry of serializers/deserializers.
registry = SerializerRegistry()
"""
.. function:: dumps(data, serializer=default_serializer)
Serialize a data structure into a string suitable for sending
as an AMQP message body.
:param data: The message data to send. Can be a list,
dictionary or a string.
:keyword serializer: An optional string representing
the serialization method you want the data marshalled
into. (For example, `json`, `raw`, or `pickle`).
If :const:`None` (default), then json will be used, unless
`data` is a :class:`str` or :class:`unicode` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if `serializer` is specified, then that
serialization method will be used even if a :class:`str`
or :class:`unicode` object is passed in.
:returns: A three-item tuple containing the content type
(e.g., `application/json`), content encoding, (e.g.,
`utf-8`) and a string containing the serialized
data.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
dumps = encode = registry.encode # XXX encode is a compat alias
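# Hedged usage sketch (not part of the original module): dumps() returns a
# (content_type, content_encoding, payload) triple, for example:
#
#     >>> dumps({'hello': 'world'}, serializer='json')
#     ('application/json', 'utf-8', '{"hello": "world"}')
#
# The exact payload string depends on the JSON backend anyjson selects.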
"""
.. function:: loads(data, content_type, content_encoding):
Deserialize a data stream as serialized using `dumps`
based on `content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data.
(e.g., `application/json`).
:param content_encoding: The content-encoding of the data.
(e.g., `utf-8`, `binary`, or `us-ascii`).
:returns: The unserialized data.
"""
loads = decode = registry.decode # XXX decode is a compat alias
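# Hedged usage sketch (not part of the original module): loads() reverses
# dumps() when given the same content type and encoding, for example:
#
#     >>> ctype, cenc, payload = dumps({'n': 1}, serializer='json')
#     >>> loads(payload, ctype, cenc)
#     {'n': 1}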
"""
.. function:: register(name, encoder, decoder, content_type,
content_encoding='utf-8'):
Register a new encoder/decoder.
:param name: A convenience name for the serialization method.
:param encoder: A method that will be passed a python data structure
and should return a string representing the serialized data.
If :const:`None`, then only a decoder will be registered. Encoding
will not be possible.
:param decoder: A method that will be passed a string representing
serialized data and should return a python data structure.
If :const:`None`, then only an encoder will be registered.
Decoding will not be possible.
:param content_type: The mime-type describing the serialized
structure.
:param content_encoding: The content encoding (character set) that
the `decoder` method will be returning. Will usually be
`utf-8`, `us-ascii`, or `binary`.
"""
register = registry.register
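# Minimal sketch of registering a custom serializer; the names used here
# ('sorted-json', my_dumps) are illustrative only:
#
#     import json
#
#     def my_dumps(obj):
#         return json.dumps(obj, sort_keys=True)
#
#     register('sorted-json', my_dumps, json.loads,
#              content_type='application/x-sorted-json',
#              content_encoding='utf-8')
#
# Afterwards dumps(obj, serializer='sorted-json') produces deterministic
# payloads and loads() can decode 'application/x-sorted-json' content.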
"""
.. function:: unregister(name):
Unregister registered encoder/decoder.
:param name: Registered serialization method name.
"""
unregister = registry.unregister
def raw_encode(data):
"""Special case serializer."""
content_type = 'application/data'
payload = data
if isinstance(payload, text_t):
content_encoding = 'utf-8'
with _reraise_errors(EncodeError, exclude=()):
payload = payload.encode(content_encoding)
else:
content_encoding = 'binary'
return content_type, content_encoding, payload
def register_json():
"""Register a encoder/decoder for JSON serialization."""
from anyjson import loads as json_loads, dumps as json_dumps
def _loads(obj):
if isinstance(obj, bytes_t):
obj = obj.decode()
return json_loads(obj)
registry.register('json', json_dumps, _loads,
content_type='application/json',
content_encoding='utf-8')
def register_yaml():
"""Register a encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates"""
try:
import yaml
registry.register('yaml', yaml.safe_dump, yaml.safe_load,
content_type='application/x-yaml',
content_encoding='utf-8')
except ImportError:
def not_available(*args, **kwargs):
"""In case a client receives a yaml message, but yaml
isn't installed."""
raise SerializerNotInstalled(
'No decoder installed for YAML. Install the PyYAML library')
registry.register('yaml', None, not_available, 'application/x-yaml')
if sys.version_info[0] == 3: # pragma: no cover
def unpickle(s):
return pickle_loads(str_to_bytes(s))
else:
unpickle = pickle_loads # noqa
def register_pickle():
"""The fastest serialization method, but restricts
you to python clients."""
def pickle_dumps(obj, dumper=pickle.dumps):
return dumper(obj, protocol=pickle_protocol)
registry.register('pickle', pickle_dumps, unpickle,
content_type='application/x-python-serialize',
content_encoding='binary')
def register_msgpack():
"""See http://msgpack.sourceforge.net/"""
try:
try:
from msgpack import packb as pack, unpackb
unpack = lambda s: unpackb(s, encoding='utf-8')
except ImportError:
# msgpack < 0.2.0 and Python 2.5
from msgpack import packs as pack, unpacks as unpack # noqa
registry.register(
'msgpack', pack, unpack,
content_type='application/x-msgpack',
content_encoding='binary')
except (ImportError, ValueError):
def not_available(*args, **kwargs):
"""In case a client receives a msgpack message, but msgpack
isn't installed."""
raise SerializerNotInstalled(
'No decoder installed for msgpack. '
'Please install the msgpack library')
registry.register('msgpack', None, not_available,
'application/x-msgpack')
# Register the base serialization methods.
register_json()
register_pickle()
register_yaml()
register_msgpack()
# Default serializer is 'json'
registry._set_default_serializer('json')
_setupfuns = {
'json': register_json,
'pickle': register_pickle,
'yaml': register_yaml,
'msgpack': register_msgpack,
'application/json': register_json,
'application/x-yaml': register_yaml,
'application/x-python-serialize': register_pickle,
'application/x-msgpack': register_msgpack,
}
def enable_insecure_serializers(choices=['pickle', 'yaml', 'msgpack']):
"""Enable serializers that are considered to be unsafe.
Will enable ``pickle``, ``yaml`` and ``msgpack`` by default,
but you can also specify a list of serializers (by name or content type)
to enable.
"""
for choice in choices:
try:
registry.enable(choice)
except KeyError:
pass
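# Usage note (illustrative): serializer names and content types are
# interchangeable here, e.g. enable_insecure_serializers(['pickle']) and
# enable_insecure_serializers(['application/x-python-serialize']) both
# re-enable the pickle deserializer.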
def disable_insecure_serializers(allowed=['json']):
"""Disable untrusted serializers.
Will disable all serializers except ``json``
or you can specify a list of deserializers to allow.
.. note::
Producers will still be able to serialize data
in these formats, but consumers will not accept
incoming data using the untrusted content types.
"""
for name in registry._decoders:
registry.disable(name)
if allowed is not None:
for name in allowed:
registry.enable(name)
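# Hedged example (not part of the original module): with only json allowed,
# producers can still serialize pickle but consumers refuse to decode it:
#
#     disable_insecure_serializers(allowed=['json'])
#     ctype, cenc, payload = dumps({'x': 1}, serializer='pickle')  # still works
#     loads(payload, ctype, cenc)  # raises ContentDisallowed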
# Insecure serializers are disabled by default since v3.0
disable_insecure_serializers()
# Load entrypoints from installed extensions
for ep, args in entrypoints('kombu.serializers'): # pragma: no cover
register(ep.name, *args)
def prepare_accept_content(l, name_to_type=registry.name_to_type):
if l is not None:
return set(n if '/' in n else name_to_type[n] for n in l)
return l
|
|
from pandas import read_csv
from nlp.common import prt
from time import time
from nlp.common import exists
import cPickle as pickle
import gzip
import codecs
from os.path import splitext, join
from collections import Counter
from nlp.morpho.mystem import get_pos
from collections import defaultdict
from sys import stderr
rt_fpath = "/home/ubuntu/russe/annotate/rt-test.csv"
ae_fpath = "/home/ubuntu/russe/annotate/ae-test.csv"
ae2_fpath = "/home/ubuntu/russe/annotate/ae2-test.csv"
freq_fpath = "/home/ubuntu/russe/freq.csv"
pmi_w1_fpath = "/home/ubuntu/russe/w1-pmi.csv"
pmi_fpath = "/home/ubuntu/russe/pmi.csv.gz" # 265058509 lines
MAX_PMI = -0.02
MIN_LEN = 3
MULT_COEFF = 2
def print_rel_stat(s, title):
print title
pos = 0
neg = 0
for x in s:
print x, s[x]
if x == "random": neg += s[x]
else: pos += s[x]
print "pos:", pos
print "neg:", neg
def filter_unbalanced_words(train_fpath, print_skipped=False):
out_fpath = splitext(train_fpath)[0] + "-out.csv"
with codecs.open(out_fpath, "w", "utf-8") as out:
print >> out, "word1,word2,related,sim"
df = read_csv(train_fpath, ',', encoding='utf8')
word_num = 0
rel_num = 0
rel_skipped_num = 0
rel_used_num = 0
s_total = Counter()
s_skipped = Counter()
s_used = Counter()
for w1, rows in df.groupby(["word1"]):
word_num += 1
# calculate distribution of relations
s = Counter()
for i, row in rows.iterrows(): s[row.sim] += 1
# save word relations if distribution is ok
if "random" in s and ("syn" in s or "hyper" in s or "hypo" in s or "assoc" in s):
for i, row in rows.iterrows():
rel_num += 1
print >> out, "%s,%s,%s,%d" % (
row.word1, row.word2, row.sim, 0 if row.sim == "random" else 1)
rel_used_num += 1
s_used += s
else:
for i, row in rows.iterrows():
if print_skipped: print >> stderr, "%s,%s,%s,%d" % (
row.word1, row.word2, row.sim, 0 if row.sim == "random" else 1)
rel_skipped_num += 1
s_skipped += s
s_total += s
print "words num:", word_num
print "relations num:", rel_num
print "relations skipped num:", rel_skipped_num
print "relations used num:", rel_used_num
print_rel_stat(s_total, "\ntotal relations:")
print_rel_stat(s_skipped, "\nskipped relations:")
print_rel_stat(s_used, "\nused relations:")
def merge_pos_and_neg(pos_fpath, neg_fpath, freq_fpath):
output_fpath = splitext(pos_fpath)[0] + "-final.csv"
# load resources
pos_df = read_csv(pos_fpath, ',', encoding='utf8')
neg_df = read_csv(neg_fpath, ';', encoding='utf8')
freq_df = read_csv(freq_fpath, '\t', encoding='utf8')
freq = {r["word"]: r["freq"] for i, r in freq_df.iterrows()}
pos = defaultdict(dict)
for i, r in pos_df.iterrows():
pos[r["word1"]][r["word2"]] = freq[r["word2"]] if r["word2"] in freq else 1
neg = defaultdict(dict)
for i, r in neg_df.iterrows():
neg[r["word1"]][r["word2"]] = freq[r["word2"]] if r["word2"] in freq else 1
neg[r["word2"]][r["word1"]] = freq[r["word1"]] if r["word1"] in freq else 1
# merge pos and neg
w_skipped = 0
rel_num = 0
pos_skipped = 0
res = defaultdict(dict)
for i, w1 in enumerate(pos):
if w1 not in neg:
print w1, "is missing in neg"
w_skipped += 1
continue
else:
rlen = min(len(pos[w1]), len(neg[w1]))
if len(pos[w1]) > len(neg[w1]):
print w1, "skipping ", len(pos[w1]) - len(neg[w1]), "of", len(pos[w1]), "positives"
pos_skipped += len(pos[w1]) - len(neg[w1])
if rlen < 1:
print w1, "has no relations"
w_skipped += 1
continue
pos_lst = sorted(pos[w1], key=pos[w1].get, reverse=True)
neg_lst = sorted(neg[w1], key=neg[w1].get, reverse=True)
for i in range(rlen):
res[w1][pos_lst[i]] = 1
res[w1][neg_lst[i]] = 0
rel_num += 2
with codecs.open(output_fpath, "w", "utf-8") as output:
for x in res:
for y in sorted(res[x], key=res[x].get, reverse=True):
print >> output, "%s,%s,%d" % (x, y, res[x][y])
print "# relations:", rel_num
print "# word skipped:", w_skipped
print "# pos skipped:", pos_skipped
print "output:", output_fpath
def generate_negatives(relations_fpath, freq_fpath, pmi_w1_fpath, pmi_fpath, mult_coeff=MULT_COEFF):
print "relations:", relations_fpath
print "freq dictionary:", freq_fpath
print "pmi w1:", pmi_w1_fpath
print "pmi:", pmi_fpath
print "multiplication coefficient:", mult_coeff
tic = time()
output_fpath = splitext(relations_fpath)[0] + "-mc" + str(mult_coeff) + "-out.csv"
common_fpath = splitext(relations_fpath)[0] + "-common.csv"
freq_pkl_fpath = freq_fpath + ".pkl"
pos_df = read_csv(relations_fpath, ',', encoding='utf8')
rel_freq = Counter([w for w in pos_df["word1"]])
if exists(freq_pkl_fpath):
print "loading frequency dictionary from:", freq_pkl_fpath
freq = pickle.load(open(freq_pkl_fpath, "rb"))
else:
print "building frequency dictionary from:", freq_fpath
freq_df = read_csv(freq_fpath, '\t', encoding='utf8')
freq = {row["word"]: row["freq"] for i, row in freq_df.iterrows()}
pickle.dump(freq, open(freq_pkl_fpath, "wb"))
print "frequency dictionary saved to:", freq_pkl_fpath
w1_df = read_csv(pmi_w1_fpath, ',', encoding='utf8')
w1_pmi = {w for w in w1_df.word}
w1_rel = set(rel_freq.keys())
common = w1_rel.intersection(w1_pmi)
print "w1 total:", len(w1_rel)
print "w1 found:", len(common)
# save common relations and load them
idx2del = []
for i, row in pos_df.iterrows():
#print row
if "word1" in row and row["word1"] not in common: idx2del.append(i)
common_df = pos_df.copy()
    common_df = common_df.drop(idx2del)
    common_df.to_csv(common_fpath, sep=";", encoding="utf-8", index=False)
print "common relations (pmi && dict):", common_fpath
positives = defaultdict(list)
for w1, rows in common_df.groupby(["word1"]):
for i, row in rows.iterrows(): positives[w1].append(row["word2"])
# find all related words
with codecs.open(output_fpath, "w", "utf-8") as out:
used_words = {}
out.write("word1;word2;sim;freq\n")
w1_prev = ""
rels = []
for i, line in enumerate(gzip.open(pmi_fpath, "rb")):
if i % 1000000 == 0: print i / 1000000
# is entry good?
f = line.split("\t")
w1 = f[0].decode("utf-8")
if len(f) < 3 or w1 not in common: continue
sim = float(f[2])
w2 = f[1].decode("utf-8")
if sim > MAX_PMI or w2 not in freq: continue
pos = get_pos(w2)[0]
if pos != "S" or len(w2) < MIN_LEN or w2 in used_words: continue
# good entry
if w1 != w1_prev and w1_prev != "":
print ".",
r = {(w,s): freq[w] for w, s in rels}
i = 0
rnd_num = mult_coeff * rel_freq[w1_prev]
for w, s in sorted(r, key=r.get, reverse=True):
out.write("%s;%s;%s;%s\n" % (w1_prev, w, s, freq[w]))
used_words[w] = 1
i += 1
if i >= rnd_num: break
rels = [(w2, sim)]
else:
rels.append((w2, sim))
w1_prev = w1
print "negatives:", output_fpath
print "elapsed: %d sec." % (time() - tic)
# Generate negatives like this:
"""
dir_fpath = "/home/sasha/tmp/russe"
freq_fpath = join(dir_fpath, "freq.csv")
pmi_w1_fpath = join(dir_fpath, "w1-pmi.csv")
pmi_fpath = join(dir_fpath, "pmi.csv.gz")
for f in ["rt-train.csv","ae-train.csv", "ae2-train.csv"]:
generate_negatives(join(dir_fpath, f), freq_fpath, pmi_w1_fpath, pmi_fpath, mult_coeff=1.5)
for f in ["rt-train.csv","ae-train.csv", "ae2-train.csv"]:
generate_negatives(join(dir_fpath, f), freq_fpath, pmi_w1_fpath, pmi_fpath, mult_coeff=2)
for f in ["rt-train.csv","ae-train.csv", "ae2-train.csv"]:
generate_negatives(join(dir_fpath, f), freq_fpath, pmi_w1_fpath, pmi_fpath, mult_coeff=1)
"""
# Merge positives and negatives like this:
"""
neg_fpath = "/home/sasha/russe/data/release/test/neg0.csv"
freq_fpath = "/home/sasha/russe/data/freq/wiki-freq-short3.csv"
pos_fpath = "/home/sasha/russe/data/release/test/ae-test.csv"
merge_pos_and_neg(pos_fpath, neg_fpath, freq_fpath)
pos_fpath = "/home/sasha/russe/data/release/test/ae2-test.csv"
merge_pos_and_neg(pos_fpath, neg_fpath, freq_fpath)
pos_fpath = "/home/sasha/russe/data/release/test/rt-test.csv"
merge_pos_and_neg(pos_fpath, neg_fpath, freq_fpath)
"""
|
|
"""
Test lldb core component: SourceManager.
Test cases:
o test_display_source_python:
Test display of source using the SBSourceManager API.
o test_modify_source_file_while_debugging:
Test the caching mechanism of the source manager.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
def ansi_underline_surround_regex(inner_regex_text):
# return re.compile(r"\[4m%s\[0m" % inner_regex_text)
return "4.+\033\\[4m%s\033\\[0m" % inner_regex_text
def ansi_color_surround_regex(inner_regex_text):
return "\033\\[3[0-7]m%s\033\\[0m" % inner_regex_text
class SourceManagerTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.file = self.getBuildArtifact("main-copy.c")
self.line = line_number("main.c", '// Set break point at this line.')
def get_expected_stop_column_number(self):
"""Return the 1-based column number of the first non-whitespace
character in the breakpoint source line."""
stop_line = get_line(self.file, self.line)
# The number of spaces that must be skipped to get to the first non-
# whitespace character --- where we expect the debugger breakpoint
# column to be --- is equal to the number of characters that get
# stripped off the front when we lstrip it, plus one to specify
# the character column after the initial whitespace.
return len(stop_line) - len(stop_line.lstrip()) + 1
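    # Worked example (illustrative): for a breakpoint line of
    # '    printf("Hello world.\n");' the lstrip() call removes 4 leading
    # spaces, so the expected stop column is 4 + 1 = 5.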
def do_display_source_python_api(self, use_color, needle_regex, highlight_source=False):
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process, and do not stop at the entry point.
args = None
envp = None
process = target.LaunchSimple(
args, envp, self.get_process_working_directory())
self.assertIsNotNone(process)
#
# Exercise Python APIs to display source lines.
#
# Setup whether we should use ansi escape sequences, including color
# and styles such as underline.
self.dbg.SetUseColor(use_color)
# Disable syntax highlighting if needed.
self.runCmd("settings set highlight-source " + str(highlight_source).lower())
filespec = lldb.SBFileSpec(self.file, False)
source_mgr = self.dbg.GetSourceManager()
# Use a string stream as the destination.
stream = lldb.SBStream()
column = self.get_expected_stop_column_number()
context_before = 2
context_after = 2
current_line_prefix = "=>"
source_mgr.DisplaySourceLinesWithLineNumbersAndColumn(
filespec, self.line, column, context_before, context_after,
current_line_prefix, stream)
# 2
# 3 int main(int argc, char const *argv[]) {
# => 4 printf("Hello world.\n"); // Set break point at this line.
# 5 return 0;
# 6 }
self.expect(stream.GetData(), "Source code displayed correctly:\n" + stream.GetData(),
exe=False,
patterns=['=> %d.*Hello world' % self.line,
needle_regex])
# Boundary condition testings for SBStream(). LLDB should not crash!
stream.Print(None)
stream.RedirectToFile(None, True)
@add_test_categories(['pyapi'])
def test_display_source_python_dumb_terminal(self):
"""Test display of source using the SBSourceManager API, using a
dumb terminal and thus no color support (the default)."""
use_color = False
self.do_display_source_python_api(use_color, r"\s+\^")
@add_test_categories(['pyapi'])
def test_display_source_python_ansi_terminal(self):
"""Test display of source using the SBSourceManager API, using a
dumb terminal and thus no color support (the default)."""
use_color = True
underline_regex = ansi_underline_surround_regex(r"printf")
self.do_display_source_python_api(use_color, underline_regex)
@add_test_categories(['pyapi'])
def test_display_source_python_ansi_terminal_syntax_highlighting(self):
"""Test display of source using the SBSourceManager API and check for
the syntax highlighted output"""
use_color = True
        syntax_highlighting = True
# Just pick 'int' as something that should be colored.
color_regex = ansi_color_surround_regex("int")
self.do_display_source_python_api(use_color, color_regex, syntax_highlighting)
# Same for 'char'.
color_regex = ansi_color_surround_regex("char")
self.do_display_source_python_api(use_color, color_regex, syntax_highlighting)
# Test that we didn't color unrelated identifiers.
self.do_display_source_python_api(use_color, r" main\(", syntax_highlighting)
self.do_display_source_python_api(use_color, r"\);", syntax_highlighting)
def test_move_and_then_display_source(self):
"""Test that target.source-map settings work by moving main.c to hidden/main.c."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Move main.c to hidden/main.c.
hidden = self.getBuildArtifact("hidden")
lldbutil.mkdir_p(hidden)
main_c_hidden = os.path.join(hidden, "main-copy.c")
os.rename(self.file, main_c_hidden)
if self.TraceOn():
system([["ls"]])
system([["ls", "hidden"]])
# Set source remapping with invalid replace path and verify we get an
# error
self.expect(
"settings set target.source-map /a/b/c/d/e /q/r/s/t/u",
error=True,
substrs=['''error: the replacement path doesn't exist: "/q/r/s/t/u"'''])
# 'make -C' has resolved current directory to its realpath form.
builddir_real = os.path.realpath(self.getBuildDir())
hidden_real = os.path.realpath(hidden)
# Set target.source-map settings.
self.runCmd("settings set target.source-map %s %s" %
(builddir_real, hidden_real))
# And verify that the settings work.
self.expect("settings show target.source-map",
substrs=[builddir_real, hidden_real])
# Display main() and verify that the source mapping has been kicked in.
self.expect("source list -n main", SOURCE_DISPLAYED_CORRECTLY,
substrs=['Hello world'])
def test_modify_source_file_while_debugging(self):
"""Modify a source file while debugging the executable."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main-copy.c", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'main-copy.c:%d' % self.line,
'stop reason = breakpoint'])
# Display some source code.
self.expect(
"source list -f main-copy.c -l %d" %
self.line,
SOURCE_DISPLAYED_CORRECTLY,
substrs=['Hello world'])
# The '-b' option shows the line table locations from the debug information
# that indicates valid places to set source level breakpoints.
# The file to display is implicit in this case.
self.runCmd("source list -l %d -c 3 -b" % self.line)
output = self.res.GetOutput().splitlines()[0]
# If the breakpoint set command succeeded, we should expect a positive number
# of breakpoints for the current line, i.e., self.line.
import re
        m = re.search(r'^\[(\d+)\].*// Set break point at this line.', output)
if not m:
self.fail("Fail to display source level breakpoints")
self.assertTrue(int(m.group(1)) > 0)
# Read the main.c file content.
with io.open(self.file, 'r', newline='\n') as f:
original_content = f.read()
if self.TraceOn():
print("original content:", original_content)
# Modify the in-memory copy of the original source code.
new_content = original_content.replace('Hello world', 'Hello lldb', 1)
# Modify the source code file.
with io.open(self.file, 'w', newline='\n') as f:
time.sleep(1)
f.write(new_content)
if self.TraceOn():
print("new content:", new_content)
print(
"os.path.getmtime() after writing new content:",
os.path.getmtime(self.file))
# Display the source code again. We should see the updated line.
self.expect(
"source list -f main-copy.c -l %d" %
self.line,
SOURCE_DISPLAYED_CORRECTLY,
substrs=['Hello lldb'])
def test_set_breakpoint_with_absolute_path(self):
self.build()
hidden = self.getBuildArtifact("hidden")
lldbutil.mkdir_p(hidden)
# 'make -C' has resolved current directory to its realpath form.
builddir_real = os.path.realpath(self.getBuildDir())
hidden_real = os.path.realpath(hidden)
self.runCmd("settings set target.source-map %s %s" %
(builddir_real, hidden_real))
exe = self.getBuildArtifact("a.out")
main = os.path.join(builddir_real, "hidden", "main-copy.c")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, main, self.line, num_expected_locations=1, loc_exact=False)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'main-copy.c:%d' % self.line,
'stop reason = breakpoint'])
|
|
"""Metrics utility functions."""
import enum
import io
from typing import Tuple
import imageio
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
class EvaluationMetric(enum.Enum):
"""Evaluation metric kinds."""
def _generate_next_value_(name, start, count, last_values):
return name.lower()
ACCURACY = enum.auto()
WEIGHTED_F1_SCORE = enum.auto()
WEIGHTED_F1_SCORE_ERROR_ONLY = enum.auto()
MACRO_F1_SCORE = enum.auto()
BINARY_F1_SCORE = enum.auto()
BINARY_AUC = enum.auto()
BINARY_RECALL_AT_90 = enum.auto()
CONFUSION_MATRIX = enum.auto()
INSTRUCTION_POINTER = enum.auto()
LOCALIZATION_ACCURACY = enum.auto()
def all_metric_names() -> Tuple[str, ...]:
  """Returns a tuple of all evaluation metric names."""
return tuple(m.value for m in EvaluationMetric)
def evaluate(targets, predictions, logits, num_classes,
localization_targets, localization_num_targets, localization_predictions,
eval_metric_names, info):
# Diagnose unknown metrics.
unknown_metric_names = set(eval_metric_names).difference(all_metric_names())
if unknown_metric_names:
raise ValueError(f'Unknown metric names: {unknown_metric_names}')
# Compute metrics.
# logits.shape: num_eval_examples, num_classes
results = {}
if EvaluationMetric.ACCURACY.value in eval_metric_names:
results[EvaluationMetric.ACCURACY.value] = (
jnp.sum(predictions == targets) / jnp.sum(jnp.ones_like(targets)))
if EvaluationMetric.MACRO_F1_SCORE.value in eval_metric_names:
results[EvaluationMetric.MACRO_F1_SCORE.value] = metrics.f1_score(
targets, predictions, average='macro')
if EvaluationMetric.WEIGHTED_F1_SCORE.value in eval_metric_names:
results[EvaluationMetric.WEIGHTED_F1_SCORE.value] = metrics.f1_score(
targets, predictions, average='weighted')
if EvaluationMetric.BINARY_F1_SCORE.value in eval_metric_names:
results[EvaluationMetric.BINARY_F1_SCORE.value] = compute_binary_f1_score(
targets, logits, info)
if EvaluationMetric.BINARY_AUC.value in eval_metric_names:
results[EvaluationMetric.BINARY_AUC.value] = compute_binary_auc(
targets, logits, info)
if EvaluationMetric.BINARY_RECALL_AT_90.value in eval_metric_names:
results[EvaluationMetric.BINARY_RECALL_AT_90.value] = compute_recall_at_precision(
targets, logits, info, target_precision=0.90)
if EvaluationMetric.WEIGHTED_F1_SCORE_ERROR_ONLY.value in eval_metric_names:
results[EvaluationMetric.WEIGHTED_F1_SCORE_ERROR_ONLY.value] = compute_weighted_f1_score_error_only(
targets, predictions, info)
if EvaluationMetric.CONFUSION_MATRIX.value in eval_metric_names and num_classes < 40:
results[EvaluationMetric.CONFUSION_MATRIX.value] = metrics.confusion_matrix(
targets,
predictions,
labels=range(num_classes),
normalize='true')
if EvaluationMetric.LOCALIZATION_ACCURACY.value in eval_metric_names:
localization_accuracy = compute_localization_accuracy(
localization_targets, localization_num_targets, localization_predictions
)
if localization_accuracy is not None:
results[EvaluationMetric.LOCALIZATION_ACCURACY.value] = localization_accuracy
return results
def write_metric(metric_name,
metrics_dict,
summary_fn,
step,
transform_fn=None):
"""Writes an evaluation metric using a TensorBoard SummaryWriter function."""
if metric_name in metrics_dict:
metric = metrics_dict[metric_name]
if transform_fn is not None:
metric = transform_fn(metric)
summary_fn(metric_name, metric, step)
def figure_to_image(figure, dpi=None, close=True):
"""Converts the matplotlib plot specified by `figure` to a NumPy image.
  Args:
    figure: A matplotlib plot.
    dpi: Optional resolution in dots per inch used when rendering the figure;
      None uses the matplotlib default.
    close: Whether to close `figure` after converting it to an image.
  Returns:
    A 3-D NumPy array representing the image of the matplotlib plot.
  """
# Save the plot to a PNG in memory.
buf = io.BytesIO()
figure.savefig(buf, format='png', dpi=dpi, bbox_inches='tight')
buf.seek(0)
# Convert PNG buffer to NumPy array.
image = imageio.imread(buf, format='png')
buf.close()
if close:
plt.close(figure)
return image
def make_figure(*,
data,
title,
xlabel,
ylabel,
interpolation='nearest',
**kwargs):
""""Creates a matplotlib plot from the given data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
plt.imshow(data, interpolation=interpolation, **kwargs)
ax.set_aspect('equal')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.colorbar(orientation='vertical')
return fig
def instruction_pointer_to_image(instruction_pointer):
"""Converts the given instruction pointer array to an image."""
instruction_pointer_figure = make_figure(
data=instruction_pointer,
title='Instruction Pointer',
xlabel='Timestep',
ylabel='Node')
return figure_to_image(instruction_pointer_figure)
def confusion_matrix_to_image(cm, class_names):
"""Returns an image tensor representing the confusion matrix plotted.
Args:
cm: a `[num_classes, num_classes]` confusion matrix of integer classes.
class_names: an `[num_classes]` array of the names of the integer classes.
Returns:
A `[1, height, width, channels]` PNG image tensor representing the confusion
matrix.
"""
cm_display = metrics.ConfusionMatrixDisplay(cm, display_labels=class_names)
cm_display.plot(xticks_rotation=45)
figure = cm_display.figure_
image = figure_to_image(figure)
return np.expand_dims(image, 0)
def instruction_pointers_to_images(instruction_pointer, multidevice: bool):
"""Converts the given batched instruction pointer to images."""
if multidevice:
# instruction_pointer: device, batch_size / device, timesteps, num_nodes
instruction_pointer = instruction_pointer[0]
# instruction_pointer: batch_size / device, timesteps, num_nodes
instruction_pointer = jnp.transpose(instruction_pointer[:, :16, :],
(1, 2, 0))
# instruction_pointer: logging_slice_size, num_nodes, timesteps
instruction_pointer_image_list = [
instruction_pointer_to_image(ip)
for ip in instruction_pointer
]
instruction_pointer_image_leading_dim_max = max(
image.shape[0] for image in instruction_pointer_image_list)
instruction_pointer_image_list = [
pad(image, instruction_pointer_image_leading_dim_max)
for image in instruction_pointer_image_list
]
return jnp.array(instruction_pointer_image_list)
def pad(array, leading_dim_size: int):
"""Pad the leading dimension of the given array."""
leading_dim_difference = max(0, leading_dim_size - array.shape[0])
leading_pad_width = [(0, leading_dim_difference)]
trailing_pad_widths = [(0, 0)] * (array.ndim - 1)
return jnp.pad(array, leading_pad_width + trailing_pad_widths)
def compute_localization_accuracy(
localization_targets, localization_num_targets, localization_predictions):
if localization_predictions is None:
return None
def is_correct(targets, num_targets, prediction):
# targets.shape: max_num_targets
# num_targets.shape: scalar.
is_example = num_targets > 0
mask = jnp.arange(targets.shape[0]) < num_targets
# mask.shape: max_num_nodes
correct = targets == prediction
# correct.shape: max_num_nodes
correct_and_valid = jnp.logical_and(mask, correct)
# correct_and_valid.shape: max_num_nodes
overall_correct = jnp.max(correct_and_valid, axis=-1)
# overall_correct.shape: scalar.
return overall_correct, is_example
is_corrects, is_examples = jax.vmap(is_correct)(
localization_targets, localization_num_targets, localization_predictions)
# is_corrects.shape: num_examples
total_correct = jnp.sum(is_corrects)
total_examples = jnp.maximum(1, jnp.sum(is_examples))
return total_correct / total_examples
def compute_binary_targets(targets, info):
targets = jnp.array(targets)
error_ids = jnp.array(info.error_ids)
def matches(t, idx):
return t == idx
def matches_any(t, indexes):
matches_each = jax.vmap(matches, in_axes=(None, 0))(t, indexes)
# matches_each.shape: batch_size, num_indexes
return jnp.max(matches_each, axis=-1)
  binary_targets = jax.vmap(matches_any, in_axes=(0, None))(targets, error_ids)
  # binary_targets.shape: batch_size
# In binary_targets, True indicates the target is error and False no-error.
return binary_targets
def compute_binary_predictions(logits, info):
logits = jnp.array(logits)
get_logits = jax.vmap(lambda index: logits[:, index], out_axes=1)
no_error_logits = get_logits(jnp.array(info.no_error_ids))
error_logits = get_logits(jnp.array(info.error_ids))
# no_error_logits.shape: batch_size, num_no_error_classes
# error_logits.shape: batch_size, num_error_classes
no_error_ps = jax.scipy.special.logsumexp(no_error_logits, axis=-1)
error_ps = jax.scipy.special.logsumexp(error_logits, axis=-1)
# no_error_ps.shape: batch_size
# error_ps.shape: batch_size
binary_predictions = error_ps >= no_error_ps
# binary_predictions.shape: batch_size
# True indicates the prediction is error, False indicates no-error.
return binary_predictions
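# Note on the decision rule above: softmax probabilities share one normaliser,
# so comparing logsumexp over the error logits with logsumexp over the
# no-error logits is equivalent to comparing the total probability mass of the
# error classes with that of the no-error classes.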
def compute_binary_probabilities(logits, info):
logits = jnp.array(logits)
get_logits = jax.vmap(lambda index: logits[:, index], out_axes=1)
no_error_logits = get_logits(jnp.array(info.no_error_ids))
error_logits = get_logits(jnp.array(info.error_ids))
# no_error_logits.shape: batch_size, num_no_error_classes
# error_logits.shape: batch_size, num_error_classes
no_error_ps = jax.scipy.special.logsumexp(no_error_logits, axis=-1)
error_ps = jax.scipy.special.logsumexp(error_logits, axis=-1)
# no_error_ps.shape: batch_size
# error_ps.shape: batch_size
binary_logits = jnp.stack([error_ps, no_error_ps], axis=-1)
# binary_logits.shape: batch_size, 2
return jax.nn.softmax(binary_logits) # P(error), P(no-error)
def compute_binary_f1_score(targets, logits, info):
binary_predictions = compute_binary_predictions(logits, info)
binary_targets = compute_binary_targets(targets, info)
metric = metrics.f1_score(binary_targets, binary_predictions, average='binary')
return metric
def compute_binary_auc(targets, logits, info):
binary_targets = jnp.int32(compute_binary_targets(targets, info))
binary_probabilities = compute_binary_probabilities(logits, info)
error_probabilities = binary_probabilities[:, 0] # P(error)
return metrics.roc_auc_score(binary_targets, error_probabilities)
def compute_recall_at_precision(targets, logits, info, target_precision):
binary_targets = jnp.int32(compute_binary_targets(targets, info))
binary_probabilities = compute_binary_probabilities(logits, info)
error_probabilities = binary_probabilities[:, 0] # P(error)
precisions, recalls, thresholds = metrics.precision_recall_curve(binary_targets, error_probabilities, pos_label=1)
for precision, recall in zip(precisions, recalls):
    # precision_recall_curve orders points from lowest to highest threshold;
    # precisions ends at exactly 1 and recalls ends at exactly 0.
if precision >= target_precision:
return recall
return 0
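# Illustrative walk-through (hypothetical values): if precision_recall_curve
# returns precisions [0.5, 0.67, 1.0] and recalls [1.0, 0.67, 0.0], then a
# target_precision of 0.6 scans the points in order and returns 0.67, the
# first recall whose precision is at least 0.6.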
def compute_precision_at_recall(targets, logits, info, target_recall):
binary_targets = jnp.int32(compute_binary_targets(targets, info))
binary_probabilities = compute_binary_probabilities(logits, info)
error_probabilities = binary_probabilities[:, 0] # P(error)
precisions, recalls, thresholds = metrics.precision_recall_curve(binary_targets, error_probabilities, pos_label=1)
for precision, recall in reversed(list(zip(precisions, recalls))):
    # Iterating in reverse starts at (precision=1, recall=0) and moves toward
    # higher recall, so we return the first precision whose recall meets the target.
if recall >= target_recall:
return precision
return 0
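# With the same hypothetical curve, a target_recall of 0.6 scans the points
# from the (precision=1, recall=0) end and returns 0.67, the first precision
# whose recall is at least 0.6.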
def compute_weighted_f1_score_error_only(targets, predictions, info):
labels = info.error_ids
metric = metrics.f1_score(targets, predictions, labels=labels, average='weighted')
return metric
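# Because `labels` is restricted to info.error_ids, the no-error classes
# contribute neither per-class F1 terms nor support weights, so this score
# summarises performance on the error classes only.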
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
"""phy main CLI tool.
Usage:
phy --help
"""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import sys
import os.path as op
import argparse
from textwrap import dedent
import numpy as np
from six import exec_, string_types
#------------------------------------------------------------------------------
# Parser utilities
#------------------------------------------------------------------------------
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
class Parser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write(message + '\n\n')
self.print_help()
sys.exit(2)
_examples = dedent("""
examples:
phy -v display the version of phy
phy download hybrid_120sec.dat -o data/
download a sample raw data file in `data/`
phy describe my_file.kwik
display information about a Kwik dataset
phy spikesort my_params.prm
run the whole suite (spike detection and clustering)
phy detect my_params.prm
run spike detection on a parameters file
phy cluster-auto my_file.kwik
run klustakwik on a dataset (after spike detection)
phy cluster-manual my_file.kwik
run the manual clustering GUI
""")
#------------------------------------------------------------------------------
# Parser creator
#------------------------------------------------------------------------------
class ParserCreator(object):
def __init__(self):
self.create_main()
self.create_download()
self.create_traces()
self.create_describe()
self.create_spikesort()
self.create_detect()
self.create_auto()
self.create_manual()
self.create_notebook()
@property
def parser(self):
return self._parser
def _add_sub_parser(self, name, desc):
p = self._subparsers.add_parser(name, help=desc, description=desc)
self._add_options(p)
return p
def _add_options(self, parser):
parser.add_argument('--debug', '-d',
action='store_true',
help='activate debug logging mode')
parser.add_argument('--hide-traceback',
action='store_true',
help='hide the traceback for cleaner error '
'messages')
parser.add_argument('--profiler', '-p',
action='store_true',
help='activate the profiler')
parser.add_argument('--line-profiler', '-lp',
dest='line_profiler',
action='store_true',
help='activate the line-profiler -- you '
'need to decorate the functions '
'to profile with `@profile` '
'in the code')
parser.add_argument('--ipython', '-i', action='store_true',
help='launch the script in an interactive '
'IPython console')
parser.add_argument('--pdb', action='store_true',
help='activate the Python debugger')
def create_main(self):
import phy
desc = sys.modules['phy'].__doc__
self._parser = Parser(description=desc,
epilog=_examples,
formatter_class=CustomFormatter,
)
self._parser.add_argument('--version', '-v',
action='version',
version=phy.__version_git__,
help='print the version of phy')
self._add_options(self._parser)
self._subparsers = self._parser.add_subparsers(dest='command',
title='subcommand',
)
def create_download(self):
desc = 'download a sample dataset'
p = self._add_sub_parser('download', desc)
p.add_argument('file', help='dataset filename')
p.add_argument('--output-dir', '-o', help='output directory')
p.add_argument('--base',
default='cortexlab',
choices=('cortexlab', 'github'),
help='data repository name: `cortexlab` or `github`',
)
p.set_defaults(func=download)
def create_describe(self):
desc = 'describe a `.kwik` file'
p = self._add_sub_parser('describe', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main',
help='name of the clustering to use')
p.set_defaults(func=describe)
def create_traces(self):
desc = 'show the traces of a raw data file'
p = self._add_sub_parser('traces', desc)
p.add_argument('file', help='path to a `.kwd` or `.dat` file')
p.add_argument('--interval',
help='detection interval in seconds (e.g. `0,10`)')
p.add_argument('--n-channels', '-n',
help='number of channels in the recording '
'(only required when using a flat binary file)')
p.add_argument('--dtype',
help='NumPy data type '
'(only required when using a flat binary file)',
default='int16',
)
p.add_argument('--sample-rate', '-s',
help='sample rate in Hz '
'(only required when using a flat binary file)')
p.set_defaults(func=traces)
def create_spikesort(self):
desc = 'launch the whole spike sorting pipeline on a `.prm` file'
p = self._add_sub_parser('spikesort', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help='filename of the `.kwik` file '
'to create (by default, `"experiment_name".kwik`)')
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval',
help='detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=spikesort)
def create_detect(self):
desc = 'launch the spike detection algorithm on a `.prm` file'
p = self._add_sub_parser('detect', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help='filename of the `.kwik` file '
'to create (by default, `"experiment_name".kwik`)')
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval',
help='detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=detect)
def create_auto(self):
desc = 'launch the automatic clustering algorithm on a `.kwik` file'
p = self._add_sub_parser('cluster-auto', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main',
help='name of the clustering to use')
p.add_argument('--channel-group', default=None,
help='channel group to cluster')
p.set_defaults(func=cluster_auto)
def create_manual(self):
desc = 'launch the manual clustering GUI on a `.kwik` file'
p = self._add_sub_parser('cluster-manual', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main',
help='name of the clustering to use')
p.add_argument('--channel-group', default=None,
help='channel group to manually cluster')
p.add_argument('--cluster-ids', '-c',
help='list of clusters to select initially')
p.add_argument('--no-store', action='store_true', default=False,
help='do not create the store (faster loading time, '
'slower GUI)')
p.set_defaults(func=cluster_manual)
def create_notebook(self):
# TODO
pass
def parse(self, args):
try:
return self._parser.parse_args(args)
except SystemExit as e:
if e.code != 0:
raise e
#------------------------------------------------------------------------------
# Subcommand functions
#------------------------------------------------------------------------------
def _get_kwik_path(args):
kwik_path = args.file
if not op.exists(kwik_path):
raise IOError("The file `{}` doesn't exist.".format(kwik_path))
return kwik_path
def _create_session(args, **kwargs):
from phy.session import Session
kwik_path = _get_kwik_path(args)
session = Session(kwik_path, **kwargs)
return session
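# Each subcommand function below either does its work directly and returns
# None, or returns a `(cmd, ns)` pair: a Python statement to execute and the
# namespace to execute it in. `main()` runs that statement, optionally inside
# IPython or with the Qt/VisPy event loop, depending on the flags passed.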
def describe(args):
from phy.io.kwik import KwikModel
path = _get_kwik_path(args)
model = KwikModel(path, clustering=args.clustering)
return 'model.describe()', dict(model=model)
def download(args):
from phy import download_sample_data
download_sample_data(args.file,
output_dir=args.output_dir,
base=args.base,
)
def traces(args):
from vispy.app import run
from phy.plot.traces import TraceView
from phy.io.h5 import open_h5
from phy.io.traces import read_kwd, read_dat
path = args.file
if path.endswith('.kwd'):
f = open_h5(args.file)
traces = read_kwd(f)
elif path.endswith(('.dat', '.bin')):
if not args.n_channels:
raise ValueError("Please specify `--n-channels`.")
if not args.dtype:
raise ValueError("Please specify `--dtype`.")
if not args.sample_rate:
raise ValueError("Please specify `--sample-rate`.")
n_channels = int(args.n_channels)
dtype = np.dtype(args.dtype)
traces = read_dat(path, dtype=dtype, n_channels=n_channels)
start, end = map(int, args.interval.split(','))
sample_rate = float(args.sample_rate)
start = int(sample_rate * start)
end = int(sample_rate * end)
c = TraceView(keys='interactive')
c.visual.traces = .01 * traces[start:end, ...]
c.show()
run()
return None, None
def detect(args):
from phy.io import create_kwik
assert args.file.endswith('.prm')
kwik_path = args.kwik_path
kwik_path = create_kwik(args.file,
overwrite=args.overwrite,
kwik_path=kwik_path)
interval = args.interval
if interval is not None:
interval = list(map(float, interval.split(',')))
# Create the session with the newly-created .kwik file.
args.file = kwik_path
session = _create_session(args, use_store=False)
return ('session.detect(interval=interval)',
dict(session=session, interval=interval))
def cluster_auto(args):
from phy.utils._misc import _read_python
from phy.session import Session
assert args.file.endswith('.prm')
channel_group = (int(args.channel_group)
if args.channel_group is not None else None)
params = _read_python(args.file)
kwik_path = params['experiment_name'] + '.kwik'
session = Session(kwik_path)
ns = dict(session=session,
clustering=args.clustering,
channel_group=channel_group,
)
cmd = ('session.cluster('
'clustering=clustering, '
'channel_group=channel_group)')
return (cmd, ns)
def spikesort(args):
from phy.io import create_kwik
assert args.file.endswith('.prm')
kwik_path = args.kwik_path
kwik_path = create_kwik(args.file,
overwrite=args.overwrite,
kwik_path=kwik_path,
)
# Create the session with the newly-created .kwik file.
args.file = kwik_path
session = _create_session(args, use_store=False)
interval = args.interval
if interval is not None:
interval = list(map(float, interval.split(',')))
ns = dict(session=session,
interval=interval,
n_s_clusters=100, # TODO: better handling of KK parameters
)
cmd = ('session.detect(interval=interval); session.cluster();')
return (cmd, ns)
def cluster_manual(args):
channel_group = (int(args.channel_group)
if args.channel_group is not None else None)
session = _create_session(args,
clustering=args.clustering,
channel_group=channel_group,
use_store=not(args.no_store),
)
cluster_ids = (list(map(int, args.cluster_ids.split(',')))
if args.cluster_ids else None)
session.model.describe()
from phy.gui import start_qt_app
start_qt_app()
gui = session.show_gui(cluster_ids=cluster_ids, show=False)
print("\nPress `ctrl+h` to see the list of keyboard shortcuts.\n")
return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)
#------------------------------------------------------------------------------
# Main functions
#------------------------------------------------------------------------------
def main(args=None):
p = ParserCreator()
if args is None:
args = sys.argv[1:]
elif isinstance(args, string_types):
args = args.split(' ')
args = p.parse(args)
if args is None:
return
if args.profiler or args.line_profiler:
from phy.utils.testing import _enable_profiler, _profile
prof = _enable_profiler(args.line_profiler)
else:
prof = None
import phy
if args.debug:
phy.debug()
# Hide the traceback.
if args.hide_traceback:
def exception_handler(exception_type, exception, traceback):
print("{}: {}".format(exception_type.__name__, exception))
sys.excepthook = exception_handler
# Activate IPython debugger.
if args.pdb:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux',
call_pdb=1,
)
func = getattr(args, 'func', None)
if func is None:
p.parser.print_help()
return
out = func(args)
if not out:
return
cmd, ns = out
if not cmd:
return
requires_qt = ns.pop('requires_qt', False)
requires_vispy = ns.pop('requires_vispy', False)
# Default variables in namespace.
ns.update(phy=phy, path=args.file)
if 'session' in ns:
ns['model'] = ns['session'].model
# Interactive mode with IPython.
if args.ipython:
print("\nStarting IPython...")
from IPython import start_ipython
args_ipy = ["-i", "-c='{}'".format(cmd)]
if requires_qt or requires_vispy:
# Activate Qt event loop integration with Qt.
args_ipy += ["--gui=qt"]
start_ipython(args_ipy, user_ns=ns)
else:
if not prof:
exec_(cmd, {}, ns)
else:
_profile(prof, cmd, {}, ns)
if requires_qt:
# Launch the Qt app.
from phy.gui import run_qt_app
run_qt_app()
elif requires_vispy:
# Launch the VisPy Qt app.
from vispy.app import use_app, run
use_app('pyqt4')
run()
#------------------------------------------------------------------------------
# Entry point
#------------------------------------------------------------------------------
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import json
import unittest
from datetime import datetime
from flask_testing import TestCase
from flask import url_for
from app import create_app, db
from app.models import User, Todo, TodoList
class TodolistAPITestCase(TestCase):
def create_app(self):
return create_app('testing')
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def assert404Response(self, response):
self.assert_404(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['error'], 'Not found')
def assert400Response(self, response):
self.assert_400(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['error'], 'Bad Request')
@staticmethod
def setup_new_user(username):
user_data = {
'username': username,
'email': username + '@example.com',
'password': 'correcthorsebatterystaple'
}
return user_data
@staticmethod
def get_headers():
return {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def add_user(self, username):
user_data = self.setup_new_user(username)
User(**user_data).save()
return User.query.filter_by(username=username).first()
@staticmethod
def add_todolist(title, username=None):
todolist = TodoList(title=title, creator=username).save()
return TodoList.query.filter_by(id=todolist.id).first()
def add_todo(self, description, todolist_id, username=None):
todolist = TodoList.query.filter_by(id=todolist_id).first()
todo = Todo(description=description, todolist_id=todolist.id,
creator=username).save()
return Todo.query.filter_by(id=todo.id).first()
def add_user_through_json_post(self, username):
user_data = self.setup_new_user(username)
return self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
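    # Note: the helpers above exercise two different paths on purpose.
    # add_user, add_todolist and add_todo write directly through the models,
    # while add_user_through_json_post goes through the JSON API endpoint.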
# test for routes
def test_main_route(self):
response = self.client.get(url_for('api.get_routes'))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue('users' in json_response)
self.assertTrue('todolists' in json_response)
# test for errors
def test_bad_request(self):
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(), data='')
self.assert400Response(response)
def test_not_found(self):
response = self.client.get('/api/not/found')
self.assert404Response(response)
# test api post calls
def test_add_user(self):
username = 'adam'
post_response = self.add_user_through_json_post(username)
self.assertEqual(post_response.headers['Content-Type'],
'application/json')
self.assert_status(post_response, 201)
response = self.client.get(url_for('api.get_users'))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['users'][0]['user']['username'],
username)
def test_add_user_only_using_the_username(self):
user_data = {'username': 'adam'}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_user_only_using_the_username_and_email(self):
user_data = {'username': 'adam', 'email': 'adam@example.com'}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
    def test_add_user_with_too_long_username(self):
user_data = {
'username': 65 * 'a',
'email': 'adam@example.com',
'password': 'correcthorsebatterystaple',
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_user_with_invalid_username(self):
user_data = {
'username': 'not a valid username',
'email': 'adam@example.com',
'password': 'correcthorsebatterystaple',
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_user_without_username(self):
user_data = {
'username': '',
'email': 'adam@example.com',
'password': 'correcthorsebatterystaple',
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_user_with_invalid_email(self):
user_data = {
'username': 'adam',
'email': 'adamexample.com',
'password': 'correcthorsebatterystaple',
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
    def test_add_user_without_email(self):
user_data = {
'username': 'adam',
'email': '',
'password': 'correcthorsebatterystaple',
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_user_with_too_long_email(self):
user_data = {
'username': 'adam',
'email': 53 * 'a' + '@example.com',
'password': 'correcthorsebatterystaple',
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_user_without_password(self):
user_data = {
'username': 'adam',
'email': 'adam@example.com',
'password': '',
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_user_with_extra_fields(self):
user_data = {
'username': 'adam',
'email': 'adam@example.com',
'password': 'correcthorsebatterystaple',
'extra-field': 'will be ignored'
}
post_response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assertEqual(post_response.headers['Content-Type'],
'application/json')
self.assert_status(post_response, 201)
response = self.client.get(url_for('api.get_users'))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['users'][0]['user']['username'],
'adam')
def test_add_user_only_using_the_username_and_password(self):
user_data = {
'username': 'adam', 'password': 'correcthorsebatterystaple'
}
response = self.client.post(url_for('api.add_user'),
headers=self.get_headers(),
data=json.dumps(user_data))
self.assert400Response(response)
def test_add_todolist(self):
post_response = self.client.post(
url_for('api.add_todolist'),
headers=self.get_headers(),
data=json.dumps({'title': 'todolist'})
)
self.assert_status(post_response, 201)
# the expected id of the todolist is 1, as it is the first to be added
response = self.client.get(url_for('api.get_todolist', todolist_id=1))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todolist']['title'], 'todolist')
def test_add_todolist_without_title(self):
response = self.client.post(
url_for('api.add_todolist'),
headers=self.get_headers()
)
        # unlike the web form, the title is a required argument for the API
self.assert400Response(response)
def test_add_todolist_with_too_long_title(self):
response = self.client.post(
url_for('api.add_todolist'),
headers=self.get_headers(),
data=json.dumps({'title': 129 * 't'})
)
self.assert400Response(response)
def test_add_user_todolist(self):
username = 'adam'
new_user = self.add_user(username)
post_response = self.client.post(
url_for('api.add_user_todolist', username=username),
headers=self.get_headers(),
data=json.dumps({'title': 'todolist'})
)
self.assert_status(post_response, 201)
response = self.client.get(url_for('api.get_user_todolists',
username=username))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
# check title, creator are set correctly and a total of one todolist
self.assertEqual(json_response['todolists'][0]['title'], 'todolist')
self.assertEqual(json_response['todolists'][0]['creator'], username)
self.assertEqual(len(json_response['todolists']), 1)
def test_add_user_todolist_when_user_does_not_exist(self):
username = 'adam'
post_response = self.client.post(
url_for('api.add_user_todolist', username=username),
headers=self.get_headers(),
data=json.dumps({'title': 'todolist'})
)
self.assert400Response(post_response)
def test_add_user_todolist_todo(self):
username = 'adam'
todolist_title = 'new todolist'
new_user = self.add_user(username)
new_todolist = self.add_todolist(todolist_title, username)
post_response = self.client.post(
url_for('api.add_user_todolist_todo',
username=username, todolist_id=new_todolist.id),
headers=self.get_headers(),
data=json.dumps({
'description': 'new todo',
'creator': username,
'todolist_id': new_todolist.id
})
)
self.assert_status(post_response, 201)
response = self.client.get(url_for('api.get_user_todolist_todos',
username=username,
todolist_id=new_todolist.id))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
# check title, creator are set correctly and a total of one todo
self.assertEqual(json_response['todos'][0]['description'], 'new todo')
self.assertEqual(json_response['todos'][0]['creator'], username)
self.assertEqual(len(json_response['todos']), 1)
def test_add_user_todolist_todo_when_todolist_does_not_exist(self):
username = 'adam'
new_user = self.add_user(username)
post_response = self.client.post(
url_for('api.add_user_todolist_todo',
username=username, todolist_id=1),
headers=self.get_headers(),
data=json.dumps({
'description': 'new todo',
'creator': username,
'todolist_id': 1
})
)
self.assert400Response(post_response)
def test_add_user_todolist_todo_without_todo_data(self):
username = 'adam'
todolist_title = 'new todolist'
new_user = self.add_user(username)
new_todolist = self.add_todolist(todolist_title, username)
post_response = self.client.post(
url_for('api.add_user_todolist_todo',
username=username, todolist_id=new_todolist.id),
headers=self.get_headers()
)
self.assert400Response(post_response)
def test_add_todolist_todo(self):
new_todolist = TodoList().save() # todolist with default title
post_response = self.client.post(
url_for('api.add_todolist_todo', todolist_id=new_todolist.id),
headers=self.get_headers(),
data=json.dumps({
'description': 'new todo',
'creator': 'null',
'todolist_id': new_todolist.id
})
)
self.assert_status(post_response, 201)
response = self.client.get(url_for('api.get_todolist_todos',
todolist_id=new_todolist.id))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
# check title, creator are set correctly and a total of one todo
self.assertEqual(json_response['todos'][0]['description'], 'new todo')
self.assertEqual(json_response['todos'][0]['creator'], None)
self.assertEqual(len(json_response['todos']), 1)
def test_add_todolist_todo_when_todolist_does_not_exist(self):
post_response = self.client.post(
url_for('api.add_todolist_todo', todolist_id=1),
headers=self.get_headers(),
data=json.dumps({
'description': 'new todo',
'creator': 'null',
'todolist_id': 1
})
)
self.assert400Response(post_response)
def test_add_todolist_todo_without_todo_data(self):
new_todolist = TodoList().save()
post_response = self.client.post(
url_for('api.add_todolist_todo', todolist_id=new_todolist.id),
headers=self.get_headers()
)
self.assert400Response(post_response)
# test api get calls
def test_get_users(self):
username = 'adam'
new_user = self.add_user(username)
response = self.client.get(url_for('api.get_users'))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['users'][0]['user']['username'],
username)
def test_get_users_when_no_users_exist(self):
response = self.client.get(url_for('api.get_users'))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['users'], [])
def test_get_user(self):
username = 'adam'
new_user = self.add_user(username)
response = self.client.get(url_for('api.get_user', username=username))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['user']['username'], username)
def test_get_user_when_user_does_not_exist(self):
username = 'adam'
response = self.client.get(url_for('api.get_user', username=username))
self.assert404Response(response)
def test_get_todolists(self):
username = 'adam'
todolist_title = 'new todolist '
new_user = self.add_user(username)
first_todolist = self.add_todolist(todolist_title + '1', username)
second_todolist = self.add_todolist(todolist_title + '2', username)
response = self.client.get(url_for('api.get_todolists'))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todolists'][0]['title'],
'new todolist 1')
self.assertEqual(json_response['todolists'][0]['creator'], username)
self.assertEqual(json_response['todolists'][1]['title'],
'new todolist 2')
self.assertEqual(json_response['todolists'][1]['creator'], username)
self.assertEqual(len(json_response['todolists']), 2)
def test_get_todolists_when_no_todolists_exist(self):
response = self.client.get(url_for('api.get_todolists'))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todolists'], [])
self.assertEqual(len(json_response['todolists']), 0)
def test_get_user_todolists(self):
username = 'adam'
todolist_title = 'new todolist '
new_user = self.add_user(username)
first_todolist = self.add_todolist(todolist_title + '1', username)
second_todolist = self.add_todolist(todolist_title + '2', username)
response = self.client.get(url_for('api.get_user_todolists',
username=username))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todolists'][0]['title'],
'new todolist 1')
self.assertEqual(json_response['todolists'][0]['creator'], username)
self.assertEqual(json_response['todolists'][1]['title'],
'new todolist 2')
self.assertEqual(json_response['todolists'][1]['creator'], username)
self.assertEqual(len(json_response['todolists']), 2)
def test_get_user_todolists_when_user_does_not_exist(self):
username = 'adam'
response = self.client.get(url_for('api.get_user_todolists',
username=username))
self.assert404Response(response)
def test_get_user_todolists_when_user_has_no_todolists(self):
username = 'adam'
new_user = self.add_user(username)
response = self.client.get(url_for('api.get_user_todolists',
username=username))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todolists'], [])
self.assertEqual(len(json_response['todolists']), 0)
def test_get_todolist_todos(self):
todolist_title = 'new todolist'
new_todolist = self.add_todolist(todolist_title)
first_todo = self.add_todo('first', new_todolist.id)
second_todo = self.add_todo('second', new_todolist.id)
response = self.client.get(url_for('api.get_todolist_todos',
todolist_id=new_todolist.id))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todos'][0]['description'], 'first')
self.assertEqual(json_response['todos'][0]['creator'], None)
self.assertEqual(json_response['todos'][1]['description'], 'second')
self.assertEqual(json_response['todos'][1]['creator'], None)
self.assertEqual(len(json_response['todos']), 2)
def test_get_todolist_todos_when_todolist_does_not_exist(self):
response = self.client.get(url_for('api.get_todolist_todos',
todolist_id=1))
self.assert404Response(response)
def test_get_todolist_todos_when_todolist_has_no_todos(self):
todolist_title = 'new todolist'
new_todolist = self.add_todolist(todolist_title)
response = self.client.get(url_for('api.get_todolist_todos',
todolist_id=new_todolist.id))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todos'], [])
self.assertEqual(len(json_response['todos']), 0)
def test_get_user_todolist_todos(self):
username = 'adam'
todolist_title = 'new todolist'
new_user = self.add_user(username)
new_todolist = self.add_todolist(todolist_title, username)
first_todo = self.add_todo('first', new_todolist.id, username)
second_todo = self.add_todo('second', new_todolist.id, username)
response = self.client.get(url_for('api.get_user_todolist_todos',
username=username,
todolist_id=new_todolist.id))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todos'][0]['description'], 'first')
self.assertEqual(json_response['todos'][0]['creator'], username)
self.assertEqual(json_response['todos'][1]['description'], 'second')
self.assertEqual(json_response['todos'][1]['creator'], username)
self.assertEqual(len(json_response['todos']), 2)
def test_get_user_todolist_todos_when_user_does_not_exist(self):
username = 'adam'
response = self.client.get(url_for('api.get_user_todolist_todos',
username=username, todolist_id=1))
self.assert404Response(response)
def test_get_user_todolist_todos_when_todolist_does_not_exist(self):
username = 'adam'
new_user = self.add_user(username)
response = self.client.get(url_for('api.get_user_todolist_todos',
username=username, todolist_id=1))
self.assert404Response(response)
def test_get_user_todolist_todos_when_todolist_has_no_todos(self):
username = 'adam'
todolist_title = 'new todolist'
new_user = self.add_user(username)
new_todolist = self.add_todolist(todolist_title, username)
response = self.client.get(url_for('api.get_user_todolist_todos',
username=username,
todolist_id=new_todolist.id))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todos'], [])
self.assertEqual(len(json_response['todos']), 0)
def test_get_user_todolist_todos_when_todolist_does_not_belong_to_user(self):
first_username = 'adam'
second_username = 'ben'
todolist_title = 'new todolist'
first_user = self.add_user(first_username)
second_user = self.add_user(second_username)
new_todolist = self.add_todolist(todolist_title, second_username)
first_todo = self.add_todo('first', new_todolist.id, second_username)
second_todo = self.add_todo('second', new_todolist.id, second_username)
response = self.client.get(url_for('api.get_user_todolist_todos',
                                           username=first_username,
todolist_id=new_todolist.id))
self.assert404Response(response)
def test_get_user_todolist(self):
username = 'adam'
todolist_title = 'new todolist'
new_user = self.add_user(username)
new_todolist = self.add_todolist(todolist_title, username)
response = self.client.get(url_for('api.get_user_todolist',
username=username,
todolist_id=new_todolist.id))
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todolist']['title'], todolist_title)
self.assertEqual(json_response['todolist']['creator'], username)
def test_get_user_todolist_when_user_does_not_exist(self):
username = 'adam'
response = self.client.get(url_for('api.get_user_todolist',
username=username,
todolist_id=1))
self.assert404Response(response)
def test_get_user_todolist_when_todolist_does_not_exist(self):
username = 'adam'
new_user = self.add_user(username)
response = self.client.get(url_for('api.get_user_todolist',
username=username,
todolist_id=1))
self.assert404Response(response)
# test api put call
def test_update_todo_status_to_finished(self):
todolist = self.add_todolist('new todolist')
todo = self.add_todo('first', todolist.id)
self.assertFalse(todo.is_finished)
now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
response = self.client.put(
url_for('api.update_todo_status', todo_id=todo.id),
headers=self.get_headers(),
data=json.dumps({'todo': {'is_finished': True,
'finished_at': now}})
)
todo = Todo.query.get(todo.id)
self.assertTrue(todo.is_finished)
self.assertEqual(
todo.finished_at.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
now
)
def test_update_todo_status_to_open(self):
todolist = self.add_todolist('new todolist')
todo = self.add_todo('first', todolist.id)
todo.finished()
self.assertTrue(todo.is_finished)
response = self.client.put(
url_for('api.update_todo_status', todo_id=todo.id),
headers=self.get_headers(),
data=json.dumps({'todo': {'is_finished': False}})
)
todo = Todo.query.get(todo.id)
self.assertFalse(todo.is_finished)
self.assertTrue(todo.finished_at is None)
def test_change_todolist_title(self):
todolist = self.add_todolist('new todolist')
response = self.client.put(
url_for('api.change_todolist_title', todolist_id=todolist.id),
headers=self.get_headers(),
data=json.dumps({'todolist': {'title': 'changed title'}})
)
self.assert_200(response)
json_response = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_response['todolist']['title'], 'changed title')
def test_change_todolist_title_too_long_title(self):
todolist = self.add_todolist('new todolist')
response = self.client.put(
url_for('api.change_todolist_title', todolist_id=todolist.id),
headers=self.get_headers(),
data=json.dumps({'title': 129 * 't'})
)
self.assert_400(response)
def test_change_todolist_title_empty_title(self):
todolist = self.add_todolist('new todolist')
response = self.client.put(
url_for('api.change_todolist_title', todolist_id=todolist.id),
headers=self.get_headers(),
data=json.dumps({'title': ''})
)
self.assert_400(response)
def test_change_todolist_title_without_title(self):
todolist = self.add_todolist('new todolist')
response = self.client.put(
url_for('api.change_todolist_title', todolist_id=todolist.id),
headers=self.get_headers()
)
self.assert_400(response)
# test api delete calls
@unittest.skip('because acquiring admin rights is currently an issue')
def test_delete_user(self):
user = self.add_user('adam')
user_id = user.id
response = self.client.delete(
url_for('api.delete_user', user_id=user_id),
headers=self.get_headers(),
data=json.dumps({'user_id': user_id})
)
self.assert_200(response)
response = self.client.get(url_for('api.get_user', user_id=user_id))
self.assert_404(response)
@unittest.skip('because acquiring admin rights is currently an issue')
def test_delete_todolist(self):
todolist = self.add_todolist('new todolist')
todolist_id = todolist.id
response = self.client.delete(
url_for('api.delete_todolist', todolist_id=todolist_id),
headers=self.get_headers(),
data=json.dumps({'todolist_id': todolist_id})
)
self.assert_200(response)
response = self.client.get(
url_for('api.get_todolist', todolist_id=todolist_id)
)
self.assert_404(response)
@unittest.skip('because acquiring admin rights is currently an issue')
def test_delete_todo(self):
# we need admin rights for this test
todolist = self.add_todolist('new todolist')
todo = self.add_todo('new todo', todolist.id)
todo_id = todo.id
response = self.client.delete(
url_for('api.delete_todo', todo_id=todo_id),
headers=self.get_headers(),
data=json.dumps({'todo_id': todo_id})
)
self.assert_200(response)
response = self.client.get(
url_for('api.get_todo', todo_id=todo_id)
)
self.assert_404(response)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from matplotlib.pyplot import axes
import hickle as hkl
import numpy as np
np.random.seed(2 ** 10)
import tensorflow as tf
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras import regularizers
from keras.layers import Dropout
from keras.models import Sequential
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers.merge import concatenate
from keras.layers.core import Dense
from keras.layers.core import Lambda
from keras.layers.core import Flatten
from keras.utils import to_categorical
from keras.layers.normalization import BatchNormalization
from keras.layers import Input
from keras.models import Model
from keras.models import model_from_json
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
from image_utils import random_rotation
from image_utils import random_shift
from image_utils import flip_axis
from image_utils import random_brightness
from config_basec import *
from sys import stdout
import tb_callback
import lrs_callback
import argparse
import cv2
import os
def process_prec3d():
json_file = open(PRETRAINED_C3D, 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights(PRETRAINED_C3D_WEIGHTS)
print("Loaded weights from disk")
for layer in model.layers[:13]:
layer.trainable = RETRAIN_CLASSIFIER
# i = 0
# for layer in model.layers:
# print(layer, i)
# i = i + 1
    # Pop the top five layers so the pretrained C3D exposes an intermediate
    # feature layer as its output instead of the original top layer.
    for _ in range(5):
        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []
return model
def pretrained_c3d():
c3d = process_prec3d()
inputs = Input(shape=(16, 128, 112, 3))
resized = TimeDistributed(Lambda(lambda image: tf.image.resize_images(image, (112, 112))))(inputs)
c3d_out = c3d(resized)
model = Model(inputs=inputs, outputs=c3d_out)
# i = 0
# for layer in model.layers:
# print(layer, i)
# i = i + 1
print (c3d.summary())
return model
def ensemble_c3d():
inputs = Input(shape=(16, 128, 208, 3))
def sliceA(x):
return x[:, :, :, 0:112, :]
def sliceB(x):
return x[:, :, :, 96:208, :]
A = Lambda(sliceA)(inputs)
B = Lambda(sliceB)(inputs)
c3d_A = pretrained_c3d()
c3d_B = pretrained_c3d()
c3d_A.compile(loss="binary_crossentropy",
optimizer=OPTIM_C)
c3d_B.compile(loss="binary_crossentropy",
optimizer=OPTIM_C)
A_out = c3d_A(A)
B_out = c3d_B(B)
features = concatenate([A_out, B_out])
dense = Dense(units=1024, activation='relu', kernel_regularizer=regularizers.l2(0.01))(features)
x = BatchNormalization()(dense)
x = Dropout(0.5)(x)
x = Dense(units=512, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
actions = Dense(units=len(simple_ped_set), activation='sigmoid', kernel_regularizer=regularizers.l2(0.01))(x)
model = Model(inputs=inputs, outputs=actions)
# i=0
# for layer in model.layers:
# print (layer, i)
# i = i+1
# exit(0)
return model
def set_trainability(model, trainable):
model.trainable = trainable
for layer in model.layers:
layer.trainable = trainable
def arrange_images(video_stack):
n_frames = video_stack.shape[0] * video_stack.shape[1]
frames = np.zeros((n_frames,) + video_stack.shape[2:], dtype=video_stack.dtype)
frame_index = 0
for i in range(video_stack.shape[0]):
for j in range(video_stack.shape[1]):
frames[frame_index] = video_stack[i, j]
frame_index += 1
img_height = video_stack.shape[2]
img_width = video_stack.shape[3]
width = img_width * video_stack.shape[1]
height = img_height * video_stack.shape[0]
shape = frames.shape[1:]
image = np.zeros((height, width, shape[2]), dtype=video_stack.dtype)
frame_number = 0
for i in range(video_stack.shape[0]):
for j in range(video_stack.shape[1]):
image[(i * img_height):((i + 1) * img_height), (j * img_width):((j + 1) * img_width)] = frames[frame_number]
frame_number = frame_number + 1
return image
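# Shape sketch (illustrative): for a video_stack of shape
# (batch, video_length, 128, 208, 3), arrange_images returns a single mosaic
# of shape (batch * 128, video_length * 208, 3) with one row per clip and one
# column per frame.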
def load_weights(weights_file, model):
model.load_weights(weights_file)
def run_utilities(classifier, CLA_WEIGHTS):
if PRINT_MODEL_SUMMARY:
if CLASSIFIER:
print("Classifier:")
print (classifier.summary())
# exit(0)
# Save model to file
if SAVE_MODEL:
if CLASSIFIER:
model_json = classifier.to_json()
with open(os.path.join(MODEL_DIR, "classifier.json"), "w") as json_file:
json_file.write(model_json)
if PLOT_MODEL:
if CLASSIFIER:
plot_model(classifier, to_file=os.path.join(MODEL_DIR, 'classifier.png'), show_shapes=True)
if CLASSIFIER:
if CLA_WEIGHTS != "None":
print("Pre-loading classifier with weights.")
load_weights(CLA_WEIGHTS, classifier)
def random_augmentation(video):
# Toss a die
k = np.random.randint(0, 5, dtype=int)
if k == 0:
for i in range(VIDEO_LENGTH):
video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
return video
elif k == 1:
# Random Rotation
theta = np.random.uniform(-ROT_MAX, ROT_MAX)
for i in range (VIDEO_LENGTH):
video[i] = random_rotation(video[i], (i*theta)/VIDEO_LENGTH, row_axis=0,
col_axis=1, channel_axis=2, fill_mode="nearest")
video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
elif k == 2:
# Random shift
h, w = video.shape[1], video.shape[2]
tx = np.random.uniform(-SFT_V_MAX, SFT_V_MAX) * h
ty = np.random.uniform(-SFT_H_MAX, SFT_H_MAX) * w
for i in range(VIDEO_LENGTH):
video[i] = random_shift(video[i], tx, ty, row_axis=0,
col_axis=1, channel_axis=2, fill_mode="nearest")
video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
elif k == 3:
# Horizontal Flip
for i in range(VIDEO_LENGTH):
video[i] = flip_axis(video[i], axis=1)
video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
else:
# Vary brightness
u = np.random.uniform(BRIGHT_RANGE_L, BRIGHT_RANGE_H)
for i in range(VIDEO_LENGTH):
video[i] = random_brightness(video[i], u)
video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
return video
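# The die above picks exactly one augmentation per clip: identity, rotation,
# shift, horizontal flip or brightness change. Every branch also rescales the
# frames from [0, 255] to [-1, 1] via (x - 127.5) / 127.5.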
def load_X_y_RAM(videos_list, index, frames, ped_action_cats):
if RAM_DECIMATE:
X = []
y = []
for i in range(BATCH_SIZE):
video = np.take(frames, videos_list[(index*BATCH_SIZE + i)], axis=0)
video = random_augmentation(video)
X.append(video)
if (len(ped_action_cats) != 0):
y.append(np.take(ped_action_cats, videos_list[(index * BATCH_SIZE + i)], axis=0))
X = np.asarray(X)
y = np.asarray(y)
return X, y
else:
print ("RAM usage flag not set. Are you sure you want to do this?")
exit(0)
def load_to_RAM(frames_source):
frames = np.zeros(shape=((len(frames_source),) + IMG_SIZE))
j = 1
for i in range(1, len(frames_source)):
filename = "frame_" + str(j) + ".png"
im_file = os.path.join(DATA_DIR, filename)
try:
frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
frames[i] = frame.astype(np.float32)
j = j + 1
except AttributeError as e:
print(im_file)
print(e)
return frames
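# Note: load_to_RAM keeps raw [0, 255] pixel values in memory; rescaling to
# [-1, 1] happens later, in random_augmentation (RAM path) or load_X_y
# (disk path).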
def load_X_y(videos_list, index, data_dir, ped_action_cats, batch_size=BATCH_SIZE):
X = np.zeros((batch_size, VIDEO_LENGTH,) + IMG_SIZE)
y = []
for i in range(batch_size):
y_per_vid = []
for j in range(VIDEO_LENGTH):
frame_number = (videos_list[(index*batch_size + i), j])
filename = "frame_" + str(frame_number) + ".png"
im_file = os.path.join(data_dir, filename)
try:
frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
X[i, j] = (frame.astype(np.float32) - 127.5) / 127.5
except AttributeError as e:
print (im_file)
print (e)
if (len(ped_action_cats) != 0):
try:
y_per_vid.append(ped_action_cats[frame_number - 1])
except IndexError as e:
print(frame_number)
print(e)
if (len(ped_action_cats) != 0):
y.append(y_per_vid)
return X, np.asarray(y)
def map_to_simple(ped_action):
    simple_map = {0: 0, 1: 1, 2: 1, 5: 2, 6: 2, 7: 2, 8: 0, 9: 1, 12: 3, 13: 4}
    if ped_action not in simple_map:
        print ("Irrelevant ped_action found. Exiting.")
        print (ped_action)
        exit(0)
    return simple_map[ped_action]
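# For example, detailed actions 5, 6 and 7 all collapse to simple class 2,
# while 12 and 13 map to simple classes 3 and 4 respectively.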
def get_action_classes(action_labels, mode='softmax'):
# Load labels into per frame numerical indices from the action set
print("Loading annotations.")
ped_action_classes = []
count = [0] * len(simple_ped_set)
for i in range(len(action_labels)):
action_dict = dict(ele.split(':') for ele in action_labels[i].split(', ')[2:])
# Settle pedestrian classes
a_clean = []
        for key, value in action_dict.items():
if 'pedestrian' in key:
if ',' in value:
splits = value.split(',')
for k in range(len(splits)):
a_clean.append(splits[k])
else:
a_clean.append(value)
if len(a_clean) == 0:
a_clean = ['no ped']
ped_actions_per_frame = list(set([a.lower() for a in a_clean]))
simple_ped_actions_per_frame = []
encoded_ped_action = np.zeros(shape=(len(simple_ped_set)), dtype=np.float32)
for action in ped_actions_per_frame:
# Get ped action number and map it to simple set
if action.lower() not in ped_actions:
print ("Unknown action in labels. Exiting.")
print (action)
exit(0)
if action.lower() == 'crossing':
ped_action = simple_ped_set.index('crossing')
simple_ped_actions_per_frame.append(ped_action)
# if action.lower() == 'standing':
# ped_action = simple_ped_set.index('standing')
# simple_ped_actions_per_frame.append(ped_action)
# if action.lower() == 'no ped':
# ped_action = simple_ped_set.index('no ped')
# simple_ped_actions_per_frame.append(ped_action)
if mode=='softmax':
if 2 in simple_ped_actions_per_frame:
act = 2
if 0 in simple_ped_actions_per_frame:
act = 0
if 1 in simple_ped_actions_per_frame:
act = 1
encoded_ped_action = to_categorical(act, len(simple_ped_set))
count[act] = count[act] + 1
elif mode=='sigmoid':
for action in simple_ped_actions_per_frame:
count[action] = count[action] + 1
# Add all unique categorical one-hot vectors
encoded_ped_action = encoded_ped_action + to_categorical(action, len(simple_ped_set))
else:
print ("No mode selected to determine action labels. Exiting.")
exit(0)
ped_action_classes.append(encoded_ped_action.T)
ped_action_classes = np.asarray(ped_action_classes)
ped_action_classes = np.reshape(ped_action_classes, newshape=(ped_action_classes.shape[0:2]))
return ped_action_classes, count
def remove_zero_classes(videos_list, simple_ped_actions_per_frame):
r_indices = []
for i in range(len(videos_list)):
# if (len(list(simple_ped_actions_per_frame[videos_list[i, CLASS_TARGET_INDEX]])) == 0):
if sum(simple_ped_actions_per_frame[videos_list[i, CLASS_TARGET_INDEX]]) == 0:
r_indices.append(i)
for i in sorted(r_indices, reverse=True):
videos_list = np.delete(videos_list, i, axis=0)
return videos_list
def get_video_lists(frames_source, stride, frame_skip=0):
# Build video progressions
videos_list = []
start_frame_index = 1
end_frame_index = ((frame_skip + 1) * VIDEO_LENGTH) + 1 - frame_skip
while (end_frame_index <= len(frames_source)):
frame_list = frames_source[start_frame_index:end_frame_index]
if (len(set(frame_list)) == 1):
videos_list.append(range(start_frame_index, end_frame_index, frame_skip+1))
start_frame_index = start_frame_index + stride
end_frame_index = end_frame_index + stride
else:
start_frame_index = end_frame_index - 1
end_frame_index = start_frame_index + (frame_skip+1)*VIDEO_LENGTH -frame_skip
videos_list = np.asarray(videos_list, dtype=np.int32)
return np.asarray(videos_list)
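# Sliding-window sketch (illustrative numbers): with VIDEO_LENGTH == 16,
# stride == 8 and frame_skip == 0, consecutive windows cover frames [1..16],
# [9..24], [17..32], and so on; a window that straddles a source-video
# boundary in frames_source is dropped and the scan jumps to the end of that
# window.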
def get_classwise_data(videos_list, ped_action_labels):
classwise_videos_list = [[] for _ in range(len(simple_ped_set))]
count = [0] * len(simple_ped_set)
for i in range(len(videos_list)):
labels = np.where(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]] == 1)
for j in labels[0]:
count[j] += 1
classwise_videos_list[j].append(np.asarray(videos_list[i]))
print('Before subsampling')
print(str(count))
return classwise_videos_list, count
def prob_subsample(classwise_videos_list, count):
train_videos_list = []
sample_size = min(count)
for i in range(len(classwise_videos_list)):
indices = np.random.choice(count[i], sample_size, replace=False)
videos_list = np.asarray(np.take(classwise_videos_list[i], indices, axis=0))
train_videos_list.extend(np.asarray(videos_list))
train_videos_list = np.random.permutation(train_videos_list)
return np.asarray(train_videos_list)
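# prob_subsample balances the classes: it draws min(count) windows per class
# without replacement and shuffles the union, so every class contributes the
# same number of clips.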
def subsample_videos(videos_list, ped_action_labels):
print (videos_list.shape)
AP_MAX = 3
CR_MAX = 10
ST_MAX = 10
NP_MAX = 3
ap_count = 0
cr_count = 0
st_count = 0
np_count = 0
r_indices = []
classwise_videos_list, count = get_classwise_data(videos_list, ped_action_labels)
videos_list = prob_subsample(classwise_videos_list, count)
exit(0)
for i in range(len(videos_list)):
# Approaching count
if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 1):
ap_count = ap_count + 1
if (ap_count < AP_MAX):
r_indices.append(i)
else:
ap_count = 0
# Crossing count
if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 2):
cr_count = cr_count + 1
if (cr_count < CR_MAX):
r_indices.append(i)
else:
cr_count = 0
# Stopped count
if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 3):
st_count = st_count + 1
if (st_count < ST_MAX):
r_indices.append(i)
else:
st_count = 0
# No ped count
if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 6):
np_count = np_count + 1
if (np_count < NP_MAX):
r_indices.append(i)
else:
np_count = 0
# if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) ==
# list(ped_action_labels[videos_list[i, 8]]).index(1)):
# r_indices.append(i)
for i in sorted(r_indices, reverse=True):
videos_list = np.delete(videos_list, i, axis=0)
count = [0] * len(simple_ped_set)
for i in range(len(videos_list)):
count[list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1)] = \
count[list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1)] + 1
print ('After subsampling')
print (str(count))
return videos_list
def get_sklearn_metrics(y_true, y_pred, avg=None, pos_label=1):
return precision_recall_fscore_support(y_true, np.round(y_pred), average=avg, pos_label=pos_label)
def get_classification_report(y_true, y_pred):
return classification_report(y_true, np.round(y_pred), target_names=['crossing', 'not crossing'])
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS, CLA_WEIGHTS):
print("Loading data definitions.")
frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_208.hkl'))
videos_list_1 = get_video_lists(frames_source=frames_source, stride=8, frame_skip=0)
videos_list_2 = get_video_lists(frames_source=frames_source, stride=8, frame_skip=1)
videos_list = np.concatenate((videos_list_1, videos_list_2), axis=0)
# Load actions from annotations
action_labels = hkl.load(os.path.join(DATA_DIR, 'annotations_train_208.hkl'))
ped_action_classes, ped_class_count = get_action_classes(action_labels=action_labels, mode='sigmoid')
print("Training Stats: " + str(ped_class_count))
# videos_list = remove_zero_classes(videos_list, ped_action_classes)
# classwise_videos_list, count = get_classwise_data(videos_list, ped_action_classes)
# videos_list = prob_subsample(classwise_videos_list, count)
if RAM_DECIMATE:
frames = load_to_RAM(frames_source=frames_source)
if SHUFFLE:
# Shuffle images to aid generalization
videos_list = np.random.permutation(videos_list)
# Setup validation
val_frames_source = hkl.load(os.path.join(VAL_DATA_DIR, 'sources_val_208.hkl'))
val_videos_list = get_video_lists(frames_source=val_frames_source, stride=8, frame_skip=0)
# Load val action annotations
val_action_labels = hkl.load(os.path.join(VAL_DATA_DIR, 'annotations_val_208.hkl'))
val_ped_action_classes, val_ped_class_count = get_action_classes(val_action_labels, mode='sigmoid')
# val_videos_list = remove_zero_classes(val_videos_list, val_ped_action_classes)
print("Val Stats: " + str(val_ped_class_count))
# Build the Spatio-temporal Autoencoder
print ("Creating models.")
# Build stacked classifier
# classifier = pretrained_c3d()
classifier = ensemble_c3d()
# classifier = c3d_scratch()
classifier.compile(loss="binary_crossentropy",
optimizer=OPTIM_C,
# metrics=[metric_precision, metric_recall, metric_mpca, 'accuracy'])
metrics=['acc'])
# Build attention layer output
intermediate_classifier = Model(inputs=classifier.layers[0].input, outputs=classifier.layers[1].output)
mask_gen_1 = Sequential()
# mask_gen_1.add(encoder)
mask_gen_1.add(intermediate_classifier)
mask_gen_1.compile(loss='binary_crossentropy', optimizer=OPTIM_C)
run_utilities(classifier, CLA_WEIGHTS)
n_videos = videos_list.shape[0]
n_val_videos = val_videos_list.shape[0]
NB_ITERATIONS = int(n_videos/BATCH_SIZE)
# NB_ITERATIONS = 5
NB_VAL_ITERATIONS = int(n_val_videos/BATCH_SIZE)
# NB_VAL_ITERATIONS = 5
# Setup TensorBoard Callback
TC_cla = tb_callback.TensorBoard(log_dir=TF_LOG_CLA_DIR, histogram_freq=0, write_graph=False, write_images=False)
LRS_clas = lrs_callback.LearningRateScheduler(schedule=schedule)
LRS_clas.set_model(classifier)
print ("Beginning Training.")
# Begin Training
# Train Classifier
if CLASSIFIER:
print("Training Classifier...")
for epoch in range(1, NB_EPOCHS_CLASS+1):
print("\n\nEpoch ", epoch)
c_loss = []
val_c_loss = []
# # Set learning rate every epoch
LRS_clas.on_epoch_begin(epoch=epoch)
lr = K.get_value(classifier.optimizer.lr)
print("Learning rate: " + str(lr))
print("c_loss_metrics: " + str(classifier.metrics_names))
y_train_pred = []
y_train_true = []
for index in range(NB_ITERATIONS):
# Train Autoencoder
if RAM_DECIMATE:
# videos_list = prob_subsample(classwise_videos_list, count)
X, y = load_X_y_RAM(videos_list, index, frames, ped_action_classes)
else:
# videos_list = prob_subsample(classwise_videos_list, count)
X, y = load_X_y(videos_list, index, DATA_DIR, ped_action_classes)
X_train = X
y_true_class = y[:, CLASS_TARGET_INDEX]
c_loss.append(classifier.train_on_batch(X_train, y_true_class))
y_train_true.extend(y_true_class)
y_train_pred.extend(classifier.predict(X_train, verbose=0))
arrow = int(index / (NB_ITERATIONS / 30))
stdout.write("\rIter: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
"c_loss: " + str([ c_loss[len(c_loss) - 1][j] for j in [0, 1]]) + " " +
"\t [" + "{0}>".format("=" * (arrow)))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
ped_pred_class = classifier.predict(X_train, verbose=0)
# pred_seq = arrange_images(np.concatenate((X_train, predicted_images), axis=1))
pred_seq = arrange_images(X_train)
pred_seq = pred_seq * 127.5 + 127.5
font = cv2.FONT_HERSHEY_SIMPLEX
y_orig_classes = y
# Add labels as text to the image
for k in range(BATCH_SIZE):
                        for j in range(int(VIDEO_LENGTH)):
class_num_past = np.argmax(y_orig_classes[k, j])
class_num_y = np.argmax(ped_pred_class[k])
label_true = str(y_orig_classes[k, j])
label_pred = str([round(float(i), 2) for i in ped_pred_class[k]])
cv2.putText(pred_seq, 'truth: ' + label_true,
(2 + j * (208), 94 + k * 128), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.putText(pred_seq, label_pred,
(2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.imwrite(os.path.join(CLA_GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_cla_pred.png"), pred_seq)
slices = mask_gen_1.predict(X_train)
slice_images = arrange_images(slices)
slice_images = slice_images * 127.5 + 127.5
cv2.imwrite(os.path.join(CLA_GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_slice_pred.png"), slice_images)
# Run over val data
print('')
y_val_pred = []
y_val_true = []
for index in range(NB_VAL_ITERATIONS):
X, y = load_X_y(val_videos_list, index, VAL_DATA_DIR, val_ped_action_classes)
X_val = X
y_true_class = y[:, CLASS_TARGET_INDEX]
val_c_loss.append(classifier.test_on_batch(X_val, y_true_class))
y_val_true.extend(y_true_class)
y_val_pred.extend(classifier.predict(X_val, verbose=0))
arrow = int(index / (NB_VAL_ITERATIONS / 40))
stdout.write("\rIter: " + str(index) + "/" + str(NB_VAL_ITERATIONS - 1) + " " +
"val_c_loss: " + str([ val_c_loss[len(val_c_loss) - 1][j] for j in [0, 1]]))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
val_ped_pred_class = classifier.predict(X_val, verbose=0)
# pred_seq = arrange_images(np.concatenate((X_train, predicted_images), axis=1))
pred_seq = arrange_images(X_val)
pred_seq = pred_seq * 127.5 + 127.5
font = cv2.FONT_HERSHEY_SIMPLEX
y_orig_classes = y
# Add labels as text to the image
for k in range(BATCH_SIZE):
for j in range(int(VIDEO_LENGTH)):
class_num_past = np.argmax(y_orig_classes[k, j])
class_num_y = np.argmax(val_ped_pred_class[k])
label_true = str(y_orig_classes[k, j])
                            label_pred = str([round(float(i), 2) for i in val_ped_pred_class[k]])
cv2.putText(pred_seq, 'truth: ' + label_true,
(2 + j * (208), 94 + k * 128), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.putText(pred_seq, label_pred,
(2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.imwrite(os.path.join(CLA_GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_cla_val_pred.png"), pred_seq)
# then after each epoch
avg_c_loss = np.mean(np.asarray(c_loss, dtype=np.float32), axis=0)
avg_val_c_loss = np.mean(np.asarray(val_c_loss, dtype=np.float32), axis=0)
train_prec, train_rec, train_fbeta, train_support = get_sklearn_metrics(np.asarray(y_train_true),
np.asarray(y_train_pred),
avg='binary',
pos_label=1)
val_prec, val_rec, val_fbeta, val_support = get_sklearn_metrics(np.asarray(y_val_true),
np.asarray(y_val_pred),
avg='binary',
pos_label=1)
loss_values = np.asarray(avg_c_loss.tolist() + [train_prec.tolist()] +
[train_rec.tolist()] +
avg_val_c_loss.tolist() + [val_prec.tolist()] +
[val_rec.tolist()], dtype=np.float32)
precs = ['prec_' + action for action in simple_ped_set]
recs = ['rec_' + action for action in simple_ped_set]
fbeta = ['fbeta_' + action for action in simple_ped_set]
c_loss_keys = ['c_' + metric for metric in classifier.metrics_names+precs+recs]
val_c_loss_keys = ['c_val_' + metric for metric in classifier.metrics_names+precs+recs]
loss_keys = c_loss_keys + val_c_loss_keys
logs = dict(zip(loss_keys, loss_values))
TC_cla.on_epoch_end(epoch, logs)
# Log the losses
with open(os.path.join(LOG_DIR, 'losses_cla.json'), 'a') as log_file:
log_file.write("{\"epoch\":%d, %s\n" % (epoch, str(logs).strip('{')))
print("\nAvg c_loss: " + str(avg_c_loss) +
" Avg val_c_loss: " + str(avg_val_c_loss))
print ("Train Prec: %.2f, Recall: %.2f, Fbeta: %.2f" %(train_prec, train_rec, train_fbeta))
print("Val Prec: %.2f, Recall: %.2f, Fbeta: %.2f" % (val_prec, val_rec, val_fbeta))
# Save model weights per epoch to file
classifier.save_weights(os.path.join(CHECKPOINT_DIR, 'classifier_cla_epoch_' + str(epoch) + '.h5'),
True)
classifier.save(os.path.join(CHECKPOINT_DIR, 'full_classifier_cla_epoch_' + str(epoch) + '.h5'))
print (get_classification_report(np.asarray(y_train_true), np.asarray(y_train_pred)))
print (get_classification_report(np.asarray(y_val_true), np.asarray(y_val_pred)))
def test(CLA_WEIGHTS):
if not os.path.exists(TEST_RESULTS_DIR + '/pred/'):
os.mkdir(TEST_RESULTS_DIR + '/pred/')
# Setup test
test_frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_208.hkl'))
# test_videos_list = get_video_lists(frames_source=test_frames_source, stride=8, frame_skip=0)
# test_videos_list = get_video_lists(frames_source=test_frames_source, stride=16, frame_skip=0)
test_videos_list = get_video_lists(frames_source=test_frames_source, stride=16, frame_skip=2)
# Load test action annotations
test_action_labels = hkl.load(os.path.join(TEST_DATA_DIR, 'annotations_test_208.hkl'))
test_ped_action_classes, test_ped_class_count = get_action_classes(test_action_labels, mode='sigmoid')
print("Test Stats: " + str(test_ped_class_count))
# Build the Spatio-temporal Autoencoder
print("Creating models.")
# Build stacked classifier
# classifier = pretrained_c3d()
classifier = ensemble_c3d()
# classifier = c3d_scratch()
classifier.compile(loss="binary_crossentropy",
optimizer=OPTIM_C,
# metrics=[metric_precision, metric_recall, metric_mpca, 'accuracy'])
metrics=['acc'])
# Build attention layer output
intermediate_classifier = Model(inputs=classifier.layers[0].input, outputs=classifier.layers[1].output)
mask_gen_1 = Sequential()
# mask_gen_1.add(encoder)
mask_gen_1.add(intermediate_classifier)
mask_gen_1.compile(loss='binary_crossentropy', optimizer=OPTIM_C)
run_utilities(classifier, CLA_WEIGHTS)
n_test_videos = test_videos_list.shape[0]
NB_TEST_ITERATIONS = int(n_test_videos / TEST_BATCH_SIZE)
# NB_TEST_ITERATIONS = 5
# Setup TensorBoard Callback
TC_cla = tb_callback.TensorBoard(log_dir=TF_LOG_CLA_DIR, histogram_freq=0, write_graph=False, write_images=False)
LRS_clas = lrs_callback.LearningRateScheduler(schedule=schedule)
LRS_clas.set_model(classifier)
if CLASSIFIER:
print("Testing Classifier...")
# Run over test data
print('')
y_test_pred = []
y_test_true = []
test_c_loss = []
for index in range(NB_TEST_ITERATIONS):
X, y = load_X_y(test_videos_list, index, TEST_DATA_DIR, test_ped_action_classes, batch_size=TEST_BATCH_SIZE)
X_test = X
y_true_class = y[:, CLASS_TARGET_INDEX]
test_c_loss.append(classifier.test_on_batch(X_test, y_true_class))
y_test_true.extend(y_true_class)
y_test_pred.extend(classifier.predict(X_test, verbose=0))
arrow = int(index / (NB_TEST_ITERATIONS / 40))
stdout.write("\rIter: " + str(index) + "/" + str(NB_TEST_ITERATIONS - 1) + " " +
"test_c_loss: " + str([test_c_loss[len(test_c_loss) - 1][j] for j in [0, 1]]))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
test_ped_pred_class = classifier.predict(X_test, verbose=0)
# pred_seq = arrange_images(np.concatenate((X_train, predicted_images), axis=1))
pred_seq = arrange_images(X_test)
pred_seq = pred_seq * 127.5 + 127.5
font = cv2.FONT_HERSHEY_SIMPLEX
y_orig_classes = y
# Add labels as text to the image
for k in range(TEST_BATCH_SIZE):
for j in range(int(VIDEO_LENGTH)):
if (y_orig_classes[k, j] > 0.5):
label_true = "crossing"
else:
label_true = "not crossing"
if (test_ped_pred_class[k] > 0.5):
label_pred = "crossing"
else:
label_pred = "not crossing"
cv2.putText(pred_seq, 'truth: ' + label_true,
(2 + j * (208), 94 + k * 128), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.putText(pred_seq, label_pred,
(2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/pred/', str(index) + "_cla_test_pred.png"),
pred_seq)
# then after each epoch
avg_test_c_loss = np.mean(np.asarray(test_c_loss, dtype=np.float32), axis=0)
test_prec, test_rec, test_fbeta, test_support = get_sklearn_metrics(np.asarray(y_test_true),
np.asarray(y_test_pred),
avg='binary',
pos_label=1)
print("\nAvg test_c_loss: " + str(avg_test_c_loss))
print("Test Prec: %.4f, Recall: %.4f, Fbeta: %.4f" % (test_prec, test_rec, test_fbeta))
print ("Classification Report")
print(get_classification_report(np.asarray(y_test_true), np.asarray(y_test_pred)))
print ("Confusion matrix")
tn, fp, fn, tp = confusion_matrix(y_test_true, np.round(y_test_pred)).ravel()
print ("TN: %.2f, FP: %.2f, FN: %.2f, TP: %.2f" % (tn, fp, fn, tp))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str)
parser.add_argument("--enc_weights", type=str, default="None")
parser.add_argument("--dec_weights", type=str, default="None")
parser.add_argument("--cla_weights", type=str, default="None")
parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
parser.add_argument("--nice", dest="nice", action="store_true")
parser.set_defaults(nice=False)
args = parser.parse_args()
return args
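# Illustrative invocations of this script (a sketch based on the flags defined
# above; the script filename and weight paths are placeholders, not real files):
#
#   python <this_script>.py --mode train --batch_size 8 --cla_weights None
#   python <this_script>.py --mode test --cla_weights checkpoints/classifier_cla_epoch_10.h5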
if __name__ == "__main__":
args = get_args()
if args.mode == "train":
train(BATCH_SIZE=args.batch_size,
ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights,
CLA_WEIGHTS=args.cla_weights)
if args.mode == "test":
test(CLA_WEIGHTS=args.cla_weights)
|
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# execsnoop Trace new processes via exec() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: execsnoop [-h] [-T] [-t] [-x] [-q] [-n NAME] [-l LINE]
# [--max-args MAX_ARGS]
#
# This currently will print up to a maximum of 19 arguments, plus the process
# name, so 20 fields in total (MAXARG).
#
# This won't catch all new processes: an application may fork() but not exec().
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 07-Feb-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from bcc.containers import filter_by_containers
from bcc.utils import ArgString, printb
import bcc.utils as utils
import argparse
import re
import time
import pwd
from collections import defaultdict
from time import strftime
def parse_uid(user):
try:
result = int(user)
except ValueError:
try:
user_info = pwd.getpwnam(user)
except KeyError:
raise argparse.ArgumentTypeError(
"{0!r} is not valid UID or user entry".format(user))
else:
return user_info.pw_uid
else:
# Maybe validate if UID < 0 ?
return result
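# Quick sketch of how parse_uid() resolves its argument (the name lookup assumes
# a standard Linux passwd database where "root" maps to UID 0):
#
#   parse_uid("1000")          -> 1000
#   parse_uid("root")          -> 0
#   parse_uid("no-such-user")  -> raises argparse.ArgumentTypeError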
# arguments
examples = """examples:
./execsnoop # trace all exec() syscalls
./execsnoop -x # include failed exec()s
./execsnoop -T # include time (HH:MM:SS)
./execsnoop -U # include UID
./execsnoop -u 1000 # only trace UID 1000
./execsnoop -u user # get user UID and trace only them
./execsnoop -t # include timestamps
./execsnoop -q # add "quotemarks" around arguments
./execsnoop -n main # only print command lines containing "main"
./execsnoop -l tpkg # only print command where arguments contains "tpkg"
./execsnoop --cgroupmap mappath # only trace cgroups in this BPF map
./execsnoop --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace exec() syscalls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--time", action="store_true",
help="include time column on output (HH:MM:SS)")
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-x", "--fails", action="store_true",
help="include failed exec()s")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("-u", "--uid", type=parse_uid, metavar='USER',
help="trace this UID only")
parser.add_argument("-q", "--quote", action="store_true",
help="Add quotemarks (\") around arguments."
)
parser.add_argument("-n", "--name",
type=ArgString,
help="only print commands matching this name (regex), any arg")
parser.add_argument("-l", "--line",
type=ArgString,
help="only print commands where arg contains this line (regex)")
parser.add_argument("-U", "--print-uid", action="store_true",
help="print UID column")
parser.add_argument("--max-args", default="20",
help="maximum number of arguments parsed and displayed, defaults to 20")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/fs.h>
#define ARGSIZE 128
enum event_type {
EVENT_ARG,
EVENT_RET,
};
struct data_t {
u32 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
u32 ppid; // Parent PID as in the userspace term (i.e task->real_parent->tgid in kernel)
u32 uid;
char comm[TASK_COMM_LEN];
enum event_type type;
char argv[ARGSIZE];
int retval;
};
BPF_PERF_OUTPUT(events);
static int __submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
bpf_probe_read_user(data->argv, sizeof(data->argv), ptr);
events.perf_submit(ctx, data, sizeof(struct data_t));
return 1;
}
static int submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
const char *argp = NULL;
bpf_probe_read_user(&argp, sizeof(argp), ptr);
if (argp) {
return __submit_arg(ctx, (void *)(argp), data);
}
return 0;
}
int syscall__execve(struct pt_regs *ctx,
const char __user *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
u32 uid = bpf_get_current_uid_gid() & 0xffffffff;
UID_FILTER
if (container_should_be_filtered()) {
return 0;
}
// create data here and pass to submit_arg to save stack space (#555)
struct data_t data = {};
struct task_struct *task;
data.pid = bpf_get_current_pid_tgid() >> 32;
task = (struct task_struct *)bpf_get_current_task();
// Some kernels, like Ubuntu 4.13.0-generic, return 0
// as the real_parent->tgid.
// We use the get_ppid function as a fallback in those cases. (#1883)
data.ppid = task->real_parent->tgid;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_ARG;
__submit_arg(ctx, (void *)filename, &data);
// skip first arg, as we submitted filename
#pragma unroll
for (int i = 1; i < MAXARG; i++) {
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0)
goto out;
}
// handle truncated argument list
char ellipsis[] = "...";
__submit_arg(ctx, (void *)ellipsis, &data);
out:
return 0;
}
int do_ret_sys_execve(struct pt_regs *ctx)
{
if (container_should_be_filtered()) {
return 0;
}
struct data_t data = {};
struct task_struct *task;
u32 uid = bpf_get_current_uid_gid() & 0xffffffff;
UID_FILTER
data.pid = bpf_get_current_pid_tgid() >> 32;
data.uid = uid;
task = (struct task_struct *)bpf_get_current_task();
// Some kernels, like Ubuntu 4.13.0-generic, return 0
// as the real_parent->tgid.
// We use the get_ppid function as a fallback in those cases. (#1883)
data.ppid = task->real_parent->tgid;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_RET;
data.retval = PT_REGS_RC(ctx);
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
bpf_text = bpf_text.replace("MAXARG", args.max_args)
if args.uid:
bpf_text = bpf_text.replace('UID_FILTER',
'if (uid != %s) { return 0; }' % args.uid)
else:
bpf_text = bpf_text.replace('UID_FILTER', '')
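# For example, running with -u 1000 expands the UID_FILTER placeholder in both
# probes above to the following C snippet (per the format string used here):
#
#   if (uid != 1000) { return 0; }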
bpf_text = filter_by_containers(args) + bpf_text
if args.ebpf:
print(bpf_text)
exit()
# initialize BPF
b = BPF(text=bpf_text)
execve_fnname = b.get_syscall_fnname("execve")
b.attach_kprobe(event=execve_fnname, fn_name="syscall__execve")
b.attach_kretprobe(event=execve_fnname, fn_name="do_ret_sys_execve")
# header
if args.time:
print("%-9s" % ("TIME"), end="")
if args.timestamp:
print("%-8s" % ("TIME(s)"), end="")
if args.print_uid:
print("%-6s" % ("UID"), end="")
print("%-16s %-6s %-6s %3s %s" % ("PCOMM", "PID", "PPID", "RET", "ARGS"))
class EventType(object):
EVENT_ARG = 0
EVENT_RET = 1
start_ts = time.time()
argv = defaultdict(list)
# This is best-effort PPID matching. Short-lived processes may exit
# before we get a chance to read the PPID.
# This is a fallback for when fetching the PPID from task->real_parent->tgid
# returns 0, which happens in some kernel versions.
def get_ppid(pid):
try:
with open("/proc/%d/status" % pid) as status:
for line in status:
if line.startswith("PPid:"):
return int(line.split()[1])
except IOError:
pass
return 0
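# Sketch of the /proc/<pid>/status line this fallback parses (format as described
# in proc(5); the PID value is illustrative):
#
#   PPid:   1234
#
# get_ppid() splits that line on whitespace and returns int("1234"); if the
# process has already exited, the open() raises IOError and 0 is returned.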
# process event
def print_event(cpu, data, size):
event = b["events"].event(data)
skip = False
if event.type == EventType.EVENT_ARG:
argv[event.pid].append(event.argv)
elif event.type == EventType.EVENT_RET:
if event.retval != 0 and not args.fails:
skip = True
if args.name and not re.search(bytes(args.name), event.comm):
skip = True
if args.line and not re.search(bytes(args.line),
b' '.join(argv[event.pid])):
skip = True
if args.quote:
argv[event.pid] = [
b"\"" + arg.replace(b"\"", b"\\\"") + b"\""
for arg in argv[event.pid]
]
if not skip:
if args.time:
printb(b"%-9s" % strftime("%H:%M:%S").encode('ascii'), nl="")
if args.timestamp:
printb(b"%-8.3f" % (time.time() - start_ts), nl="")
if args.print_uid:
printb(b"%-6d" % event.uid, nl="")
ppid = event.ppid if event.ppid > 0 else get_ppid(event.pid)
ppid = b"%d" % ppid if ppid > 0 else b"?"
argv_text = b' '.join(argv[event.pid]).replace(b'\n', b'\\n')
printb(b"%-16s %-6d %-6s %3d %s" % (event.comm, event.pid,
ppid, event.retval, argv_text))
try:
del(argv[event.pid])
except Exception:
pass
# loop with callback to print_event
b["events"].open_perf_buffer(print_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
|
from rdflib.namespace import RDF
from rdflib.term import BNode, URIRef
from random import randint
__all__ = ["Container", "Bag", "Seq", "Alt", "NoElementException"]
class Container(object):
"""A class for constructing RDF containers, as per https://www.w3.org/TR/rdf11-mt/#rdf-containers
Basic usage, creating a ``Bag`` and adding to it::
>>> from rdflib import Graph, BNode, Literal, Bag
>>> g = Graph()
>>> b = Bag(g, BNode(), [Literal("One"), Literal("Two"), Literal("Three")])
>>> print(g.serialize(format="turtle"))
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<BLANKLINE>
[] a rdf:Bag ;
rdf:_1 "One" ;
rdf:_2 "Two" ;
rdf:_3 "Three" .
<BLANKLINE>
<BLANKLINE>
>>> # print out an item using an index reference
>>> print(b[2])
Two
>>> # add a new item
>>> b.append(Literal("Hello")) # doctest: +ELLIPSIS
<rdflib.container.Bag object at ...>
>>> print(g.serialize(format="turtle"))
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<BLANKLINE>
[] a rdf:Bag ;
rdf:_1 "One" ;
rdf:_2 "Two" ;
rdf:_3 "Three" ;
rdf:_4 "Hello" .
<BLANKLINE>
<BLANKLINE>
"""
def __init__(self, graph, uri, seq=[], rtype="Bag"):
"""Creates a Container
:param graph: a Graph instance
:param uri: URI or Blank Node of the Container
:param seq: the elements of the Container
:param rtype: the type of Container, one of "Bag", "Seq" or "Alt"
"""
self.graph = graph
self.uri = uri or BNode()
self._len = 0
self._rtype = rtype # rdf:Bag or rdf:Seq or rdf:Alt
self.append_multiple(seq)
# adding triple corresponding to container type
self.graph.add((self.uri, RDF.type, RDF[self._rtype]))
def n3(self):
items = []
for i in range(len(self)):
v = self[i + 1]
items.append(v)
return "( %s )" % " ".join([a.n3() for a in items])
def _get_container(self):
"""Returns the URI of the container"""
return self.uri
def __len__(self):
"""Number of items in container"""
return self._len
    def type_of_container(self):
return self._rtype
def index(self, item):
"""Returns the 1-based numerical index of the item in the container"""
        pred = list(self.graph.predicates(self.uri, item))
if not pred:
raise ValueError("%s is not in %s" % (item, "container"))
LI_INDEX = URIRef(str(RDF) + "_")
i = None
for p in pred:
i = int(p.replace(LI_INDEX, ""))
return i
def __getitem__(self, key):
"""Returns item of the container at index key"""
c = self._get_container()
assert isinstance(key, int)
elem_uri = str(RDF) + "_" + str(key)
if key <= 0 or key > len(self):
raise KeyError(key)
v = self.graph.value(c, URIRef(elem_uri))
if v:
return v
else:
raise KeyError(key)
def __setitem__(self, key, value):
"""Sets the item at index key or predicate rdf:_key of the container to value"""
assert isinstance(key, int)
c = self._get_container()
elem_uri = str(RDF) + "_" + str(key)
if key <= 0 or key > len(self):
raise KeyError(key)
self.graph.set((c, URIRef(elem_uri), value))
def __delitem__(self, key):
"""Removing the item with index key or predicate rdf:_key"""
assert isinstance(key, int)
if key <= 0 or key > len(self):
raise KeyError(key)
graph = self.graph
container = self.uri
elem_uri = str(RDF) + "_" + str(key)
graph.remove((container, URIRef(elem_uri), None))
for j in range(key + 1, len(self) + 1):
elem_uri = str(RDF) + "_" + str(j)
v = graph.value(container, URIRef(elem_uri))
graph.remove((container, URIRef(elem_uri), v))
elem_uri = str(RDF) + "_" + str(j - 1)
graph.add((container, URIRef(elem_uri), v))
self._len -= 1
def items(self):
"""Returns a list of all items in the container"""
l_ = []
container = self.uri
i = 1
while True:
elem_uri = str(RDF) + "_" + str(i)
if (container, URIRef(elem_uri), None) in self.graph:
i += 1
l_.append(self.graph.value(container, URIRef(elem_uri)))
else:
break
return l_
    def end(self):
        # find end index (1-based) of container
container = self.uri
i = 1
while True:
elem_uri = str(RDF) + "_" + str(i)
if (container, URIRef(elem_uri), None) in self.graph:
i += 1
else:
return i - 1
def append(self, item):
"""Adding item to the end of the container"""
end = self.end()
elem_uri = str(RDF) + "_" + str(end + 1)
container = self.uri
self.graph.add((container, URIRef(elem_uri), item))
self._len += 1
return self
def append_multiple(self, other):
"""Adding multiple elements to the container to the end which are in python list other"""
end = self.end() # it should return the last index
container = self.uri
for item in other:
end += 1
self._len += 1
elem_uri = str(RDF) + "_" + str(end)
self.graph.add((container, URIRef(elem_uri), item))
return self
def clear(self):
"""Removing all elements from the container"""
container = self.uri
graph = self.graph
i = 1
while True:
elem_uri = str(RDF) + "_" + str(i)
if (container, URIRef(elem_uri), None) in self.graph:
graph.remove((container, URIRef(elem_uri), None))
i += 1
else:
break
self._len = 0
return self
class Bag(Container):
"""Unordered container (no preference order of elements)"""
def __init__(self, graph, uri, seq=[]):
Container.__init__(self, graph, uri, seq, "Bag")
class Alt(Container):
def __init__(self, graph, uri, seq=[]):
Container.__init__(self, graph, uri, seq, "Alt")
def anyone(self):
if len(self) == 0:
raise NoElementException()
else:
p = randint(1, len(self))
item = self.__getitem__(p)
return item
class Seq(Container):
def __init__(self, graph, uri, seq=[]):
Container.__init__(self, graph, uri, seq, "Seq")
def add_at_position(self, pos, item):
assert isinstance(pos, int)
if pos <= 0 or pos > len(self) + 1:
raise ValueError("Invalid Position for inserting element in rdf:Seq")
if pos == len(self) + 1:
self.append(item)
else:
for j in range(len(self), pos - 1, -1):
container = self._get_container()
elem_uri = str(RDF) + "_" + str(j)
v = self.graph.value(container, URIRef(elem_uri))
self.graph.remove((container, URIRef(elem_uri), v))
elem_uri = str(RDF) + "_" + str(j + 1)
self.graph.add((container, URIRef(elem_uri), v))
elem_uri_pos = str(RDF) + "_" + str(pos)
self.graph.add((container, URIRef(elem_uri_pos), item))
self._len += 1
return self
class NoElementException(Exception):
def __init__(self, message="rdf:Alt Container is empty"):
self.message = message
def __str__(self):
return self.message
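# Usage sketch for the Seq and Alt containers defined above, in the same doctest
# style as the Container docstring (output not captured from a live session):
#
#   >>> from rdflib import Graph, BNode, Literal
#   >>> g = Graph()
#   >>> s = Seq(g, BNode(), [Literal("a"), Literal("c")])
#   >>> s = s.add_at_position(2, Literal("b"))  # shifts "c" from rdf:_2 to rdf:_3
#   >>> [str(s[i]) for i in (1, 2, 3)]
#   ['a', 'b', 'c']
#   >>> a = Alt(g, BNode(), [Literal("x")])
#   >>> str(a.anyone())  # random member; raises NoElementException when empty
#   'x'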
|
|
#!/usr/bin/env python -u
from operator import add
import time
import sys
import numpy as np
import glob
from optparse import OptionParser
from sets import Set
import l_bp
def get_p(ls):
return np.exp(ls)
def get_ls(p):
if p == 0:
return float("-inf")
else:
return np.log(p)
def ls_multiply(x, y):
if (x == float("-inf")) or (y == float("-inf")):
return float("-inf")
else:
return x + y
def ls_divide(x, y):
return x - y
def ls_add(x, y):
if x == float("-inf"):
return y
elif y == float("-inf"):
return x
elif (x < y):
return y + np.log(1 + np.exp(x - y))
else:
return x + np.log(1 + np.exp(y - x))
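# Worked example of the log-space helpers above (values rounded): with
# x = get_ls(0.25) and y = get_ls(0.25), both are ln(0.25) ~= -1.386, and
# ls_add(x, y) = x + ln(1 + exp(0)) = ln(0.25) + ln(2) = ln(0.5) ~= -0.693,
# so get_p(ls_add(x, y)) ~= 0.5. ls_multiply(x, get_ls(0)) stays at -inf, which
# is how zero-probability positions remain zero in the PROD path further below.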
def print_var_line(l):
A = l.rstrip().split('\t')
if A[4] == '<INV>' and ('--:0' in A[7] or '++:0' in A[7]):
[sv_type,chr_l,chr_r,strands,start_l,end_l,start_r,end_r,m] = \
l_bp.split_v(l)
STRAND_DICT = dict(x.split(':') for x in m['STRANDS'].split(','))
for o in STRAND_DICT.keys():
if STRAND_DICT[o] == '0':
del(STRAND_DICT[o])
STRANDS = ','.join(['%s:%s' % (o,STRAND_DICT[o]) for o in STRAND_DICT])
if STRANDS[:2] == '++':
ALT = 'N]' + chr_l + ':' + m['END'] + ']'
elif STRANDS[:2] == '--':
ALT = '[' + chr_l + ':' + m['END'] + '[N'
SVTYPE = 'BND'
CIPOS = m['CIEND']
CIEND = m['CIPOS']
CIPOS95 = m['CIEND95']
CIEND95 = m['CIPOS95']
IMPRECISE = 'IMPRECISE'
SU = m['SU']
PE = m['PE']
SR = m['SR']
PRPOS = m['PREND']
PREND = m['PRPOS']
SNAME = m['SNAME']
EVENT = A[2]
A[4] = ALT
A[7] = ';'.join(['SVTYPE=' + str(SVTYPE),
'STRANDS=' + str(STRANDS),
'CIPOS=' + str(CIPOS),
'CIEND=' + str(CIEND),
'CIPOS95=' + str(CIPOS95),
'CIEND95=' + str(CIEND95),
str(IMPRECISE),
'SU=' + str(SU),
'PE=' + str(PE),
'SR=' + str(SR),
'PRPOS=' + str(PRPOS),
'PREND=' + str(PREND),
'SNAME=' + str(SNAME),
'EVENT=' + str(EVENT)])
# reconstruct the line
l = '\t'.join(A)
if A[4] not in ['<DEL>', '<DUP>', '<INV>']:
[sv_type,chr_l,chr_r,strands,start_l,end_l,start_r,end_r,m] = \
l_bp.split_v(l)
CHROM = chr_r
POS = m['END']
ID = A[2] + '_2'
REF = 'N'
ALT = ''
if A[4][0] == '[':
ALT = '[' + chr_l + ':' + A[1] + '[N'
elif A[4][0] == ']':
ALT = 'N[' + chr_l + ':' + A[1] + '['
elif A[4][-1] == '[':
ALT = ']' + chr_l + ':' + A[1] + ']N'
elif A[4][-1] == ']':
ALT = 'N]' + chr_l + ':' + A[1] + ']'
QUAL = A[5]
FILTER = '.'
SVTYPE = 'BND'
STRANDS = m['STRANDS']
CIPOS = m['CIEND']
CIEND = m['CIPOS']
CIPOS95 = m['CIEND95']
CIEND95 = m['CIPOS95']
IMPRECISE = 'IMPRECISE'
SU = m['SU']
PE = m['PE']
SR = m['SR']
PRPOS = m['PREND']
PREND = m['PRPOS']
SNAME = m['SNAME']
EVENT = A[2]
SECONDARY = 'SECONDARY'
MATEID=A[2] + '_1'
INFO = ';'.join(['SVTYPE=' + str(SVTYPE),
'STRANDS=' + str(STRANDS),
'CIPOS=' + str(CIPOS),
'CIEND=' + str(CIEND),
'CIPOS95=' + str(CIPOS95),
'CIEND95=' + str(CIEND95),
str(IMPRECISE),
str(SECONDARY),
'SU=' + str(SU),
'PE=' + str(PE),
'SR=' + str(SR),
'PRPOS=' + str(PRPOS),
'PREND=' + str(PREND),
'SNAME=' + str(SNAME),
'EVENT=' + str(EVENT),
'MATEID=' + str(MATEID)])
O = [CHROM,POS,ID,REF,ALT,QUAL,FILTER,INFO]
A[7] += ';MATEID=' + A[2] + '_2'
A[2] += '_1'
print '\t'.join(A[:8])
print '\t'.join([str(o) for o in O])
else:
print '\t'.join(A[:8])
def merge(BP, sample_order, v_id, use_product):
#sys.stderr.write(str(len(BP)) + '\n')
if len(BP) == 1:
A = BP[0].l.rstrip().split('\t')
#tack on id to SNAME
#A[7]+= ':' + A[2]
s_start=A[7].find('SNAME=')
s_end=A[7].find(';',s_start)
if (s_end > -1):
A[7] = A[7][:s_start] + \
A[7][s_start:s_end] + \
':' + A[2] + \
A[7][s_end:]
else:
A[7]+= ':' + A[2]
# reset the id to be unique in this file
v_id += 1
A[2] = str(v_id)
#clip out old mate id
s_start=A[7].find('MATEID=')
s_end=A[7].find(';',s_start)
if (s_end > -1):
A[7] = A[7][:s_start] + A[7][s_end+1:]
else:
A[7] = A[7][:s_start]
#clip out old event id
s_start=A[7].find('EVENT=')
s_end=A[7].find(';', s_start)
if (s_end > -1):
A[7] = A[7][:s_start] + A[7][s_end+1:]
else:
A[7] = A[7][:s_start]
#add new mate
A[7]+= ';EVENT=' + A[2]
#add new alg
if use_product:
A[7]+= ';ALG=PROD'
else:
A[7] += ';ALG=SUM'
print_var_line('\t'.join(A))
return v_id
# this find the max clique
#G = {}
#l_bp.connect(G, BP, 0)
#for g in G:
# sys.stderr.write( str(g) + '\t' + str(len(G[g].edges)) + '\n')
#C = []
#_G = G.copy()
#while len(_G) != 0:
# R = Set()
# X = Set()
# P = Set(_G.keys())
# clique = [x for x in l_bp.bron_kerbosch(_G, R, P, X)]
# max_clique = sorted(clique, key=len)[0]
# sys.stderr.write(str(max_clique) +'\n')
# C.append(list(max_clique))
# # remove these from the graph
# for g in _G:
# E = [e for e in _G[g].edges if e[0] not in clique]
# G[g].edges = E
# for c in max_clique:
# del _G[c]
#Sweep the set. Find the largest intersecting set. Remove it. Continue.
import heapq
BP.sort(key=lambda x: x.start_l)
BP_i = range(len(BP))
C = []
#print BP_i
while len(BP_i) > 0:
h_l = []
max_c = []
max_c_len = 0
for i in BP_i:
while (len(h_l) > 0) and (h_l[0][0] < BP[i].start_l):
heapq.heappop(h_l)
heapq.heappush(h_l, (BP[i].end_l, i))
# at this point everything in h_l intersects on the left
# but we need to take into account what is going on on the right
h_r = []
h_l_i = [x[1] for x in h_l]
h_l_i.sort(key=lambda x:BP[x].start_r)
for j in h_l_i:
while (len(h_r) > 0) and (h_r[0][0] < BP[j].start_r):
heapq.heappop(h_r)
heapq.heappush(h_r, (BP[j].end_r, j))
if max_c_len < len(h_r):
max_c_len = len(h_r)
max_c = [y[1] for y in h_r]
C.append(max_c)
for c in max_c:
BP_i.remove(c)
for c in C:
L = []
R = []
#for g_i in c:
for b_i in c:
#b = G[g_i].b
b = BP[b_i]
L.append([b.start_l,b.end_l,b.p_l])
R.append([b.start_r,b.end_r,b.p_r])
[start_R, end_R, a_R] = l_bp.align_intervals(R)
[start_L, end_L, a_L] = l_bp.align_intervals(L)
p_L = [0] * len(a_L[0])
p_R = [0] * len(a_R[0])
for c_i in range(len(c)):
for i in range(len(a_L[c_i])):
#p_L[i] = p_L[i] * a_L[c_i][i]
p_L[i] += a_L[c_i][i]
for i in range(len(a_R[c_i])):
#p_R[i] = p_R[i] * a_R[c_i][i]
p_R[i] += a_R[c_i][i]
ALG = 'SUM'
if use_product:
pmax_i_L = p_L.index(max(p_L))
pmax_i_R = p_R.index(max(p_R))
miss = 0
for c_i in range(len(c)):
if (a_L[c_i][pmax_i_L] == 0) or (a_R[c_i][pmax_i_R] == 0):
miss += 1
#exit(1)
if miss == 0:
ALG = "PROD"
ls_p_L = [get_ls(1)] * len(a_L[0])
ls_p_R = [get_ls(1)] * len(a_R[0])
for c_i in range(len(c)):
for i in range(len(a_L[c_i])):
ls_p_L[i] = ls_multiply(ls_p_L[i], get_ls(a_L[c_i][i]))
#p_L[i] = p_L[i] * a_L[c_i][i]
for i in range(len(a_R[c_i])):
ls_p_R[i] = ls_multiply(ls_p_R[i], get_ls(a_R[c_i][i]))
#p_R[i] = p_R[i] * a_R[c_i][i]
ls_sum_L = get_ls(0)
ls_sum_R = get_ls(0)
for ls_p in ls_p_L:
ls_sum_L = ls_add(ls_sum_L, ls_p)
for ls_p in ls_p_R:
ls_sum_R = ls_add(ls_sum_R, ls_p)
p_L = []
for ls_p in ls_p_L:
p_L.append(get_p(ls_divide(ls_p, ls_sum_L)))
p_R = []
for ls_p in ls_p_R:
p_R.append(get_p(ls_divide(ls_p, ls_sum_R)))
sum_L = sum(p_L)
sum_R = sum(p_R)
p_L = [x/sum_L for x in p_L]
        p_R = [x/sum_R for x in p_R]
[clip_start_L, clip_end_L] = l_bp.trim(p_L)
[clip_start_R, clip_end_R] = l_bp.trim(p_R)
new_start_L = start_L + clip_start_L
new_end_L = end_L - clip_end_L
new_start_R = start_R + clip_start_R
new_end_R = end_R - clip_end_R
p_L = p_L[clip_start_L:len(p_L)-clip_end_L]
p_R = p_R[clip_start_R:len(p_R)-clip_end_R]
s_p_L = sum(p_L)
s_p_R = sum(p_R)
p_L = [x/s_p_L for x in p_L]
p_R = [x/s_p_R for x in p_R]
max_i_L = p_L.index(max(p_L))
max_i_R = p_R.index(max(p_R))
ninefive_i_L_start = max_i_L
ninefive_i_L_end = max_i_L
ninefive_i_L_total = p_L[max_i_L]
updated = 0
while (ninefive_i_L_total < 0.95):
if ninefive_i_L_start > 0:
ninefive_i_L_start -= 1
ninefive_i_L_total += p_L[ninefive_i_L_start]
updated = 1
if ninefive_i_L_end < len(p_L)-1:
ninefive_i_L_end += 1
ninefive_i_L_total += p_L[ninefive_i_L_end]
updated = 1
if not updated:
break
ninefive_i_R_start = max_i_R
ninefive_i_R_end = max_i_R
ninefive_i_R_total = p_R[max_i_R]
updated = 0
while (ninefive_i_R_total < 0.95):
if ninefive_i_R_start > 0:
ninefive_i_R_start -= 1
ninefive_i_R_total += p_R[ninefive_i_R_start]
updated = 1
if ninefive_i_R_end < len(p_R)-1:
ninefive_i_R_end += 1
ninefive_i_R_total += p_R[ninefive_i_R_end]
updated = 1
if not updated:
break
CIPOS95=str(ninefive_i_L_start) + ',' + str(ninefive_i_L_end)
CIEND95=str(ninefive_i_R_start) + ',' + str(ninefive_i_R_end)
#CHROM = G[c[0]].b.chr_l
CHROM = BP[c[0]].chr_l
POS = new_start_L + max_i_L
v_id += 1
ID = str(v_id)
REF = 'N'
ALT = ''
#if G[c[0]].b.sv_type == 'BND':
if BP[c[0]].sv_type == 'BND':
#G[c[0]].b.chr_r + \
# this is very wrong: strand orientation
# is destroyed when merging breakend variants
#ALT = 'N]' + \
#BP[c[0]].chr_r + \
#':' + \
#str(new_start_R + max_i_R) + \
#']'
if BP[c[0]].strands[:2] == '++':
ALT = 'N]' + \
BP[c[0]].chr_r + \
':' + \
str(new_start_R + max_i_R) + \
']'
elif BP[c[0]].strands[:2] == '-+':
ALT = ']' + \
BP[c[0]].chr_r + \
':' + \
str(new_start_R + max_i_R) + \
']N'
elif BP[c[0]].strands[:2] == '+-':
ALT = 'N[' + \
BP[c[0]].chr_r + \
':' + \
str(new_start_R + max_i_R) + \
'['
elif BP[c[0]].strands[:2] == '--':
ALT = '[' + \
BP[c[0]].chr_r + \
':' + \
str(new_start_R + max_i_R) + \
'[N'
else:
#ALT = '<' + G[c[0]].b.sv_type + '>'
ALT = '<' + BP[c[0]].sv_type + '>'
QUAL = 0.0
FILTER = '.'
#FORMAT = G[c[0]].b.l.split('\t')[8]
FORMAT = BP[c[0]].l.split('\t')[8]
#SVTYPE = G[c[0]].b.sv_type
SVTYPE = BP[c[0]].sv_type
STRANDS = ''
strand_map = {}
e_type_map = {}
SU = 0
PE = 0
SR = 0
s_name_list = []
gt_list = []
#for g_i in c:
for b_i in c:
#A = G[g_i].b.l.rstrip().split('\t')
A = BP[b_i].l.rstrip().split('\t')
if A[5].isdigit():
QUAL += float(A[5])
m = l_bp.to_map(A[7])
for strand_entry in m['STRANDS'].split(','):
s_type,s_count = strand_entry.split(':')
if s_type not in strand_map:
strand_map[s_type] = 0
strand_map[s_type] += int(s_count)
SU += int(m['SU'])
PE += int(m['PE'])
SR += int(m['SR'])
s_name_list.append(m['SNAME'] + ':' + A[2])
gt_list += A[9:]
SNAME=','.join(s_name_list)
GTS = '\t'.join(gt_list)
strand_types_counts = []
for strand in strand_map:
strand_types_counts.append(strand + ':' + str(strand_map[strand]))
STRANDS = ','.join(strand_types_counts)
if SVTYPE=='DEL':
SVLEN = (new_start_L + max_i_L) - (new_start_R + max_i_R)
else:
SVLEN = (new_start_R + max_i_R) - (new_start_L + max_i_L)
END = new_start_R + max_i_R
CIPOS=','.join([str(x) for x in [-1*max_i_L, len(p_L) - max_i_L - 1]])
CIEND=','.join([str(x) for x in [-1*max_i_R, len(p_R) - max_i_R - 1]])
IMPRECISE='IMPRECISE'
PRPOS=','.join([str(x) for x in p_L])
PREND=','.join([str(x) for x in p_R])
I = ['SVTYPE=' + str(SVTYPE),
'STRANDS=' + str(STRANDS),
'SVLEN=' + str(SVLEN),
'CIPOS=' + str(CIPOS),
'CIEND=' + str(CIEND),
'CIPOS95=' + str(CIPOS95),
'CIEND95=' + str(CIEND95),
str(IMPRECISE),
'SU=' + str(SU),
'PE=' + str(PE),
'SR=' + str(SR),
'PRPOS=' + str(PRPOS),
'PREND=' + str(PREND),
'ALG=' + str(ALG),
'SNAME=' + str(SNAME)]
if BP[c[0]].sv_type == 'BND':
I.append('EVENT=' + str(ID))
else:
I.append('END=' + str(END))
INFO = ';'.join(I)
QUAL = str(QUAL)
#O = [CHROM,POS,ID,REF,ALT,QUAL,FILTER,INFO,FORMAT,GTS]
O = [CHROM,POS,ID,REF,ALT,QUAL,FILTER,INFO]
print_var_line('\t'.join([str(o) for o in O]))
return v_id
def r_cluster(BP_l, sample_order, v_id, use_product):
# need to resort based on the right side, then extract clusters
BP_l.sort(key=lambda x: x.start_r)
BP_l.sort(key=lambda x: x.chr_r)
BP_r = []
BP_max_end_r = -1
BP_chr_r = ''
for b in BP_l:
if (len(BP_r) == 0) or \
((b.start_r <= BP_max_end_r) and \
(b.chr_r == BP_chr_r)):
BP_r.append(b)
BP_max_end_r = max(BP_max_end_r, b.end_r)
BP_chr_r = b.chr_r
else:
#print len(BP_r)
v_id = merge(BP_r, sample_order, v_id, use_product)
BP_r = [b]
BP_max_end_r = b.end_r
BP_chr_r = b.chr_r
if len(BP_r) > 0:
#print len(BP_r)
v_id = merge(BP_r, sample_order, v_id, use_product)
return v_id
def l_cluster(file_name, percent_slop=0, fixed_slop=0, use_product=False):
v_id = 0
vcf_lines = []
vcf_headers = list()
r = l_bp.parse_vcf(file_name, vcf_lines, vcf_headers, add_sname=False)
vcf_headers.append("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
sample_order = []
for header in vcf_headers:
if header[:8] == '##SAMPLE':
sample_order.append(header.rstrip()[13:-1])
#elif header[:8] == '##FORMAT':
#i,n,t=header[header.find('<')+1:header.find('>')].split(',')[0:3]
#print i,n,t
#exit(1)
for h in vcf_headers:
print h,
BP_l = []
BP_sv_type = ''
BP_max_end_l = -1
BP_chr_l = ''
for l in vcf_lines:
b = l_bp.breakpoint(l,
percent_slop=percent_slop,
fixed_slop=fixed_slop)
if (len(BP_l) == 0) or \
((b.start_l <= BP_max_end_l) and \
(b.chr_l == BP_chr_l) and \
(b.sv_type == BP_sv_type)):
BP_l.append(b)
BP_max_end_l = max(BP_max_end_l, b.end_l)
BP_chr_l = b.chr_l
BP_sv_type = b.sv_type
else:
#print len(BP_l)
v_id = r_cluster(BP_l, sample_order, v_id, use_product)
BP_l = [b]
BP_max_end_l = b.end_l
BP_sv_type = b.sv_type
BP_chr_l = b.chr_l
if len(BP_l) > 0:
#print len(BP_l)
v_id = r_cluster(BP_l, sample_order, v_id, use_product)
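# Summary of the flow above: l_cluster() batches breakpoints whose left intervals
# overlap (same chromosome and SV type), r_cluster() re-sorts each batch by the
# right side and splits it the same way, and merge() collapses each final group
# into one VCF record, combining the PRPOS/PREND probability vectors by SUM or,
# when --product is given and every member call is nonzero at the consensus peak,
# by PROD.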
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main():
usage = """%prog -i <file>
lmerge
Author: Ryan Layer & Ira Hall
Description: merge lumpy calls.
Version: ira_7
"""
parser = OptionParser(usage)
parser.add_option("-i", \
"--inFile", \
dest="inFile",
help="A sorted lumpy output file generated by " + \
"lsort; or stdin (-i stdin). Column 7 must " + \
"have the format sample:variantID", \
metavar="FILE")
parser.add_option("-p", \
"--percent_slop", \
dest="percent_slop",
type="float",
default=0.0,
help="Increase the the breakpoint confidence " + \
"interval both up and down stream by a given " + \
"proportion of the original size. If both slop " + \
"parameters are set, the max is used.")
parser.add_option("-f", \
"--fixed_slop", \
dest="fixed_slop",
type="int",
default=0,
help="Increase the the breakpoint confidence " + \
"interval both up and down stream by a given " + \
"fixed size. If both slop " + \
"parameters are set, the max is used.")
parser.add_option("--product", \
dest="use_product",
action="store_true",
default=False,
help="Calculate breakpoint PDF and position " + \
"using product.")
(opts, args) = parser.parse_args()
#if opts.inFile is None or opts.configFile is None:
if opts.inFile is None:
parser.print_help()
print
else:
try:
l_cluster(opts.inFile,
percent_slop=opts.percent_slop,
fixed_slop=opts.fixed_slop,
use_product=opts.use_product)
except IOError as err:
sys.stderr.write("IOError " + str(err) + "\n");
return
if __name__ == "__main__":
sys.exit(main())
|
|
"""Proxy camera platform that enables image processing of camera data."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import io
import logging
from PIL import Image
import voluptuous as vol
from homeassistant.components.camera import (
PLATFORM_SCHEMA,
Camera,
async_get_image,
async_get_mjpeg_stream,
async_get_still_stream,
)
from homeassistant.const import CONF_ENTITY_ID, CONF_MODE, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_CACHE_IMAGES = "cache_images"
CONF_FORCE_RESIZE = "force_resize"
CONF_IMAGE_QUALITY = "image_quality"
CONF_IMAGE_REFRESH_RATE = "image_refresh_rate"
CONF_MAX_IMAGE_WIDTH = "max_image_width"
CONF_MAX_IMAGE_HEIGHT = "max_image_height"
CONF_MAX_STREAM_WIDTH = "max_stream_width"
CONF_MAX_STREAM_HEIGHT = "max_stream_height"
CONF_IMAGE_TOP = "image_top"
CONF_IMAGE_LEFT = "image_left"
CONF_STREAM_QUALITY = "stream_quality"
MODE_RESIZE = "resize"
MODE_CROP = "crop"
DEFAULT_BASENAME = "Camera Proxy"
DEFAULT_QUALITY = 75
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_CACHE_IMAGES, default=False): cv.boolean,
        vol.Optional(CONF_FORCE_RESIZE, default=False): cv.boolean,
vol.Optional(CONF_MODE, default=MODE_RESIZE): vol.In([MODE_RESIZE, MODE_CROP]),
vol.Optional(CONF_IMAGE_QUALITY): int,
vol.Optional(CONF_IMAGE_REFRESH_RATE): float,
vol.Optional(CONF_MAX_IMAGE_WIDTH): int,
vol.Optional(CONF_MAX_IMAGE_HEIGHT): int,
vol.Optional(CONF_MAX_STREAM_WIDTH): int,
vol.Optional(CONF_MAX_STREAM_HEIGHT): int,
vol.Optional(CONF_IMAGE_LEFT): int,
vol.Optional(CONF_IMAGE_TOP): int,
vol.Optional(CONF_STREAM_QUALITY): int,
}
)
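# A minimal sketch of a mapping the extended schema above accepts (values are
# illustrative placeholders; the base PLATFORM_SCHEMA also requires the usual
# "platform" key, shown here with an assumed platform name):
#
#   PLATFORM_SCHEMA({
#       "platform": "proxy",
#       CONF_ENTITY_ID: "camera.front_door",
#       CONF_MAX_IMAGE_WIDTH: 720,
#       CONF_IMAGE_QUALITY: 60,
#       CONF_MODE: MODE_CROP,
#   })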
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Proxy camera platform."""
async_add_entities([ProxyCamera(hass, config)])
def _precheck_image(image, opts):
"""Perform some pre-checks on the given image."""
if not opts:
raise ValueError()
try:
img = Image.open(io.BytesIO(image))
except OSError as err:
_LOGGER.warning("Failed to open image")
raise ValueError() from err
imgfmt = str(img.format)
if imgfmt not in ("PNG", "JPEG"):
_LOGGER.warning("Image is of unsupported type: %s", imgfmt)
raise ValueError()
if img.mode != "RGB":
img = img.convert("RGB")
return img
def _resize_image(image, opts):
"""Resize image."""
try:
img = _precheck_image(image, opts)
except ValueError:
return image
quality = opts.quality or DEFAULT_QUALITY
new_width = opts.max_width
(old_width, old_height) = img.size
old_size = len(image)
if old_width <= new_width:
if opts.quality is None:
_LOGGER.debug("Image is smaller-than/equal-to requested width")
return image
new_width = old_width
scale = new_width / float(old_width)
new_height = int(float(old_height) * float(scale))
img = img.resize((new_width, new_height), Image.ANTIALIAS)
imgbuf = io.BytesIO()
img.save(imgbuf, "JPEG", optimize=True, quality=quality)
newimage = imgbuf.getvalue()
if not opts.force_resize and len(newimage) >= old_size:
_LOGGER.debug(
"Using original image (%d bytes) "
"because resized image (%d bytes) is not smaller",
old_size,
len(newimage),
)
return image
_LOGGER.debug(
"Resized image from (%dx%d - %d bytes) to (%dx%d - %d bytes)",
old_width,
old_height,
old_size,
new_width,
new_height,
len(newimage),
)
return newimage
def _crop_image(image, opts):
"""Crop image."""
try:
img = _precheck_image(image, opts)
except ValueError:
return image
quality = opts.quality or DEFAULT_QUALITY
(old_width, old_height) = img.size
old_size = len(image)
if opts.top is None:
opts.top = 0
if opts.left is None:
opts.left = 0
if opts.max_width is None or opts.max_width > old_width - opts.left:
opts.max_width = old_width - opts.left
if opts.max_height is None or opts.max_height > old_height - opts.top:
opts.max_height = old_height - opts.top
img = img.crop(
(opts.left, opts.top, opts.left + opts.max_width, opts.top + opts.max_height)
)
imgbuf = io.BytesIO()
img.save(imgbuf, "JPEG", optimize=True, quality=quality)
newimage = imgbuf.getvalue()
_LOGGER.debug(
"Cropped image from (%dx%d - %d bytes) to (%dx%d - %d bytes)",
old_width,
old_height,
old_size,
opts.max_width,
opts.max_height,
len(newimage),
)
return newimage
class ImageOpts:
"""The representation of image options."""
def __init__(self, max_width, max_height, left, top, quality, force_resize):
"""Initialize image options."""
self.max_width = max_width
self.max_height = max_height
self.left = left
self.top = top
self.quality = quality
self.force_resize = force_resize
def __bool__(self):
"""Bool evaluation rules."""
return bool(self.max_width or self.quality)
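# Sketch of how the helpers above fit together (illustrative values; jpeg_bytes
# stands for raw camera bytes such as image.content from async_get_image):
#
#   opts = ImageOpts(max_width=640, max_height=None, left=None, top=None,
#                    quality=70, force_resize=False)
#   smaller = _resize_image(jpeg_bytes, opts)   # falls back to the original bytes
#                                               # if the resized JPEG is not smaller
#   cropped = _crop_image(jpeg_bytes, ImageOpts(320, 240, 0, 0, 70, True))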
class ProxyCamera(Camera):
"""The representation of a Proxy camera."""
def __init__(self, hass, config):
"""Initialize a proxy camera component."""
super().__init__()
self.hass = hass
self._proxied_camera = config.get(CONF_ENTITY_ID)
self._name = (
config.get(CONF_NAME) or f"{DEFAULT_BASENAME} - {self._proxied_camera}"
)
self._image_opts = ImageOpts(
config.get(CONF_MAX_IMAGE_WIDTH),
config.get(CONF_MAX_IMAGE_HEIGHT),
config.get(CONF_IMAGE_LEFT),
config.get(CONF_IMAGE_TOP),
config.get(CONF_IMAGE_QUALITY),
config.get(CONF_FORCE_RESIZE),
)
self._stream_opts = ImageOpts(
config.get(CONF_MAX_STREAM_WIDTH),
config.get(CONF_MAX_STREAM_HEIGHT),
config.get(CONF_IMAGE_LEFT),
config.get(CONF_IMAGE_TOP),
config.get(CONF_STREAM_QUALITY),
True,
)
self._image_refresh_rate = config.get(CONF_IMAGE_REFRESH_RATE)
self._cache_images = bool(
config.get(CONF_IMAGE_REFRESH_RATE) or config.get(CONF_CACHE_IMAGES)
)
self._last_image_time = dt_util.utc_from_timestamp(0)
self._last_image = None
self._mode = config.get(CONF_MODE)
def camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return camera image."""
return asyncio.run_coroutine_threadsafe(
self.async_camera_image(), self.hass.loop
).result()
async def async_camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return a still image response from the camera."""
now = dt_util.utcnow()
if self._image_refresh_rate and now < self._last_image_time + timedelta(
seconds=self._image_refresh_rate
):
return self._last_image
self._last_image_time = now
image = await async_get_image(self.hass, self._proxied_camera)
if not image:
_LOGGER.error("Error getting original camera image")
return self._last_image
if self._mode == MODE_RESIZE:
job = _resize_image
else:
job = _crop_image
image_bytes: bytes = await self.hass.async_add_executor_job(
job, image.content, self._image_opts
)
if self._cache_images:
self._last_image = image_bytes
return image_bytes
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from camera images."""
if not self._stream_opts:
return await async_get_mjpeg_stream(
self.hass, request, self._proxied_camera
)
return await async_get_still_stream(
request, self._async_stream_image, self.content_type, self.frame_interval
)
@property
def name(self):
"""Return the name of this camera."""
return self._name
async def _async_stream_image(self):
"""Return a still image response from the camera."""
try:
image = await async_get_image(self.hass, self._proxied_camera)
if not image:
return None
except HomeAssistantError as err:
raise asyncio.CancelledError() from err
if self._mode == MODE_RESIZE:
job = _resize_image
else:
job = _crop_image
return await self.hass.async_add_executor_job(
job, image.content, self._stream_opts
)
|
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_db import exception as oslo_db_exc
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import orm
from sqlalchemy.orm import aliased
from sqlalchemy.orm import exc as sa_exc
from neutron_lib import constants as lib_consts
from neutron._i18n import _
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as n_exc
from neutron.db import address_scope_db
from neutron.db import common_db_mixin as common_db
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import bgp as bgp_ext
from neutron.plugins.ml2 import models as ml2_models
LOG = logging.getLogger(__name__)
DEVICE_OWNER_ROUTER_GW = lib_consts.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_ROUTER_INTF = lib_consts.DEVICE_OWNER_ROUTER_INTF
class BgpSpeakerPeerBinding(model_base.BASEV2):
"""Represents a mapping between BGP speaker and BGP peer"""
__tablename__ = 'bgp_speaker_peer_bindings'
bgp_speaker_id = sa.Column(sa.String(length=36),
sa.ForeignKey('bgp_speakers.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
bgp_peer_id = sa.Column(sa.String(length=36),
sa.ForeignKey('bgp_peers.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
class BgpSpeakerNetworkBinding(model_base.BASEV2):
"""Represents a mapping between a network and BGP speaker"""
__tablename__ = 'bgp_speaker_network_bindings'
bgp_speaker_id = sa.Column(sa.String(length=36),
sa.ForeignKey('bgp_speakers.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
network_id = sa.Column(sa.String(length=36),
sa.ForeignKey('networks.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
ip_version = sa.Column(sa.Integer, nullable=False, autoincrement=False,
primary_key=True)
class BgpSpeaker(model_base.BASEV2,
model_base.HasId,
model_base.HasTenant):
"""Represents a BGP speaker"""
__tablename__ = 'bgp_speakers'
name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False)
local_as = sa.Column(sa.Integer, nullable=False, autoincrement=False)
advertise_floating_ip_host_routes = sa.Column(sa.Boolean, nullable=False)
advertise_tenant_networks = sa.Column(sa.Boolean, nullable=False)
peers = orm.relationship(BgpSpeakerPeerBinding,
backref='bgp_speaker_peer_bindings',
cascade='all, delete, delete-orphan',
lazy='joined')
networks = orm.relationship(BgpSpeakerNetworkBinding,
backref='bgp_speaker_network_bindings',
cascade='all, delete, delete-orphan',
lazy='joined')
ip_version = sa.Column(sa.Integer, nullable=False, autoincrement=False)
class BgpPeer(model_base.BASEV2,
model_base.HasId,
model_base.HasTenant):
"""Represents a BGP routing peer."""
__tablename__ = 'bgp_peers'
name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False)
peer_ip = sa.Column(sa.String(64),
nullable=False)
remote_as = sa.Column(sa.Integer, nullable=False, autoincrement=False)
auth_type = sa.Column(sa.String(16), nullable=False)
password = sa.Column(sa.String(255), nullable=True)
class BgpDbMixin(common_db.CommonDbMixin):
def create_bgp_speaker(self, context, bgp_speaker):
uuid = uuidutils.generate_uuid()
self._save_bgp_speaker(context, bgp_speaker, uuid)
return self.get_bgp_speaker(context, uuid)
def get_bgp_speakers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
with context.session.begin(subtransactions=True):
return self._get_collection(context, BgpSpeaker,
self._make_bgp_speaker_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
page_reverse=page_reverse)
def get_bgp_speaker(self, context, bgp_speaker_id, fields=None):
with context.session.begin(subtransactions=True):
bgp_speaker = self._get_bgp_speaker(context, bgp_speaker_id)
return self._make_bgp_speaker_dict(bgp_speaker, fields)
def get_bgp_speaker_with_advertised_routes(self, context,
bgp_speaker_id):
bgp_speaker_attrs = ['id', 'local_as', 'tenant_id']
bgp_peer_attrs = ['peer_ip', 'remote_as', 'password']
with context.session.begin(subtransactions=True):
bgp_speaker = self.get_bgp_speaker(context, bgp_speaker_id,
fields=bgp_speaker_attrs)
res = dict((k, bgp_speaker[k]) for k in bgp_speaker_attrs)
res['peers'] = self.get_bgp_peers_by_bgp_speaker(context,
bgp_speaker['id'],
fields=bgp_peer_attrs)
res['advertised_routes'] = self.get_routes_by_bgp_speaker_id(
context,
bgp_speaker_id)
return res
def update_bgp_speaker(self, context, bgp_speaker_id, bgp_speaker):
bp = bgp_speaker[bgp_ext.BGP_SPEAKER_BODY_KEY_NAME]
with context.session.begin(subtransactions=True):
bgp_speaker_db = self._get_bgp_speaker(context, bgp_speaker_id)
bgp_speaker_db.update(bp)
bgp_speaker_dict = self._make_bgp_speaker_dict(bgp_speaker_db)
return bgp_speaker_dict
def _save_bgp_speaker(self, context, bgp_speaker, uuid):
ri = bgp_speaker[bgp_ext.BGP_SPEAKER_BODY_KEY_NAME]
ri['tenant_id'] = context.tenant_id
with context.session.begin(subtransactions=True):
res_keys = ['local_as', 'tenant_id', 'name', 'ip_version',
'advertise_floating_ip_host_routes',
'advertise_tenant_networks']
res = dict((k, ri[k]) for k in res_keys)
res['id'] = uuid
bgp_speaker_db = BgpSpeaker(**res)
context.session.add(bgp_speaker_db)
def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info):
bgp_peer_id = self._get_id_for(bgp_peer_info, 'bgp_peer_id')
self._save_bgp_speaker_peer_binding(context,
bgp_speaker_id,
bgp_peer_id)
return {'bgp_peer_id': bgp_peer_id}
def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info):
bgp_peer_id = self._get_id_for(bgp_peer_info, 'bgp_peer_id')
self._remove_bgp_speaker_peer_binding(context,
bgp_speaker_id,
bgp_peer_id)
return {'bgp_peer_id': bgp_peer_id}
def add_gateway_network(self, context, bgp_speaker_id, network_info):
network_id = self._get_id_for(network_info, 'network_id')
with context.session.begin(subtransactions=True):
try:
self._save_bgp_speaker_network_binding(context,
bgp_speaker_id,
network_id)
except oslo_db_exc.DBDuplicateEntry:
raise bgp_ext.BgpSpeakerNetworkBindingError(
network_id=network_id,
bgp_speaker_id=bgp_speaker_id)
return {'network_id': network_id}
def remove_gateway_network(self, context, bgp_speaker_id, network_info):
with context.session.begin(subtransactions=True):
network_id = self._get_id_for(network_info, 'network_id')
self._remove_bgp_speaker_network_binding(context,
bgp_speaker_id,
network_id)
return {'network_id': network_id}
def delete_bgp_speaker(self, context, bgp_speaker_id):
with context.session.begin(subtransactions=True):
bgp_speaker_db = self._get_bgp_speaker(context, bgp_speaker_id)
context.session.delete(bgp_speaker_db)
def create_bgp_peer(self, context, bgp_peer):
ri = bgp_peer[bgp_ext.BGP_PEER_BODY_KEY_NAME]
auth_type = ri.get('auth_type')
password = ri.get('password')
if auth_type == 'md5' and not password:
raise bgp_ext.InvalidBgpPeerMd5Authentication()
with context.session.begin(subtransactions=True):
res_keys = ['tenant_id', 'name', 'remote_as', 'peer_ip',
'auth_type', 'password']
res = dict((k, ri[k]) for k in res_keys)
res['id'] = uuidutils.generate_uuid()
bgp_peer_db = BgpPeer(**res)
context.session.add(bgp_peer_db)
peer = self._make_bgp_peer_dict(bgp_peer_db)
peer.pop('password')
return peer
def get_bgp_peers(self, context, fields=None, filters=None, sorts=None,
limit=None, marker=None, page_reverse=False):
return self._get_collection(context, BgpPeer,
self._make_bgp_peer_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
page_reverse=page_reverse)
def get_bgp_peers_by_bgp_speaker(self, context,
bgp_speaker_id, fields=None):
filters = [BgpSpeakerPeerBinding.bgp_speaker_id == bgp_speaker_id,
BgpSpeakerPeerBinding.bgp_peer_id == BgpPeer.id]
with context.session.begin(subtransactions=True):
query = context.session.query(BgpPeer)
query = query.filter(*filters)
return [self._make_bgp_peer_dict(x) for x in query.all()]
def get_bgp_peer(self, context, bgp_peer_id, fields=None):
bgp_peer_db = self._get_bgp_peer(context, bgp_peer_id)
return self._make_bgp_peer_dict(bgp_peer_db, fields=fields)
def delete_bgp_peer(self, context, bgp_peer_id):
with context.session.begin(subtransactions=True):
bgp_peer_db = self._get_bgp_peer(context, bgp_peer_id)
context.session.delete(bgp_peer_db)
def update_bgp_peer(self, context, bgp_peer_id, bgp_peer):
bp = bgp_peer[bgp_ext.BGP_PEER_BODY_KEY_NAME]
with context.session.begin(subtransactions=True):
bgp_peer_db = self._get_bgp_peer(context, bgp_peer_id)
if ((bp['password'] is not None) and
(bgp_peer_db['auth_type'] == 'none')):
raise bgp_ext.BgpPeerNotAuthenticated(bgp_peer_id=bgp_peer_id)
bgp_peer_db.update(bp)
bgp_peer_dict = self._make_bgp_peer_dict(bgp_peer_db)
return bgp_peer_dict
def _get_bgp_speaker(self, context, bgp_speaker_id):
try:
return self._get_by_id(context, BgpSpeaker,
bgp_speaker_id)
except sa_exc.NoResultFound:
raise bgp_ext.BgpSpeakerNotFound(id=bgp_speaker_id)
def _get_bgp_speaker_ids_by_router(self, context, router_id):
with context.session.begin(subtransactions=True):
network_binding = aliased(BgpSpeakerNetworkBinding)
r_port = aliased(l3_db.RouterPort)
query = context.session.query(network_binding.bgp_speaker_id)
query = query.filter(
r_port.router_id == router_id,
r_port.port_type == lib_consts.DEVICE_OWNER_ROUTER_GW,
r_port.port_id == models_v2.Port.id,
models_v2.Port.network_id == network_binding.network_id)
return [binding.bgp_speaker_id for binding in query.all()]
def _get_bgp_speaker_ids_by_binding_network(self, context, network_id):
with context.session.begin(subtransactions=True):
query = context.session.query(
BgpSpeakerNetworkBinding.bgp_speaker_id)
query = query.filter(
BgpSpeakerNetworkBinding.network_id == network_id)
return query.all()
def get_advertised_routes(self, context, bgp_speaker_id):
routes = self.get_routes_by_bgp_speaker_id(context, bgp_speaker_id)
return self._make_advertised_routes_dict(routes)
def _get_id_for(self, resource, id_name):
try:
return resource.get(id_name)
except AttributeError:
msg = _("%s must be specified") % id_name
raise n_exc.BadRequest(resource=bgp_ext.BGP_SPEAKER_RESOURCE_NAME,
msg=msg)
def _get_bgp_peers_by_bgp_speaker_binding(self, context, bgp_speaker_id):
with context.session.begin(subtransactions=True):
query = context.session.query(BgpPeer)
query = query.filter(
BgpSpeakerPeerBinding.bgp_speaker_id == bgp_speaker_id,
BgpSpeakerPeerBinding.bgp_peer_id == BgpPeer.id)
return query.all()
def _save_bgp_speaker_peer_binding(self, context, bgp_speaker_id,
bgp_peer_id):
with context.session.begin(subtransactions=True):
try:
bgp_speaker = self._get_by_id(context, BgpSpeaker,
bgp_speaker_id)
except sa_exc.NoResultFound:
raise bgp_ext.BgpSpeakerNotFound(id=bgp_speaker_id)
try:
bgp_peer = self._get_by_id(context, BgpPeer,
bgp_peer_id)
except sa_exc.NoResultFound:
raise bgp_ext.BgpPeerNotFound(id=bgp_peer_id)
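            # Reject the association if another peer with the same peer_ip
            # is already bound to this BGP speaker.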
peers = self._get_bgp_peers_by_bgp_speaker_binding(context,
bgp_speaker_id)
self._validate_peer_ips(bgp_speaker_id, peers, bgp_peer)
binding = BgpSpeakerPeerBinding(bgp_speaker_id=bgp_speaker.id,
bgp_peer_id=bgp_peer.id)
context.session.add(binding)
def _validate_peer_ips(self, bgp_speaker_id, current_peers, new_peer):
for peer in current_peers:
if peer.peer_ip == new_peer.peer_ip:
raise bgp_ext.DuplicateBgpPeerIpException(
bgp_peer_id=new_peer.id,
peer_ip=new_peer.peer_ip,
bgp_speaker_id=bgp_speaker_id)
def _remove_bgp_speaker_peer_binding(self, context, bgp_speaker_id,
bgp_peer_id):
with context.session.begin(subtransactions=True):
try:
binding = self._get_bgp_speaker_peer_binding(context,
bgp_speaker_id,
bgp_peer_id)
except sa_exc.NoResultFound:
raise bgp_ext.BgpSpeakerPeerNotAssociated(
bgp_peer_id=bgp_peer_id,
bgp_speaker_id=bgp_speaker_id)
context.session.delete(binding)
def _save_bgp_speaker_network_binding(self,
context,
bgp_speaker_id,
network_id):
with context.session.begin(subtransactions=True):
try:
bgp_speaker = self._get_by_id(context, BgpSpeaker,
bgp_speaker_id)
except sa_exc.NoResultFound:
raise bgp_ext.BgpSpeakerNotFound(id=bgp_speaker_id)
try:
network = self._get_by_id(context, models_v2.Network,
network_id)
except sa_exc.NoResultFound:
raise n_exc.NetworkNotFound(net_id=network_id)
binding = BgpSpeakerNetworkBinding(
bgp_speaker_id=bgp_speaker.id,
network_id=network.id,
ip_version=bgp_speaker.ip_version)
context.session.add(binding)
def _remove_bgp_speaker_network_binding(self, context,
bgp_speaker_id, network_id):
with context.session.begin(subtransactions=True):
try:
binding = self._get_bgp_speaker_network_binding(
context,
bgp_speaker_id,
network_id)
except sa_exc.NoResultFound:
raise bgp_ext.BgpSpeakerNetworkNotAssociated(
network_id=network_id,
bgp_speaker_id=bgp_speaker_id)
context.session.delete(binding)
def _make_bgp_speaker_dict(self, bgp_speaker, fields=None):
attrs = {'id', 'local_as', 'tenant_id', 'name', 'ip_version',
'advertise_floating_ip_host_routes',
'advertise_tenant_networks'}
peer_bindings = bgp_speaker['peers']
network_bindings = bgp_speaker['networks']
res = dict((k, bgp_speaker[k]) for k in attrs)
res['peers'] = [x.bgp_peer_id for x in peer_bindings]
res['networks'] = [x.network_id for x in network_bindings]
return self._fields(res, fields)
def _make_advertised_routes_dict(self, routes):
return {'advertised_routes': list(routes)}
def _get_bgp_peer(self, context, bgp_peer_id):
try:
return self._get_by_id(context, BgpPeer, bgp_peer_id)
except sa_exc.NoResultFound:
raise bgp_ext.BgpPeerNotFound(id=bgp_peer_id)
def _get_bgp_speaker_peer_binding(self, context,
bgp_speaker_id, bgp_peer_id):
query = self._model_query(context, BgpSpeakerPeerBinding)
return query.filter(
BgpSpeakerPeerBinding.bgp_speaker_id == bgp_speaker_id,
BgpSpeakerPeerBinding.bgp_peer_id == bgp_peer_id).one()
def _get_bgp_speaker_network_binding(self, context,
bgp_speaker_id, network_id):
query = self._model_query(context, BgpSpeakerNetworkBinding)
        return query.filter(
            BgpSpeakerNetworkBinding.bgp_speaker_id == bgp_speaker_id,
            BgpSpeakerNetworkBinding.network_id == network_id).one()
def _make_bgp_peer_dict(self, bgp_peer, fields=None):
attrs = ['tenant_id', 'id', 'name', 'peer_ip', 'remote_as',
'auth_type', 'password']
res = dict((k, bgp_peer[k]) for k in attrs)
return self._fields(res, fields)
def _get_address_scope_ids_for_bgp_speaker(self, context, bgp_speaker_id):
with context.session.begin(subtransactions=True):
binding = aliased(BgpSpeakerNetworkBinding)
address_scope = aliased(address_scope_db.AddressScope)
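            # Walk from the speaker's network bindings to subnets, then to
            # their subnet pools, and finally to the owning address scopes.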
query = context.session.query(address_scope)
query = query.filter(
binding.bgp_speaker_id == bgp_speaker_id,
models_v2.Subnet.ip_version == binding.ip_version,
models_v2.Subnet.network_id == binding.network_id,
models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id,
models_v2.SubnetPool.address_scope_id == address_scope.id)
return [scope.id for scope in query.all()]
def get_routes_by_bgp_speaker_id(self, context, bgp_speaker_id):
"""Get all routes that should be advertised by a BgpSpeaker."""
with context.session.begin(subtransactions=True):
net_routes = self._get_tenant_network_routes_by_bgp_speaker(
context,
bgp_speaker_id)
fip_routes = self._get_central_fip_host_routes_by_bgp_speaker(
context,
bgp_speaker_id)
dvr_fip_routes = self._get_dvr_fip_host_routes_by_bgp_speaker(
context,
bgp_speaker_id)
return itertools.chain(fip_routes, net_routes, dvr_fip_routes)
def get_routes_by_bgp_speaker_binding(self, context,
bgp_speaker_id, network_id):
"""Get all routes for the given bgp_speaker binding."""
with context.session.begin(subtransactions=True):
fip_routes = self._get_central_fip_host_routes_by_binding(
context,
network_id,
bgp_speaker_id)
net_routes = self._get_tenant_network_routes_by_binding(
context,
network_id,
bgp_speaker_id)
dvr_fip_routes = self._get_dvr_fip_host_routes_by_binding(
context,
network_id,
bgp_speaker_id)
return itertools.chain(fip_routes, net_routes, dvr_fip_routes)
def _get_routes_by_router(self, context, router_id):
bgp_speaker_ids = self._get_bgp_speaker_ids_by_router(context,
router_id)
route_dict = {}
for bgp_speaker_id in bgp_speaker_ids:
fip_routes = self._get_central_fip_host_routes_by_router(
context,
router_id,
bgp_speaker_id)
net_routes = self._get_tenant_network_routes_by_router(
context,
router_id,
bgp_speaker_id)
dvr_fip_routes = self._get_dvr_fip_host_routes_by_router(
context,
router_id,
bgp_speaker_id)
routes = itertools.chain(fip_routes, net_routes, dvr_fip_routes)
route_dict[bgp_speaker_id] = list(routes)
return route_dict
def _get_central_fip_host_routes_by_router(self, context, router_id,
bgp_speaker_id):
"""Get floating IP host routes with the given router as nexthop."""
with context.session.begin(subtransactions=True):
dest_alias = aliased(l3_db.FloatingIP,
name='destination')
next_hop_alias = aliased(models_v2.IPAllocation,
name='next_hop')
binding_alias = aliased(BgpSpeakerNetworkBinding,
name='binding')
router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
name='router_attrs')
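            # Select (floating IP, next hop) pairs, where the next hop is
            # the IP of the router's gateway port on the same external
            # network as the floating IP.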
query = context.session.query(dest_alias.floating_ip_address,
next_hop_alias.ip_address)
query = query.join(
next_hop_alias,
next_hop_alias.network_id == dest_alias.floating_network_id)
query = query.join(l3_db.Router,
dest_alias.router_id == l3_db.Router.id)
query = query.filter(
l3_db.Router.id == router_id,
dest_alias.router_id == l3_db.Router.id,
l3_db.Router.id == router_attrs.router_id,
router_attrs.distributed == sa.sql.false(),
l3_db.Router.gw_port_id == next_hop_alias.port_id,
next_hop_alias.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.ip_version == 4,
binding_alias.network_id == models_v2.Subnet.network_id,
binding_alias.bgp_speaker_id == bgp_speaker_id,
binding_alias.ip_version == 4,
BgpSpeaker.advertise_floating_ip_host_routes == sa.sql.true())
query = query.outerjoin(router_attrs,
l3_db.Router.id == router_attrs.router_id)
query = query.filter(router_attrs.distributed != sa.sql.true())
return self._host_route_list_from_tuples(query.all())
    def _get_dvr_fip_host_routes_by_router(self, context, router_id,
                                           bgp_speaker_id):
with context.session.begin(subtransactions=True):
gw_query = self._get_gateway_query(context, bgp_speaker_id)
fip_query = self._get_fip_query(context, bgp_speaker_id)
            fip_query = fip_query.filter(
                l3_db.FloatingIP.router_id == router_id)
            # Create the join query.
join_query = self._join_fip_by_host_binding_to_agent_gateway(
context,
fip_query.subquery(),
gw_query.subquery())
return self._host_route_list_from_tuples(join_query.all())
def _get_central_fip_host_routes_by_binding(self, context,
network_id, bgp_speaker_id):
"""Get all floating IP host routes for the given network binding."""
with context.session.begin(subtransactions=True):
# Query the DB for floating IP's and the IP address of the
# gateway port
dest_alias = aliased(l3_db.FloatingIP,
name='destination')
next_hop_alias = aliased(models_v2.IPAllocation,
name='next_hop')
binding_alias = aliased(BgpSpeakerNetworkBinding,
name='binding')
router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
name='router_attrs')
query = context.session.query(dest_alias.floating_ip_address,
next_hop_alias.ip_address)
query = query.join(
next_hop_alias,
next_hop_alias.network_id == dest_alias.floating_network_id)
query = query.join(
binding_alias,
binding_alias.network_id == dest_alias.floating_network_id)
query = query.join(l3_db.Router,
dest_alias.router_id == l3_db.Router.id)
query = query.filter(
dest_alias.floating_network_id == network_id,
dest_alias.router_id == l3_db.Router.id,
l3_db.Router.gw_port_id == next_hop_alias.port_id,
next_hop_alias.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.ip_version == 4,
binding_alias.network_id == models_v2.Subnet.network_id,
binding_alias.bgp_speaker_id == BgpSpeaker.id,
BgpSpeaker.id == bgp_speaker_id,
BgpSpeaker.advertise_floating_ip_host_routes == sa.sql.true())
query = query.outerjoin(router_attrs,
l3_db.Router.id == router_attrs.router_id)
query = query.filter(router_attrs.distributed != sa.sql.true())
return self._host_route_list_from_tuples(query.all())
def _get_dvr_fip_host_routes_by_binding(self, context, network_id,
bgp_speaker_id):
with context.session.begin(subtransactions=True):
BgpBinding = BgpSpeakerNetworkBinding
gw_query = self._get_gateway_query(context, bgp_speaker_id)
            gw_query = gw_query.filter(BgpBinding.network_id == network_id)
            fip_query = self._get_fip_query(context, bgp_speaker_id)
            fip_query = fip_query.filter(BgpBinding.network_id == network_id)
            # Create the join query.
join_query = self._join_fip_by_host_binding_to_agent_gateway(
context,
fip_query.subquery(),
gw_query.subquery())
return self._host_route_list_from_tuples(join_query.all())
def _get_central_fip_host_routes_by_bgp_speaker(self, context,
bgp_speaker_id):
"""Get all the floating IP host routes advertised by a BgpSpeaker."""
with context.session.begin(subtransactions=True):
dest_alias = aliased(l3_db.FloatingIP,
name='destination')
next_hop_alias = aliased(models_v2.IPAllocation,
name='next_hop')
speaker_binding = aliased(BgpSpeakerNetworkBinding,
name="speaker_network_mapping")
router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
name='router_attrs')
query = context.session.query(dest_alias.floating_ip_address,
next_hop_alias.ip_address)
query = query.select_from(dest_alias,
BgpSpeaker,
l3_db.Router,
models_v2.Subnet)
query = query.join(
next_hop_alias,
next_hop_alias.network_id == dest_alias.floating_network_id)
query = query.join(
speaker_binding,
speaker_binding.network_id == dest_alias.floating_network_id)
query = query.join(l3_db.Router,
dest_alias.router_id == l3_db.Router.id)
query = query.filter(
BgpSpeaker.id == bgp_speaker_id,
BgpSpeaker.advertise_floating_ip_host_routes,
speaker_binding.bgp_speaker_id == BgpSpeaker.id,
dest_alias.floating_network_id == speaker_binding.network_id,
next_hop_alias.network_id == speaker_binding.network_id,
dest_alias.router_id == l3_db.Router.id,
l3_db.Router.gw_port_id == next_hop_alias.port_id,
next_hop_alias.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.ip_version == 4)
query = query.outerjoin(router_attrs,
l3_db.Router.id == router_attrs.router_id)
query = query.filter(router_attrs.distributed != sa.sql.true())
return self._host_route_list_from_tuples(query.all())
def _get_gateway_query(self, context, bgp_speaker_id):
BgpBinding = BgpSpeakerNetworkBinding
ML2PortBinding = ml2_models.PortBinding
IpAllocation = models_v2.IPAllocation
Port = models_v2.Port
gw_query = context.session.query(Port.network_id,
ML2PortBinding.host,
IpAllocation.ip_address)
        # Subquery for FIP agent gateway ports.
gw_query = gw_query.filter(
ML2PortBinding.port_id == Port.id,
IpAllocation.port_id == Port.id,
IpAllocation.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.ip_version == 4,
Port.device_owner == lib_consts.DEVICE_OWNER_AGENT_GW,
Port.network_id == BgpBinding.network_id,
BgpBinding.bgp_speaker_id == bgp_speaker_id,
BgpBinding.ip_version == 4)
return gw_query
def _get_fip_query(self, context, bgp_speaker_id):
BgpBinding = BgpSpeakerNetworkBinding
ML2PortBinding = ml2_models.PortBinding
        # Subquery for floating IPs.
fip_query = context.session.query(
l3_db.FloatingIP.floating_network_id,
ML2PortBinding.host,
l3_db.FloatingIP.floating_ip_address)
fip_query = fip_query.filter(
l3_db.FloatingIP.fixed_port_id == ML2PortBinding.port_id,
l3_db.FloatingIP.floating_network_id == BgpBinding.network_id,
BgpBinding.bgp_speaker_id == bgp_speaker_id)
return fip_query
def _get_dvr_fip_host_routes_by_bgp_speaker(self, context,
bgp_speaker_id):
with context.session.begin(subtransactions=True):
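            # For DVR, the next hop for a floating IP is the FIP agent
            # gateway port on the compute host that binds the floating IP's
            # fixed port, rather than the central router gateway.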
gw_query = self._get_gateway_query(context, bgp_speaker_id)
fip_query = self._get_fip_query(context, bgp_speaker_id)
            # Create the join query.
join_query = self._join_fip_by_host_binding_to_agent_gateway(
context,
fip_query.subquery(),
gw_query.subquery())
return self._host_route_list_from_tuples(join_query.all())
def _join_fip_by_host_binding_to_agent_gateway(self, context,
fip_subq, gw_subq):
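        """Join floating IPs to the FIP agent gateway ports bound on the
        same host and external network, yielding
        (floating_ip_address, gateway ip_address) pairs.
        """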
join_query = context.session.query(fip_subq.c.floating_ip_address,
gw_subq.c.ip_address)
and_cond = and_(
gw_subq.c.host == fip_subq.c.host,
gw_subq.c.network_id == fip_subq.c.floating_network_id)
return join_query.join(gw_subq, and_cond)
def _get_tenant_network_routes_by_binding(self, context,
network_id, bgp_speaker_id):
"""Get all tenant network routes for the given network."""
with context.session.begin(subtransactions=True):
tenant_networks_query = self._tenant_networks_by_network_query(
context,
network_id,
bgp_speaker_id)
nexthops_query = self._nexthop_ip_addresses_by_binding_query(
context,
network_id,
bgp_speaker_id)
join_q = self._join_tenant_networks_to_next_hops(
context,
tenant_networks_query.subquery(),
nexthops_query.subquery())
return self._make_advertised_routes_list(join_q.all())
def _get_tenant_network_routes_by_router(self, context, router_id,
bgp_speaker_id):
"""Get all tenant network routes with the given router as nexthop."""
with context.session.begin(subtransactions=True):
scopes = self._get_address_scope_ids_for_bgp_speaker(
context,
bgp_speaker_id)
address_scope = aliased(address_scope_db.AddressScope)
inside_query = context.session.query(
models_v2.Subnet.cidr,
models_v2.IPAllocation.ip_address,
address_scope.id)
outside_query = context.session.query(
address_scope.id,
models_v2.IPAllocation.ip_address)
speaker_binding = aliased(BgpSpeakerNetworkBinding,
name="speaker_network_mapping")
port_alias = aliased(l3_db.RouterPort, name='routerport')
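            # 'inside_query' collects the CIDRs of the router's internal
            # subnets, while 'outside_query' collects the IPs of its gateway
            # ports; joining them on address scope pairs each CIDR with its
            # next hop.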
inside_query = inside_query.filter(
port_alias.router_id == router_id,
models_v2.IPAllocation.port_id == port_alias.port_id,
models_v2.IPAllocation.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id,
models_v2.SubnetPool.address_scope_id == address_scope.id,
address_scope.id.in_(scopes),
port_alias.port_type != lib_consts.DEVICE_OWNER_ROUTER_GW,
speaker_binding.bgp_speaker_id == bgp_speaker_id)
outside_query = outside_query.filter(
port_alias.router_id == router_id,
port_alias.port_type == lib_consts.DEVICE_OWNER_ROUTER_GW,
models_v2.IPAllocation.port_id == port_alias.port_id,
models_v2.IPAllocation.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id,
models_v2.SubnetPool.address_scope_id == address_scope.id,
address_scope.id.in_(scopes),
speaker_binding.bgp_speaker_id == bgp_speaker_id,
speaker_binding.network_id == models_v2.Port.network_id,
port_alias.port_id == models_v2.Port.id)
inside_query = inside_query.subquery()
outside_query = outside_query.subquery()
join_query = context.session.query(inside_query.c.cidr,
outside_query.c.ip_address)
and_cond = and_(inside_query.c.id == outside_query.c.id)
join_query = join_query.join(outside_query, and_cond)
return self._make_advertised_routes_list(join_query.all())
def _get_tenant_network_routes_by_bgp_speaker(self, context,
bgp_speaker_id):
"""Get all tenant network routes to be advertised by a BgpSpeaker."""
with context.session.begin(subtransactions=True):
tenant_nets_q = self._tenant_networks_by_bgp_speaker_query(
context,
bgp_speaker_id)
nexthops_q = self._nexthop_ip_addresses_by_bgp_speaker_query(
context,
bgp_speaker_id)
join_q = self._join_tenant_networks_to_next_hops(
context,
tenant_nets_q.subquery(),
nexthops_q.subquery())
return self._make_advertised_routes_list(join_q.all())
def _join_tenant_networks_to_next_hops(self, context,
tenant_networks_subquery,
nexthops_subquery):
"""Join subquery for tenant networks to subquery for nexthop IP's"""
left_subq = tenant_networks_subquery
right_subq = nexthops_subquery
join_query = context.session.query(left_subq.c.cidr,
right_subq.c.ip_address)
and_cond = and_(left_subq.c.router_id == right_subq.c.router_id,
left_subq.c.ip_version == right_subq.c.ip_version)
join_query = join_query.join(right_subq, and_cond)
return join_query
def _tenant_networks_by_network_query(self, context,
network_id, bgp_speaker_id):
"""Return subquery for tenant networks by binding network ID"""
address_scope = aliased(address_scope_db.AddressScope,
name='address_scope')
router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
name='router_attrs')
tenant_networks_query = context.session.query(
l3_db.RouterPort.router_id,
models_v2.Subnet.cidr,
models_v2.Subnet.ip_version,
address_scope.id)
tenant_networks_query = tenant_networks_query.filter(
l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_GW,
l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_SNAT,
l3_db.RouterPort.router_id == router_attrs.router_id,
models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id,
models_v2.IPAllocation.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.network_id != network_id,
models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id,
models_v2.SubnetPool.address_scope_id == address_scope.id,
BgpSpeaker.id == bgp_speaker_id,
BgpSpeaker.ip_version == address_scope.ip_version,
models_v2.Subnet.ip_version == address_scope.ip_version)
return tenant_networks_query
def _tenant_networks_by_bgp_speaker_query(self, context, bgp_speaker_id):
"""Return subquery for tenant networks by binding bgp_speaker_id"""
router_id = l3_db.RouterPort.router_id.distinct().label('router_id')
tenant_nets_subq = context.session.query(router_id,
models_v2.Subnet.cidr,
models_v2.Subnet.ip_version)
scopes = self._get_address_scope_ids_for_bgp_speaker(context,
bgp_speaker_id)
filters = self._tenant_networks_by_bgp_speaker_filters(scopes)
tenant_nets_subq = tenant_nets_subq.filter(*filters)
return tenant_nets_subq
def _tenant_networks_by_bgp_speaker_filters(self, address_scope_ids):
"""Return the filters for querying tenant networks by BGP speaker"""
router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
name='router_attrs')
return [models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id,
l3_db.RouterPort.router_id == router_attrs.router_id,
l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_GW,
l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_SNAT,
models_v2.IPAllocation.subnet_id == models_v2.Subnet.id,
models_v2.Subnet.network_id != BgpSpeakerNetworkBinding.network_id,
models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id,
models_v2.SubnetPool.address_scope_id.in_(address_scope_ids),
models_v2.Subnet.ip_version == BgpSpeakerNetworkBinding.ip_version,
BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id,
BgpSpeaker.advertise_tenant_networks == sa.sql.true()]
def _nexthop_ip_addresses_by_binding_query(self, context,
network_id, bgp_speaker_id):
"""Return the subquery for locating nexthops by binding network"""
nexthops_query = context.session.query(
l3_db.RouterPort.router_id,
models_v2.IPAllocation.ip_address,
models_v2.Subnet.ip_version)
filters = self._next_hop_ip_addresses_by_binding_filters(
network_id,
bgp_speaker_id)
nexthops_query = nexthops_query.filter(*filters)
return nexthops_query
def _next_hop_ip_addresses_by_binding_filters(self,
network_id,
bgp_speaker_id):
"""Return the filters for querying nexthops by binding network"""
address_scope = aliased(address_scope_db.AddressScope,
name='address_scope')
return [models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id,
models_v2.IPAllocation.subnet_id == models_v2.Subnet.id,
BgpSpeaker.id == bgp_speaker_id,
BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id,
BgpSpeakerNetworkBinding.network_id == network_id,
models_v2.Subnet.network_id == BgpSpeakerNetworkBinding.network_id,
models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id,
models_v2.SubnetPool.address_scope_id == address_scope.id,
models_v2.Subnet.ip_version == address_scope.ip_version,
l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_GW]
def _nexthop_ip_addresses_by_bgp_speaker_query(self, context,
bgp_speaker_id):
"""Return the subquery for locating nexthops by BGP speaker"""
nexthops_query = context.session.query(
l3_db.RouterPort.router_id,
models_v2.IPAllocation.ip_address,
models_v2.Subnet.ip_version)
filters = self._next_hop_ip_addresses_by_bgp_speaker_filters(
bgp_speaker_id)
nexthops_query = nexthops_query.filter(*filters)
return nexthops_query
def _next_hop_ip_addresses_by_bgp_speaker_filters(self, bgp_speaker_id):
"""Return the filters for querying nexthops by BGP speaker"""
router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
name='router_attrs')
return [l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_GW,
l3_db.RouterPort.router_id == router_attrs.router_id,
BgpSpeakerNetworkBinding.network_id == models_v2.Subnet.network_id,
BgpSpeakerNetworkBinding.ip_version == models_v2.Subnet.ip_version,
BgpSpeakerNetworkBinding.bgp_speaker_id == bgp_speaker_id,
models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id,
models_v2.IPAllocation.subnet_id == models_v2.Subnet.id]
def _tenant_prefixes_by_router(self, context, router_id, bgp_speaker_id):
with context.session.begin(subtransactions=True):
query = context.session.query(models_v2.Subnet.cidr.distinct())
filters = self._tenant_prefixes_by_router_filters(router_id,
bgp_speaker_id)
query = query.filter(*filters)
return [x[0] for x in query.all()]
def _tenant_prefixes_by_router_filters(self, router_id, bgp_speaker_id):
binding = aliased(BgpSpeakerNetworkBinding, name='network_binding')
subnetpool = aliased(models_v2.SubnetPool,
name='subnetpool')
router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
name='router_attrs')
return [models_v2.Subnet.id == models_v2.IPAllocation.subnet_id,
models_v2.Subnet.subnetpool_id == subnetpool.id,
l3_db.RouterPort.router_id == router_id,
l3_db.Router.id == l3_db.RouterPort.router_id,
l3_db.Router.id == router_attrs.router_id,
l3_db.Router.gw_port_id == models_v2.Port.id,
models_v2.Port.network_id == binding.network_id,
binding.bgp_speaker_id == BgpSpeaker.id,
l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_INTF,
models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id]
def _tenant_prefixes_by_router_interface(self,
context,
router_port_id,
bgp_speaker_id):
with context.session.begin(subtransactions=True):
query = context.session.query(models_v2.Subnet.cidr.distinct())
filters = self._tenant_prefixes_by_router_filters(router_port_id,
bgp_speaker_id)
query = query.filter(*filters)
return [x[0] for x in query.all()]
def _tenant_prefixes_by_router_port_filters(self,
router_port_id,
bgp_speaker_id):
binding = aliased(BgpSpeakerNetworkBinding, name='network_binding')
return [models_v2.Subnet.id == models_v2.IPAllocation.subnet_id,
l3_db.RouterPort.port_id == router_port_id,
l3_db.Router.id == l3_db.RouterPort.router_id,
l3_db.Router.gw_port_id == models_v2.Port.id,
models_v2.Port.network_id == binding.network_id,
binding.bgp_speaker_id == BgpSpeaker.id,
models_v2.Subnet.ip_version == binding.ip_version,
l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_INTF,
models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id]
def _bgp_speakers_for_gateway_network(self, context, network_id):
"""Return all BgpSpeakers for the given gateway network"""
with context.session.begin(subtransactions=True):
query = context.session.query(BgpSpeaker)
query = query.filter(
BgpSpeakerNetworkBinding.network_id == network_id,
BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id)
return query.all()
def _bgp_speakers_for_gw_network_by_family(self, context,
network_id, ip_version):
"""Return the BgpSpeaker by given gateway network and ip_version"""
with context.session.begin(subtransactions=True):
query = context.session.query(BgpSpeaker)
query = query.filter(
BgpSpeakerNetworkBinding.network_id == network_id,
BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id,
BgpSpeakerNetworkBinding.ip_version == ip_version)
return query.all()
def _make_advertised_routes_list(self, routes):
route_list = ({'destination': x,
'next_hop': y} for x, y in routes)
return route_list
def _route_list_from_prefixes_and_next_hop(self, routes, next_hop):
route_list = [{'destination': x,
'next_hop': next_hop} for x in routes]
return route_list
def _host_route_list_from_tuples(self, ip_next_hop_tuples):
"""Return the list of host routes given a list of (IP, nexthop)"""
return ({'destination': x + '/32',
'next_hop': y} for x, y in ip_next_hop_tuples)
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config properties and functions for managing email notifications."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import logging
from constants import constants
from core.domain import config_domain
from core.domain import email_services
from core.domain import html_cleaner
from core.domain import rights_domain
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
(email_models,) = models.Registry.import_models([models.NAMES.email])
app_identity_services = models.Registry.import_app_identity_services()
transaction_services = models.Registry.import_transaction_services()
def log_new_error(*args, **kwargs):
"""Logs an error message (This is a stub for logging.error(), so that the
latter can be swapped out in tests).
"""
logging.error(*args, **kwargs)
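# Data used to construct emails sent to users who are granted new review
# rights, keyed by review category (translations, voiceovers, questions).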
NEW_REVIEWER_EMAIL_DATA = {
constants.REVIEW_CATEGORY_TRANSLATION: {
'review_category': 'translations',
'to_check': 'translation suggestions',
'description_template': '%s language translations',
'rights_message_template': (
'review translation suggestions made by contributors in the %s '
'language')
},
constants.REVIEW_CATEGORY_VOICEOVER: {
'review_category': 'voiceovers',
'to_check': 'voiceover applications',
'description_template': '%s language voiceovers',
'rights_message_template': (
'review voiceover applications made by contributors in the %s '
'language')
},
constants.REVIEW_CATEGORY_QUESTION: {
'review_category': 'questions',
'to_check': 'question suggestions',
'description': 'questions',
'rights_message': 'review question suggestions made by contributors'
}
}
REMOVED_REVIEWER_EMAIL_DATA = {
constants.REVIEW_CATEGORY_TRANSLATION: {
'review_category': 'translation',
'role_description_template': (
'translation reviewer role in the %s language'),
'rights_message_template': (
'review translation suggestions made by contributors in the %s '
'language'),
'contribution_allowed': 'translations'
},
constants.REVIEW_CATEGORY_VOICEOVER: {
'review_category': 'voiceover',
'role_description_template': (
'voiceover reviewer role in the %s language'),
'rights_message_template': (
'review voiceover applications made by contributors in the %s '
'language'),
'contribution_allowed': 'voiceovers'
},
constants.REVIEW_CATEGORY_QUESTION: {
'review_category': 'question',
'role_description': 'question reviewer role',
'rights_message': 'review question suggestions made by contributors',
'contribution_allowed': 'questions'
}
}
NOTIFICATION_EMAIL_LIST_SCHEMA = {
'type': 'list',
'items': {
'type': 'unicode',
'validators': [{
'id': 'is_valid_email',
}]
},
'validators': [{
'id': 'has_length_at_most',
'max_value': 5
}, {
'id': 'is_uniquified',
}]
}
EMAIL_HTML_BODY_SCHEMA = {
'type': 'unicode',
'ui_config': {
'rows': 20,
}
}
EMAIL_CONTENT_SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'subject',
'schema': {
'type': 'unicode',
},
}, {
'name': 'html_body',
'schema': EMAIL_HTML_BODY_SCHEMA,
}],
}
EMAIL_SENDER_NAME = config_domain.ConfigProperty(
'email_sender_name', {'type': 'unicode'},
'The default sender name for outgoing emails.', 'Site Admin')
EMAIL_FOOTER = config_domain.ConfigProperty(
'email_footer', {'type': 'unicode', 'ui_config': {'rows': 5}},
'The footer to append to all outgoing emails. (This should be written in '
'HTML and include an unsubscribe link.)',
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
_PLACEHOLDER_SUBJECT = 'THIS IS A PLACEHOLDER.'
_PLACEHOLDER_HTML_BODY = 'THIS IS A <b>PLACEHOLDER</b> AND SHOULD BE REPLACED.'
SIGNUP_EMAIL_CONTENT = config_domain.ConfigProperty(
'signup_email_content', EMAIL_CONTENT_SCHEMA,
'Content of email sent after a new user signs up. (The email body should '
'be written with HTML and not include a salutation or footer.) These '
'emails are only sent if the functionality is enabled in feconf.py.',
{
'subject': _PLACEHOLDER_SUBJECT,
'html_body': _PLACEHOLDER_HTML_BODY,
})
EXPLORATION_ROLE_MANAGER = 'manager rights'
EXPLORATION_ROLE_EDITOR = 'editor rights'
EXPLORATION_ROLE_VOICE_ARTIST = 'voice artist rights'
EXPLORATION_ROLE_PLAYTESTER = 'playtest access'
EDITOR_ROLE_EMAIL_HTML_ROLES = {
rights_domain.ROLE_OWNER: EXPLORATION_ROLE_MANAGER,
rights_domain.ROLE_EDITOR: EXPLORATION_ROLE_EDITOR,
rights_domain.ROLE_VOICE_ARTIST: EXPLORATION_ROLE_VOICE_ARTIST,
rights_domain.ROLE_VIEWER: EXPLORATION_ROLE_PLAYTESTER
}
_EDITOR_ROLE_EMAIL_HTML_RIGHTS = {
'can_manage': '<li>Change the exploration permissions</li><br>',
'can_edit': '<li>Edit the exploration</li><br>',
'can_voiceover': '<li>Voiceover the exploration</li><br>',
'can_play': '<li>View and playtest the exploration</li><br>'
}
# We don't include "can_voiceover" for managers and editors, since this is
# implied by the email description for "can_edit".
EDITOR_ROLE_EMAIL_RIGHTS_FOR_ROLE = {
EXPLORATION_ROLE_MANAGER: (
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_manage'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_edit'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']),
EXPLORATION_ROLE_EDITOR: (
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_edit'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']),
EXPLORATION_ROLE_VOICE_ARTIST: (
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_voiceover'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']),
EXPLORATION_ROLE_PLAYTESTER: _EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']
}
UNPUBLISH_EXPLORATION_EMAIL_HTML_BODY = config_domain.ConfigProperty(
'unpublish_exploration_email_html_body', EMAIL_HTML_BODY_SCHEMA,
'Default content for the email sent after an exploration is unpublished '
'by a moderator. These emails are only sent if the functionality is '
'enabled in feconf.py. Leave this field blank if emails should not be '
'sent.',
'I\'m writing to inform you that I have unpublished the above '
'exploration.')
NOTIFICATION_EMAILS_FOR_FAILED_TASKS = config_domain.ConfigProperty(
'notification_emails_for_failed_tasks',
NOTIFICATION_EMAIL_LIST_SCHEMA,
'Email(s) to notify if an ML training task fails',
[]
)
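# Maps each email intent to a validator function that returns True if the
# given sender_id is allowed to send emails with that intent.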
SENDER_VALIDATORS = {
feconf.EMAIL_INTENT_SIGNUP: (lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_UNPUBLISH_EXPLORATION: (
user_services.is_at_least_moderator),
feconf.EMAIL_INTENT_DAILY_BATCH: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_MARKETING: user_services.is_admin,
feconf.EMAIL_INTENT_DELETE_EXPLORATION: (
user_services.is_at_least_moderator),
feconf.EMAIL_INTENT_REPORT_BAD_CONTENT: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_ONBOARD_REVIEWER: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_REMOVE_REVIEWER: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_REVIEW_SUGGESTIONS: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_ACCOUNT_DELETED: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.BULK_EMAIL_INTENT_MARKETING: user_services.is_admin,
feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION: user_services.is_admin,
feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION: user_services.is_admin,
feconf.BULK_EMAIL_INTENT_CREATOR_REENGAGEMENT: user_services.is_admin,
feconf.BULK_EMAIL_INTENT_LEARNER_REENGAGEMENT: user_services.is_admin,
feconf.BULK_EMAIL_INTENT_TEST: user_services.is_admin
}
def require_sender_id_is_valid(intent, sender_id):
"""Ensure that the sender ID is valid, based on the email's intent.
Many emails are only allowed to be sent by a certain user or type of user,
e.g. 'admin' or an admin/moderator. This function will raise an exception
if the given sender is not allowed to send this type of email.
Args:
intent: str. The intent string, i.e. the purpose of the email.
Valid intent strings are defined in feconf.py.
sender_id: str. The ID of the user sending the email.
Raises:
Exception. The email intent is invalid.
Exception. The sender_id is not appropriate for the given intent.
"""
if intent not in SENDER_VALIDATORS:
raise Exception('Invalid email intent string: %s' % intent)
else:
if not SENDER_VALIDATORS[intent](sender_id):
logging.error(
'Invalid sender_id %s for email with intent \'%s\'' %
(sender_id, intent))
raise Exception(
'Invalid sender_id for email with intent \'%s\'' % intent)
def _send_email(
recipient_id, sender_id, intent, email_subject, email_html_body,
sender_email, bcc_admin=False, sender_name=None, reply_to_id=None,
recipient_email=None):
"""Sends an email to the given recipient.
This function should be used for sending all user-facing emails.
Raises an Exception if the sender_id is not appropriate for the given
intent. Currently we support only system-generated emails and emails
initiated by moderator actions.
Args:
recipient_id: str. The user ID of the recipient.
sender_id: str. The user ID of the sender.
intent: str. The intent string for the email, i.e. the purpose/type.
email_subject: str. The subject of the email.
email_html_body: str. The body (message) of the email.
sender_email: str. The sender's email address.
bcc_admin: bool. Whether to send a copy of the email to the admin's
email address.
sender_name: str or None. The name to be shown in the "sender" field of
the email.
reply_to_id: str or None. The unique reply-to id used in reply-to email
address sent to recipient.
recipient_email: str or None. Override for the recipient email.
This should only be used when the user with user_id equal to
recipient_id does not exist or is deleted and their email cannot be
retrieved via get_email_from_user_id.
"""
if sender_name is None:
sender_name = EMAIL_SENDER_NAME.value
require_sender_id_is_valid(intent, sender_id)
if recipient_email is None:
recipient_email = user_services.get_email_from_user_id(recipient_id)
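    # Sanitize the HTML body; if cleaning changes it, the body contained
    # disallowed markup, so log an error and skip sending.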
cleaned_html_body = html_cleaner.clean(email_html_body)
if cleaned_html_body != email_html_body:
log_new_error(
'Original email HTML body does not match cleaned HTML body:\n'
'Original:\n%s\n\nCleaned:\n%s\n' %
(email_html_body, cleaned_html_body))
return
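    # Derive a plaintext version of the body, used both for duplicate
    # detection and as the text/plain part of the outgoing email.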
raw_plaintext_body = cleaned_html_body.replace('<br/>', '\n').replace(
'<br>', '\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\n<p>')
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)
if email_models.SentEmailModel.check_duplicate_message(
recipient_id, email_subject, cleaned_plaintext_body):
log_new_error(
'Duplicate email:\n'
'Details:\n%s %s\n%s\n\n' %
(recipient_id, email_subject, cleaned_plaintext_body))
return
def _send_email_in_transaction():
"""Sends the email to a single recipient."""
sender_name_email = '%s <%s>' % (sender_name, sender_email)
email_services.send_mail(
sender_name_email, recipient_email, email_subject,
cleaned_plaintext_body, cleaned_html_body, bcc_admin=bcc_admin,
reply_to_id=reply_to_id)
email_models.SentEmailModel.create(
recipient_id, recipient_email, sender_id, sender_name_email, intent,
email_subject, cleaned_html_body, datetime.datetime.utcnow())
transaction_services.run_in_transaction(_send_email_in_transaction)
def _send_bulk_mail(
recipient_ids, sender_id, intent, email_subject, email_html_body,
sender_email, sender_name, instance_id):
"""Sends an email to all given recipients.
Args:
recipient_ids: list(str). The user IDs of the email recipients.
sender_id: str. The ID of the user sending the email.
intent: str. The intent string, i.e. the purpose of the email.
email_subject: str. The subject of the email.
email_html_body: str. The body (message) of the email.
sender_email: str. The sender's email address.
sender_name: str. The name to be shown in the "sender" field of the
email.
instance_id: str. The ID of the BulkEmailModel entity instance.
"""
require_sender_id_is_valid(intent, sender_id)
recipients_settings = user_services.get_users_settings(recipient_ids)
recipient_emails = [user.email for user in recipients_settings]
cleaned_html_body = html_cleaner.clean(email_html_body)
if cleaned_html_body != email_html_body:
log_new_error(
'Original email HTML body does not match cleaned HTML body:\n'
'Original:\n%s\n\nCleaned:\n%s\n' %
(email_html_body, cleaned_html_body))
return
raw_plaintext_body = cleaned_html_body.replace('<br/>', '\n').replace(
'<br>', '\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\n<p>')
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)
def _send_bulk_mail_in_transaction(instance_id):
"""Sends the emails in bulk to the recipients.
Args:
instance_id: str. The ID of the BulkEmailModel entity instance.
"""
sender_name_email = '%s <%s>' % (sender_name, sender_email)
email_services.send_bulk_mail(
sender_name_email, recipient_emails, email_subject,
cleaned_plaintext_body, cleaned_html_body)
email_models.BulkEmailModel.create(
instance_id, recipient_ids, sender_id, sender_name_email, intent,
email_subject, cleaned_html_body, datetime.datetime.utcnow())
transaction_services.run_in_transaction(
_send_bulk_mail_in_transaction, instance_id)
def send_job_failure_email(job_id):
"""Sends an email to admin email as well as any email addresses
specificed on the admin config page.
Args:
job_id: str. The Job ID of the failing job.
"""
mail_subject = 'Failed ML Job'
mail_body = ((
        'ML job %s has failed. For more information, '
        'please visit the admin page at:\n'
'https://www.oppia.org/admin#/jobs') % job_id)
send_mail_to_admin(mail_subject, mail_body)
other_recipients = (
NOTIFICATION_EMAILS_FOR_FAILED_TASKS.value)
system_name_email = '%s <%s>' % (
feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)
if other_recipients:
email_services.send_bulk_mail(
system_name_email, other_recipients,
mail_subject, mail_body,
mail_body.replace('\n', '<br/>'))
def send_dummy_mail_to_admin(username):
"""Send an email from the specified email address to admin.
Args:
username: str. Username of the sender.
"""
email_body = 'This is a test mail from %s.' % (username)
email_subject = 'Test Mail'
system_name_email = '%s <%s>' % (
feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)
email_services.send_mail(
system_name_email, feconf.ADMIN_EMAIL_ADDRESS, email_subject,
email_body, email_body.replace('\n', '<br/>'), bcc_admin=False)
def send_mail_to_admin(email_subject, email_body):
"""Send an email to the admin email address.
The email is sent to the ADMIN_EMAIL_ADDRESS set in feconf.py.
Args:
email_subject: str. Subject of the email.
email_body: str. Body (message) of the email.
"""
app_id = app_identity_services.get_application_id()
body = '(Sent from %s)\n\n%s' % (app_id, email_body)
system_name_email = '%s <%s>' % (
feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)
email_services.send_mail(
system_name_email, feconf.ADMIN_EMAIL_ADDRESS, email_subject,
body, body.replace('\n', '<br/>'), bcc_admin=False)
def send_post_signup_email(user_id, test_for_duplicate_email=False):
"""Sends a post-signup email to the given user.
Raises an exception if emails are not allowed to be sent to users (i.e.
feconf.CAN_SEND_EMAILS is False).
Args:
user_id: str. User ID of the user that signed up.
test_for_duplicate_email: bool. For testing duplicate emails.
"""
if not test_for_duplicate_email:
for key, content in SIGNUP_EMAIL_CONTENT.value.items():
if content == SIGNUP_EMAIL_CONTENT.default_value[key]:
log_new_error(
'Please ensure that the value for the admin config '
'property SIGNUP_EMAIL_CONTENT is set, before allowing '
'post-signup emails to be sent.')
return
user_settings = user_services.get_user_settings(user_id)
email_subject = SIGNUP_EMAIL_CONTENT.value['subject']
email_body = 'Hi %s,<br><br>%s<br><br>%s' % (
user_settings.username,
SIGNUP_EMAIL_CONTENT.value['html_body'],
EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_SIGNUP,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def get_moderator_unpublish_exploration_email():
"""Returns a draft of the text of the body for an email sent immediately
when a moderator unpublishes an exploration. An empty body is a signal to
the frontend that no email will be sent.
Returns:
str. Draft of the email body for an email sent after the moderator
unpublishes an exploration, or an empty string if no email should
be sent.
"""
try:
require_moderator_email_prereqs_are_satisfied()
return config_domain.Registry.get_config_property(
'unpublish_exploration_email_html_body').value
except Exception:
return ''
def require_moderator_email_prereqs_are_satisfied():
"""Raises an exception if, for any reason, moderator emails cannot be sent.
Raises:
Exception. The feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION is False.
Exception. The feconf.CAN_SEND_EMAILS is False.
"""
if not feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:
raise Exception(
'For moderator emails to be sent, please ensure that '
'REQUIRE_EMAIL_ON_MODERATOR_ACTION is set to True.')
if not feconf.CAN_SEND_EMAILS:
raise Exception(
'For moderator emails to be sent, please ensure that '
'CAN_SEND_EMAILS is set to True.')
def send_moderator_action_email(
sender_id, recipient_id, intent, exploration_title, email_body):
"""Sends a email immediately following a moderator action (unpublish,
delete) to the given user.
Raises an exception if emails are not allowed to be sent to users (i.e.
feconf.CAN_SEND_EMAILS is False).
Args:
sender_id: str. User ID of the sender.
recipient_id: str. User ID of the recipient.
intent: str. The intent string (cause/purpose) of the email.
exploration_title: str. The title of the exploration on which the
moderator action was taken.
email_body: str. The email content/message.
"""
require_moderator_email_prereqs_are_satisfied()
email_config = feconf.VALID_MODERATOR_ACTIONS[intent]
recipient_user_settings = user_services.get_user_settings(recipient_id)
sender_user_settings = user_services.get_user_settings(sender_id)
email_subject = feconf.VALID_MODERATOR_ACTIONS[intent]['email_subject_fn'](
exploration_title)
email_salutation_html = email_config['email_salutation_html_fn'](
recipient_user_settings.username)
email_signoff_html = email_config['email_signoff_html_fn'](
sender_user_settings.username)
full_email_content = (
'%s<br><br>%s<br><br>%s<br><br>%s' % (
email_salutation_html, email_body, email_signoff_html,
EMAIL_FOOTER.value))
_send_email(
recipient_id, sender_id, intent, email_subject, full_email_content,
feconf.SYSTEM_EMAIL_ADDRESS, bcc_admin=True)
def send_role_notification_email(
inviter_id, recipient_id, recipient_role, exploration_id,
exploration_title):
"""Sends a email when a new user is given activity rights (Manager, Editor,
Viewer) to an exploration by creator of exploration.
Email will only be sent if recipient wants to receive these emails (i.e.
'can_receive_editor_role_email' is set True in recipent's preferences).
Args:
inviter_id: str. ID of the user who invited the recipient to the new
role.
recipient_id: str. User ID of the recipient.
recipient_role: str. Role given to the recipient. Must be defined in
EDITOR_ROLE_EMAIL_HTML_ROLES.
exploration_id: str. ID of the exploration for which the recipient has
been given the new role.
exploration_title: str. Title of the exploration for which the recipient
has been given the new role.
Raises:
Exception. The role is invalid (i.e. not defined in
EDITOR_ROLE_EMAIL_HTML_ROLES).
"""
# Editor role email body and email subject templates.
email_subject_template = (
'%s - invitation to collaborate')
email_body_template = (
'Hi %s,<br>'
'<br>'
'<b>%s</b> has granted you %s to their exploration, '
'"<a href="https://www.oppia.org/create/%s">%s</a>", on Oppia.org.<br>'
'<br>'
'This allows you to:<br>'
'<ul>%s</ul>'
'You can find the exploration '
'<a href="https://www.oppia.org/create/%s">here</a>.<br>'
'<br>'
'Thanks, and happy collaborating!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
# Return from here if sending email is turned off.
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
    # Return from here if sending editor role emails is disabled.
if not feconf.CAN_SEND_EDITOR_ROLE_EMAILS:
log_new_error('This app cannot send editor role emails to users.')
return
recipient_user_settings = user_services.get_user_settings(recipient_id)
inviter_user_settings = user_services.get_user_settings(inviter_id)
recipient_preferences = user_services.get_email_preferences(recipient_id)
if not recipient_preferences.can_receive_editor_role_email:
# Do not send email if recipient has declined.
return
if recipient_role not in EDITOR_ROLE_EMAIL_HTML_ROLES:
raise Exception(
'Invalid role: %s' % recipient_role)
role_description = EDITOR_ROLE_EMAIL_HTML_ROLES[recipient_role]
rights_html = EDITOR_ROLE_EMAIL_RIGHTS_FOR_ROLE[role_description]
email_subject = email_subject_template % exploration_title
email_body = email_body_template % (
recipient_user_settings.username, inviter_user_settings.username,
role_description, exploration_id, exploration_title, rights_html,
exploration_id, EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION, email_subject, email_body,
feconf.NOREPLY_EMAIL_ADDRESS,
sender_name=inviter_user_settings.username)
def send_emails_to_subscribers(creator_id, exploration_id, exploration_title):
"""Sends an email to all the subscribers of the creators when the creator
publishes an exploration.
Args:
creator_id: str. The id of the creator who has published an exploration
and to whose subscribers we are sending emails.
exploration_id: str. The id of the exploration which the creator has
published.
exploration_title: str. The title of the exploration which the creator
has published.
"""
creator_name = user_services.get_username(creator_id)
email_subject = ('%s has published a new exploration!' % creator_name)
email_body_template = (
'Hi %s,<br>'
'<br>'
'%s has published a new exploration! You can play it here: '
'<a href="https://www.oppia.org/explore/%s">%s</a><br>'
'<br>'
'Thanks, and happy learning!<br>'
'<br>'
'Best wishes,<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_SUBSCRIPTION_EMAILS:
log_new_error('This app cannot send subscription emails to users.')
return
recipient_list = subscription_services.get_all_subscribers_of_creator(
creator_id)
recipients_usernames = user_services.get_usernames(recipient_list)
recipients_preferences = user_services.get_users_email_preferences(
recipient_list)
for index, username in enumerate(recipients_usernames):
if recipients_preferences[index].can_receive_subscription_email:
email_body = email_body_template % (
username, creator_name, exploration_id,
exploration_title, EMAIL_FOOTER.value)
_send_email(
recipient_list[index], feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_feedback_message_email(recipient_id, feedback_messages):
"""Sends an email when creator receives feedback message to an exploration.
Args:
recipient_id: str. User ID of recipient.
feedback_messages: dict. Contains feedback messages. Example:
{
'exploration_id': {
'title': 'Exploration 1234',
'messages': ['Feedback message 1', 'Feedback message 2']
}
}
"""
email_subject_template = (
'You\'ve received %s new message%s on your explorations')
email_body_template = (
'Hi %s,<br>'
'<br>'
'You\'ve received %s new message%s on your Oppia explorations:<br>'
'<ul>%s</ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator-dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
if not feedback_messages:
return
recipient_user_settings = user_services.get_user_settings(recipient_id)
messages_html = ''
count_messages = 0
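    # Build one list item per exploration, each containing a nested list of
    # its new feedback messages, and count the total number of messages.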
for exp_id, reference in feedback_messages.items():
messages_html += (
'<li><a href="https://www.oppia.org/create/%s#/feedback">'
'%s</a>:<br><ul>' % (exp_id, reference['title']))
for message in reference['messages']:
messages_html += ('<li>%s<br></li>' % message)
count_messages += 1
messages_html += '</ul></li>'
email_subject = email_subject_template % (
(count_messages, 's') if count_messages > 1 else ('a', ''))
email_body = email_body_template % (
recipient_user_settings.username, count_messages if count_messages > 1
else 'a', 's' if count_messages > 1 else '', messages_html,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def can_users_receive_thread_email(
recipient_ids, exploration_id, has_suggestion):
"""Returns if users can receive email.
Args:
recipient_ids: list(str). IDs of persons that should receive the email.
exploration_id: str. ID of exploration that received new message.
has_suggestion: bool. True if thread contains suggestion.
Returns:
list(bool). True if user can receive the email, False otherwise.
"""
users_global_prefs = (
user_services.get_users_email_preferences(recipient_ids))
users_exploration_prefs = (
user_services.get_users_email_preferences_for_exploration(
recipient_ids, exploration_id))
zipped_preferences = list(
python_utils.ZIP(users_global_prefs, users_exploration_prefs))
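    # A user receives the email only if their global feedback-message email
    # preference is enabled and they have not muted the relevant
    # (suggestion or feedback) notifications for this exploration.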
result = []
if has_suggestion:
for user_global_prefs, user_exploration_prefs in zipped_preferences:
result.append(
user_global_prefs.can_receive_feedback_message_email
and not user_exploration_prefs.mute_suggestion_notifications)
else:
for user_global_prefs, user_exploration_prefs in zipped_preferences:
result.append(
user_global_prefs.can_receive_feedback_message_email
and not user_exploration_prefs.mute_feedback_notifications)
return result
def send_suggestion_email(
exploration_title, exploration_id, author_id, recipient_list):
"""Send emails to notify the given recipients about new suggestion.
Each recipient will only be emailed if their email preferences allow for
incoming feedback message emails.
Args:
exploration_title: str. Title of the exploration with the new
suggestion.
exploration_id: str. The ID of the exploration with the new suggestion.
author_id: str. The user ID of the author of the suggestion.
recipient_list: list(str). The user IDs of the email recipients.
"""
email_subject = 'New suggestion for "%s"' % exploration_title
email_body_template = (
'Hi %s,<br>'
'%s has submitted a new suggestion for your Oppia exploration, '
'<a href="https://www.oppia.org/create/%s">"%s"</a>.<br>'
'You can accept or reject this suggestion by visiting the '
'<a href="https://www.oppia.org/create/%s#/feedback">feedback page</a> '
'for your exploration.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
author_settings = user_services.get_user_settings(author_id)
can_users_receive_email = (
can_users_receive_thread_email(recipient_list, exploration_id, True))
for index, recipient_id in enumerate(recipient_list):
recipient_user_settings = user_services.get_user_settings(recipient_id)
# Send email only if recipient wants to receive.
if can_users_receive_email[index]:
email_body = email_body_template % (
recipient_user_settings.username, author_settings.username,
exploration_id, exploration_title, exploration_id,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_instant_feedback_message_email(
recipient_id, sender_id, message, email_subject, exploration_title,
exploration_id, thread_title, reply_to_id=None):
"""Send an email when a new message is posted to a feedback thread, or when
the thread's status is changed.
Args:
recipient_id: str. The user ID of the recipient.
sender_id: str. The user ID of the sender.
message: str. The message text or status change text from the sender.
email_subject: str. The subject line to be sent in the email.
exploration_title: str. The title of the exploration.
exploration_id: str. ID of the exploration the feedback thread is about.
thread_title: str. The title of the feedback thread.
reply_to_id: str or None. The unique reply-to id used in reply-to email
sent to recipient.
"""
email_body_template = (
'Hi %s,<br><br>'
'New update to thread "%s" on '
'<a href="https://www.oppia.org/create/%s#/feedback">%s</a>:<br>'
'<ul><li>%s: %s<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
sender_settings = user_services.get_user_settings(sender_id)
recipient_settings = user_services.get_user_settings(recipient_id)
recipient_preferences = user_services.get_email_preferences(recipient_id)
if recipient_preferences.can_receive_feedback_message_email:
email_body = email_body_template % (
recipient_settings.username, thread_title, exploration_id,
exploration_title, sender_settings.username, message,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS, reply_to_id=reply_to_id)
def send_flag_exploration_email(
exploration_title, exploration_id, reporter_id, report_text):
"""Send an email to all moderators when an exploration is flagged.
Args:
        exploration_title: str. The title of the flagged exploration.
exploration_id: str. The ID of the flagged exploration.
reporter_id: str. The user ID of the reporter.
report_text: str. The message entered by the reporter.
"""
email_subject = 'Exploration flagged by user: "%s"' % exploration_title
email_body_template = (
'Hello Moderator,<br>'
'%s has flagged exploration "%s" on the following '
'grounds: <br>'
        '%s.<br>'
'You can modify the exploration by clicking '
'<a href="https://www.oppia.org/create/%s">here</a>.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
email_body = email_body_template % (
user_services.get_user_settings(reporter_id).username,
exploration_title, report_text, exploration_id,
EMAIL_FOOTER.value)
recipient_list = user_services.get_user_ids_by_role(
feconf.ROLE_ID_MODERATOR)
for recipient_id in recipient_list:
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_REPORT_BAD_CONTENT,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_query_completion_email(recipient_id, query_id):
"""Send an email to the initiator of a bulk email query with a link to view
the query results.
Args:
recipient_id: str. The recipient ID.
query_id: str. The query ID.
"""
email_subject = 'Query %s has successfully completed' % query_id
email_body_template = (
'Hi %s,<br>'
        'Your query with id %s has successfully completed its '
'execution. Visit the result page '
'<a href="https://www.oppia.org/emaildashboardresult/%s">here</a> '
        'to see the results of your query.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
recipient_user_settings = user_services.get_user_settings(recipient_id)
email_body = email_body_template % (
recipient_user_settings.username, query_id, query_id,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_query_failure_email(recipient_id, query_id, query_params):
"""Send an email to the initiator of a failed bulk email query.
Args:
recipient_id: str. The recipient ID.
query_id: str. The query ID.
query_params: dict. The parameters of the query, as key:value.
"""
email_subject = 'Query %s has failed' % query_id
email_body_template = (
'Hi %s,<br>'
'Your query with id %s has failed due to error '
'during execution. '
'Please check the query parameters and submit query again.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
recipient_user_settings = user_services.get_user_settings(recipient_id)
email_body = email_body_template % (
recipient_user_settings.username, query_id, EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS)
admin_email_subject = 'Query job has failed.'
admin_email_body_template = (
        'Query job with query id %s has failed during its execution.\n'
'Query parameters:\n\n')
for key in sorted(query_params):
admin_email_body_template += '%s: %s\n' % (key, query_params[key])
admin_email_body = admin_email_body_template % query_id
send_mail_to_admin(admin_email_subject, admin_email_body)
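# Illustrative note (not part of the original module): for a hypothetical
# query_id of 'q123' and query_params of
# {'inactive_in_last_n_days': 10, 'has_not_logged_in_for_n_days': 30}, the
# admin email body assembled above would read:
#
#     Query job with query id q123 has failed during its execution.
#     Query parameters:
#
#     has_not_logged_in_for_n_days: 30
#     inactive_in_last_n_days: 10
#
# Note that the '%s' placeholder is only substituted after the sorted
# parameter lines have been appended to the template.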
def send_user_query_email(
sender_id, recipient_ids, email_subject, email_body, email_intent):
"""Sends an email to all the recipients of the query.
Args:
sender_id: str. The ID of the user sending the email.
recipient_ids: list(str). The user IDs of the email recipients.
email_subject: str. The subject of the email.
email_body: str. The body of the email.
email_intent: str. The intent string, i.e. the purpose of the email.
Returns:
bulk_email_model_id: str. The ID of the bulk email model.
"""
bulk_email_model_id = email_models.BulkEmailModel.get_new_id('')
sender_name = user_services.get_username(sender_id)
sender_email = user_services.get_email_from_user_id(sender_id)
_send_bulk_mail(
recipient_ids, sender_id, email_intent, email_subject, email_body,
sender_email, sender_name, bulk_email_model_id)
return bulk_email_model_id
def send_test_email_for_bulk_emails(tester_id, email_subject, email_body):
"""Sends a test email to the tester.
Args:
tester_id: str. The user ID of the tester.
email_subject: str. The subject of the email.
email_body: str. The body of the email.
"""
tester_name = user_services.get_username(tester_id)
tester_email = user_services.get_email_from_user_id(tester_id)
_send_email(
tester_id, tester_id, feconf.BULK_EMAIL_INTENT_TEST,
email_subject, email_body, tester_email, sender_name=tester_name)
def send_mail_to_onboard_new_reviewers(user_id, category):
"""Sends an email to users who have crossed the threshold score.
Args:
user_id: str. The ID of the user who is being offered to become a
reviewer.
category: str. The category that the user is being offered to review.
"""
email_subject = 'Invitation to review suggestions'
email_body_template = (
'Hi %s,<br><br>'
'Thank you for actively contributing high-quality suggestions for '
'Oppia\'s lessons in %s, and for helping to make these lessons better '
'for students around the world!<br><br>'
'In recognition of your contributions, we would like to invite you to '
'become one of Oppia\'s reviewers. As a reviewer, you will be able to '
'review suggestions in %s, and contribute to helping ensure that any '
'edits made to lessons preserve the lessons\' quality and are '
'beneficial for students.<br><br>'
'If you\'d like to help out as a reviewer, please visit your '
'<a href="https://www.oppia.org/creator-dashboard/">dashboard</a>. '
'and set your review preferences accordingly. Note that, if you accept,'
'you will receive occasional emails inviting you to review incoming '
'suggestions by others.<br><br>'
'Again, thank you for your contributions to the Oppia community!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
recipient_user_settings = user_services.get_user_settings(user_id)
can_user_receive_email = user_services.get_email_preferences(
user_id).can_receive_email_updates
# Send email only if recipient wants to receive.
if can_user_receive_email:
email_body = email_body_template % (
recipient_user_settings.username, category, category,
EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_ONBOARD_REVIEWER,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_mail_to_notify_users_to_review(user_id, category):
"""Sends an email to users to review suggestions in categories they have
agreed to review for.
Args:
user_id: str. The id of the user who is being pinged to review
suggestions.
category: str. The category of the suggestions to review.
"""
email_subject = 'Notification to review suggestions'
email_body_template = (
'Hi %s,<br><br>'
'Just a heads-up that there are new suggestions to '
'review in %s, which you are registered as a reviewer for.'
        '<br><br>Please take a look at these suggestions and accept/reject '
        'them at your earliest convenience. You can visit your '
'<a href="https://www.oppia.org/creator-dashboard/">dashboard</a> '
'to view the list of suggestions that need a review.<br><br>'
        'Thank you for helping improve Oppia\'s lessons!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
recipient_user_settings = user_services.get_user_settings(user_id)
can_user_receive_email = user_services.get_email_preferences(
user_id).can_receive_email_updates
# Send email only if recipient wants to receive.
if can_user_receive_email:
email_body = email_body_template % (
recipient_user_settings.username, category, EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_REVIEW_SUGGESTIONS,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_accepted_voiceover_application_email(
user_id, lesson_title, language_code):
"""Sends an email to users to an give update on the accepted voiceover
application.
Args:
user_id: str. The id of the user whose voiceover application got
accepted.
        lesson_title: str. The title of the lesson for which the voiceover
application got accepted.
language_code: str. The language code for which the voiceover
application got accepted.
"""
email_subject = '[Accepted] Updates on submitted voiceover application'
email_body_template = (
'Hi %s,<br><br>'
'Congratulations! Your voiceover application for "%s" lesson got '
        'accepted and you have been assigned a voice artist role in the '
'lesson. Now you will be able to add voiceovers to the lesson in %s '
'language.'
        '<br><br>You can check the wiki page to learn '
'<a href="https://github.com/oppia/oppia/wiki/'
'Instructions-for-voice-artists">how to voiceover a lesson</a><br><br>'
        'Thank you for helping improve Oppia\'s lessons!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
recipient_user_settings = user_services.get_user_settings(user_id)
can_user_receive_email = user_services.get_email_preferences(
user_id).can_receive_email_updates
# Send email only if recipient wants to receive.
if can_user_receive_email:
language = utils.get_supported_audio_language_description(language_code)
email_body = email_body_template % (
recipient_user_settings.username, lesson_title, language,
EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_rejected_voiceover_application_email(
user_id, lesson_title, language_code, rejection_message):
"""Sends an email to users to give update on the rejected voiceover
application.
Args:
        user_id: str. The id of the user whose voiceover application got
            rejected.
        lesson_title: str. The title of the lesson for which the voiceover
            application got rejected.
        language_code: str. The language code for which the voiceover
            application got rejected.
rejection_message: str. The message left by the reviewer while rejecting
the voiceover application.
"""
email_subject = 'Updates on submitted voiceover application'
email_body_template = (
'Hi %s,<br><br>'
'Your voiceover application for "%s" lesson in language %s got rejected'
' and the reviewer has left a message.'
'<br><br>Review message: %s<br><br>'
        'You can create a new voiceover application through the '
'<a href="https://oppia.org/contributor-dashboard">'
'contributor dashboard</a> page.<br><br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
recipient_user_settings = user_services.get_user_settings(user_id)
can_user_receive_email = user_services.get_email_preferences(
user_id).can_receive_email_updates
# Send email only if recipient wants to receive.
if can_user_receive_email:
language = utils.get_supported_audio_language_description(language_code)
email_body = email_body_template % (
recipient_user_settings.username, lesson_title, language,
rejection_message, EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_account_deleted_email(user_id, user_email):
"""Sends an email to user whose account was deleted.
Args:
user_id: str. The id of the user whose account got deleted.
user_email: str. The email of the user whose account got deleted.
"""
email_subject = 'Account deleted'
email_body_template = (
'Hi %s,<br><br>'
        'Your account was successfully deleted.<br><br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
email_body = email_body_template % (
user_email, EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_ACCOUNT_DELETED, email_subject, email_body,
feconf.NOREPLY_EMAIL_ADDRESS, bcc_admin=True,
recipient_email=user_email)
def send_email_to_new_contribution_reviewer(
user_id, review_category, language_code=None):
"""Sends an email to user who is assigned as a reviewer.
Args:
user_id: str. The ID of the user.
review_category: str. The category in which user can review.
        language_code: None|str. The language code of the language if the
            review item is a translation or voiceover; None otherwise.
"""
if review_category not in NEW_REVIEWER_EMAIL_DATA:
raise Exception('Invalid review_category: %s' % review_category)
review_category_data = NEW_REVIEWER_EMAIL_DATA[review_category]
email_subject = 'You have been invited to review Oppia %s' % (
review_category_data['review_category'])
if review_category in [
constants.REVIEW_CATEGORY_TRANSLATION,
constants.REVIEW_CATEGORY_VOICEOVER]:
language_description = utils.get_supported_audio_language_description(
language_code).capitalize()
review_category_description = (
review_category_data['description_template'] % language_description)
reviewer_rights_message = (
review_category_data['rights_message_template'] % (
language_description))
else:
review_category_description = review_category_data['description']
reviewer_rights_message = review_category_data['rights_message']
to_review = review_category_data['to_check']
email_body_template = (
'Hi %s,<br><br>'
'This is to let you know that the Oppia team has added you as a '
'reviewer for %s. This allows you to %s.<br><br>'
'You can check the %s waiting for review in the '
'<a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
recipient_user_settings = user_services.get_user_settings(user_id)
can_user_receive_email = user_services.get_email_preferences(
user_id).can_receive_email_updates
# Send email only if recipient wants to receive.
if can_user_receive_email:
email_body = email_body_template % (
recipient_user_settings.username, review_category_description,
reviewer_rights_message, to_review)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_ONBOARD_REVIEWER, email_subject, email_body,
feconf.NOREPLY_EMAIL_ADDRESS)
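# Illustrative sketch (not part of the original module): based on how it is
# used above, an entry of NEW_REVIEWER_EMAIL_DATA for the translation review
# category is assumed to look roughly like the following; the exact wording
# of the values is hypothetical.
#
#     NEW_REVIEWER_EMAIL_DATA = {
#         constants.REVIEW_CATEGORY_TRANSLATION: {
#             'review_category': 'translations',
#             'to_check': 'translation suggestions',
#             'description_template': '%s language translations',
#             'rights_message_template': (
#                 'review translation suggestions made by contributors in '
#                 'the %s language'),
#         },
#         ...
#     }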
def send_email_to_removed_contribution_reviewer(
user_id, review_category, language_code=None):
"""Sends an email to user who is removed from the reviewer position.
Args:
user_id: str. The ID of the user.
        review_category: str. The category for which the review role is
            removed.
        language_code: None|str. The language code of the language if the
            review item is a translation or voiceover; None otherwise.
"""
if review_category not in REMOVED_REVIEWER_EMAIL_DATA:
raise Exception('Invalid review_category: %s' % review_category)
review_category_data = REMOVED_REVIEWER_EMAIL_DATA[review_category]
email_subject = 'You have been unassigned as a %s reviewer' % (
review_category_data['review_category'])
if review_category in [
constants.REVIEW_CATEGORY_TRANSLATION,
constants.REVIEW_CATEGORY_VOICEOVER]:
language_description = utils.get_supported_audio_language_description(
language_code).capitalize()
reviewer_role_description = (
review_category_data['role_description_template'] % (
language_description))
reviewer_rights_message = (
review_category_data['rights_message_template'] % (
language_description))
else:
reviewer_role_description = review_category_data['role_description']
reviewer_rights_message = review_category_data['rights_message']
email_body_template = (
'Hi %s,<br><br>'
'The Oppia team has removed you from the %s. You won\'t be able to %s '
'any more, but you can still contribute %s through the '
'<a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
recipient_user_settings = user_services.get_user_settings(user_id)
can_user_receive_email = user_services.get_email_preferences(
user_id).can_receive_email_updates
# Send email only if recipient wants to receive.
if can_user_receive_email:
email_body = email_body_template % (
recipient_user_settings.username, reviewer_role_description,
reviewer_rights_message,
review_category_data['contribution_allowed'])
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_REMOVE_REVIEWER, email_subject, email_body,
feconf.NOREPLY_EMAIL_ADDRESS)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._database_blob_auditing_policies_operations import build_create_or_update_request, build_get_request, build_list_by_database_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DatabaseBlobAuditingPoliciesOperations:
"""DatabaseBlobAuditingPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> "_models.DatabaseBlobAuditingPolicy":
"""Gets a database's blob auditing policy.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatabaseBlobAuditingPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.DatabaseBlobAuditingPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatabaseBlobAuditingPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DatabaseBlobAuditingPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/auditingSettings/{blobAuditingPolicyName}'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
server_name: str,
database_name: str,
parameters: "_models.DatabaseBlobAuditingPolicy",
**kwargs: Any
) -> "_models.DatabaseBlobAuditingPolicy":
"""Creates or updates a database's blob auditing policy.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:param parameters: The database blob auditing policy.
:type parameters: ~azure.mgmt.sql.models.DatabaseBlobAuditingPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatabaseBlobAuditingPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.DatabaseBlobAuditingPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatabaseBlobAuditingPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DatabaseBlobAuditingPolicy')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DatabaseBlobAuditingPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DatabaseBlobAuditingPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/auditingSettings/{blobAuditingPolicyName}'} # type: ignore
@distributed_trace
def list_by_database(
self,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DatabaseBlobAuditingPolicyListResult"]:
"""Lists auditing settings of a database.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatabaseBlobAuditingPolicyListResult or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.DatabaseBlobAuditingPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatabaseBlobAuditingPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_database.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DatabaseBlobAuditingPolicyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/auditingSettings'} # type: ignore
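# --------------------------------------------------------------------------
# Illustrative usage sketch (not generated code). It assumes the async
# SqlManagementClient from azure-mgmt-sql and DefaultAzureCredential from
# azure-identity are installed, that this operation group is exposed as the
# client's `database_blob_auditing_policies` attribute, and that the resource
# names below are placeholders.
async def _example_get_blob_auditing_policy():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.sql.aio import SqlManagementClient

    async with DefaultAzureCredential() as credential:
        async with SqlManagementClient(credential, '<subscription-id>') as client:
            # Retrieve the current auditing policy for a single database.
            policy = await client.database_blob_auditing_policies.get(
                resource_group_name='<resource-group>',
                server_name='<server-name>',
                database_name='<database-name>',
            )
            return policy.state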
|
|
# Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""A module for network specific tests"""
import testbase
from utils import *
import os.path
import sys
import math
log = get_logger('auto-cert-kit')
class FixedOffloadException(Exception):
pass
class IperfTest:
"""Utility class for running an Iperf test between two VMs
    or Dom0 + VM. This class can be set up and consumed by
other test classes in this module"""
default_config = {'window_size': '256K',
'format': 'm',
'buffer_length': '256K',
'thread_count': '1'}
def __init__(self, session,
client_vm_ref,
server_vm_ref,
network_ref,
static_manager,
username='root',
password=DEFAULT_PASSWORD,
config=None):
self.session = session
self.server = server_vm_ref
self.client = client_vm_ref
self.network = network_ref
self.static_manager = static_manager
self.username = username
self.password = password
if not config:
self.config = self.default_config
else:
self.config = config
# Store pool master in order to make plugin calls
self.host = get_pool_master(self.session)
self.timeout = 60
#Validate the references and setup run method
#self.validate_refs()
def validate_refs(self):
"""Check that the specified references are valid,
and in a configuration that is supported."""
if self.session.xenapi.VM.get_is_control_domain(self.server):
raise Exception("Expecting Dom0 to be the client, not the server")
def record_stats(self):
"""Record the interface statistics before running any tests"""
self.stats_rec = {self.client: self.get_iface_stats(self.client),
self.server: self.get_iface_stats(self.server)}
def validate_stats(self, bytes_sent):
# Load previous
client_stats = self.stats_rec[self.client]
server_stats = self.stats_rec[self.server]
# Obtain current
log.debug("Get Client Stats:")
client_now_stats = self.get_iface_stats(self.client)
log.debug("Get Server Stats:")
server_now_stats = self.get_iface_stats(self.server)
itsv_cli = IperfTestStatsValidator(client_stats, client_now_stats)
itsv_srv = IperfTestStatsValidator(server_stats, server_now_stats)
log.debug("Validate Client tx_bytes")
itsv_cli.validate_bytes(bytes_sent, 'tx_bytes')
log.debug("Validate Server rx_bytes")
itsv_srv.validate_bytes(bytes_sent, 'rx_bytes')
def configure_routes(self):
"""Ensure that the routing table is setup correctly in the client"""
log.debug("Configuring routes...")
        # Make a plugin call to ensure the server is going to receive
# packets over the correct interface
self.plugin_call('reset_arp',
{'vm_ref': self.client,
})
self.plugin_call('reset_arp',
{'vm_ref': self.server,
})
# Make a plugin call to add a route to the client
self.plugin_call('add_route',
{'vm_ref': self.client,
'dest_ip': self.get_server_ip(self.get_device_name(self.server)),
'dest_mac': get_vm_device_mac(self.session,
self.server,
self.get_device_name(self.server),
),
'device': self.get_device_name(self.client)}
)
self.plugin_call('add_route',
{'vm_ref': self.server,
'dest_ip': self.get_client_ip(self.get_device_name(self.client)),
'dest_mac': get_vm_device_mac(self.session,
self.client,
self.get_device_name(self.client),
),
'device': self.get_device_name(self.server)}
)
def run(self):
"""This classes run test function"""
self.deploy_iperf()
self.configure_server_ip()
self.configure_client_ip()
self.run_iperf_server()
log.debug("IPerf deployed and server started")
# Configure routes
self.configure_routes()
# Capture interface statistics pre test run
self.record_stats()
iperf_test_inst = TimeoutFunction(self.run_iperf_client,
self.timeout,
'iPerf test timed out %d' % self.timeout)
# Run the iperf tests
iperf_data = iperf_test_inst()
        # Capture interface statistics post test run
bytes_transferred = int(iperf_data['transfer'])
self.validate_stats(bytes_transferred)
return iperf_data
############# Utility Functions used by Class ###############
def get_server_ip(self, iface=None):
# By default return the interface the server will be listening on
if not iface:
iface = self.get_device_name(self.server)
if self.session.xenapi.VM.get_is_control_domain(self.server):
# Handle Dom0 Case
host_ref = self.session.xenapi.VM.get_resident_on(self.server)
ip = self.session.xenapi.host.call_plugin(host_ref,
'autocertkit',
'get_local_device_ip',
{'device':iface})
return ip
else:
# Handle DroidVM Case
return wait_for_ip(self.session, self.server, iface)
def get_client_ip(self, iface='eth0'):
ip = wait_for_ip(self.session, self.client, iface)
log.debug("Client (%s) IP for '%s' is '%s'" % (self.client,
iface,
ip))
return ip
def deploy_iperf(self):
"""deploy iPerf on both client and server"""
def deploy(vm_ref):
self.plugin_call('deploy_iperf',
{'vm_ref': vm_ref,
'username': self.username,
'password': self.password})
deploy(self.client)
deploy(self.server)
def get_device_name(self, vm_ref):
vm_host = self.session.xenapi.VM.get_resident_on(vm_ref)
if self.session.xenapi.VM.get_is_control_domain(vm_ref):
# Handle the Dom0 case
pifs = self.session.xenapi.network.get_PIFs(self.network)
device_names = []
for pif in pifs:
host_ref = self.session.xenapi.PIF.get_host(pif)
if vm_host == host_ref:
device_names.append(self.session.xenapi.PIF.get_device(pif))
if len(device_names) > 1:
raise Exception("Error: expected only a single device " + \
"name to be found in PIF list ('%s') " + \
"Instead, '%s' were returned." %
(pifs, device_names))
device_name = device_names.pop()
# For control domains, only deal with bridges
device_name = device_name.replace('eth','xenbr')
else:
# Handle the case where we are dealing with a VM
vm_vifs = self.session.xenapi.VM.get_VIFs(vm_ref)
filtered_vifs = [vif for vif in vm_vifs \
if self.session.xenapi.VIF.get_device(vif) != '0']
network_vifs = self.session.xenapi.network.get_VIFs(self.network)
int_vifs = intersection(filtered_vifs, network_vifs)
if len(int_vifs) > 1:
raise Exception("Error: more than one VIF connected " + \
"to VM '%s' ('%s')" % (int_vifs, filtered_vifs))
device_name = "eth%s" % \
self.session.xenapi.VIF.get_device(int_vifs.pop())
log.debug("Device under test for VM %s is '%s'" % (vm_ref, device_name))
return device_name
def get_iface_stats(self, vm_ref):
device_name = self.get_device_name(vm_ref)
# Make plugin call to get statistics
return get_iface_statistics(self.session, vm_ref, device_name)
def configure_server_ip(self):
log.debug("configure_server_ip")
return self.configure_vm_ip(self.server)
def configure_client_ip(self):
log.debug("configure_client_ip")
return self.configure_vm_ip(self.client)
def configure_vm_ip(self, vm_ref):
"""Make sure that the client has an IP, which may not be the case
if we are dealing with Dom0 to Dom0 tests."""
if self.session.xenapi.VM.get_is_control_domain(vm_ref):
log.debug("Client VM is Dom0... setup IP on bridge")
args = {'device': self.get_device_name(vm_ref)}
if self.static_manager:
args['mode'] = 'static'
ip = self.static_manager.get_ip()
args['ip_addr'] = ip.addr
args['ip_netmask'] = ip.netmask
else:
args['mode'] = 'dhcp'
host_ref = self.session.xenapi.VM.get_resident_on(vm_ref)
call_ack_plugin(self.session,
'configure_local_device',
args,
host=host_ref)
else:
log.debug("Client VM is a droid VM, no need to configure an IP")
def run_iperf_server(self):
"""Start the iPerf server listening on a VM"""
log.debug("Starting IPerf server")
if self.session.xenapi.VM.get_is_control_domain(self.server):
host_ref = self.session.xenapi.VM.get_resident_on(self.server)
log.debug("Host ref = %s" % host_ref)
args = {'device': self.get_device_name(self.server)}
if self.static_manager:
args['mode'] = 'static'
ip = self.static_manager.get_ip()
args['ip_addr'] = ip.addr
args['ip_netmask'] = ip.netmask
else:
args['mode'] = 'dhcp'
call_ack_plugin(self.session,
'start_iperf_server',
args,
host=host_ref)
else:
m_ip_address = self.get_server_ip('eth0')
test_ip = self.get_server_ip()
cmd_str = "iperf -s -D -B %s < /dev/null >&/dev/null" % test_ip
ssh_command(m_ip_address, self.username, self.password, cmd_str)
def parse_iperf_line(self, data):
"""Take a CSV line from iperf, parse, returning a dictionary"""
lines = data.strip().split('\n')
log.debug("Iperf Lines: %s" % lines)
arr = lines[0].split(',')
rec = {}
rec['datetime'] = arr[0]
rec['client_ip'] = arr[1]
rec['client_port'] = arr[2]
rec['server_ip'] = arr[3]
rec['server_port'] = arr[4]
rec['id'] = arr[5]
rec['interval'] = arr[6]
rec['transfer'] = arr[7]
rec['bandwidth'] = arr[8]
        # In the case where iperf returns extra information, it separates it
        # from the CSV record with a newline character. We would like to
        # capture this information and pass it back to the caller, so insert
        # it into the record under the 'info' field.
if len(lines) > 1:
# Join any extra lines back together again
rec['info'] = " ".join(lines[1:])
return rec
def plugin_call(self, method, args):
"""Make a plugin call to autocertkit"""
log.debug("Host: %s Plugin: %s Method: %s Args: %s" %
(self.host, 'autocertkit', method, str(args)))
return self.session.xenapi.host.call_plugin(self.host,
'autocertkit',
method,
args)
def get_iperf_command(self):
params = []
def copy(param, arg_str):
if param in self.config.keys() and self.config[param]:
params.append(arg_str % self.config[param])
copy('window_size', '-w %s')
copy('buffer_length', '-l %s')
copy('format', '-f %s')
copy('thread_count', '-P %s')
cmd_str = "iperf -y csv %s -m -c %s" % (" ".join(params), self.get_server_ip())
return cmd_str
def run_iperf_client(self):
"""Run test iperf command on droid VM"""
log.debug("Starting IPerf client")
if self.session.xenapi.VM.get_is_control_domain(self.client):
#Run client via XAPI plugin
log.debug("Executing iperf test from Dom0 (%s (%s) --> %s (%s))" % \
(self.session.xenapi.VM.get_name_label(self.client),
self.client,
self.session.xenapi.VM.get_name_label(self.server),
self.server))
args = {}
def copy(param):
if param in self.config.keys() and self.config[param]:
args[param] = self.config[param]
copy('window_size')
copy('format')
copy('buffer_length')
copy('thread_count')
args['dst'] = self.get_server_ip()
args['vm_ref'] = self.client
result = self.plugin_call('iperf_test', args)
else:
#Run the client locally
cmd_str = self.get_iperf_command()
result = ssh_command(self.get_client_ip(), self.username, self.password, cmd_str)
return self.parse_iperf_line(result)
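# Illustrative note (not part of the original module): parse_iperf_line()
# expects iperf's CSV report format ('iperf -y csv'). For a hypothetical
# report line such as:
#
#     20200101120000,10.0.0.2,51234,10.0.0.1,5001,3,0.0-10.0,1048576000,838860800
#
# the returned record maps 'datetime', 'client_ip', 'client_port',
# 'server_ip', 'server_port', 'id', 'interval', 'transfer' and 'bandwidth'
# to the corresponding fields, e.g. rec['transfer'] == '1048576000' (bytes)
# and rec['bandwidth'] == '838860800' (bits/sec). Any additional output
# lines are joined into rec['info'].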
class VLANTestClass(testbase.NetworkTestClass):
"""A test class for ensuring that hardware
can cope with VLAN traffic correctly"""
required_config = ['device_config', 'vlan_id']
tags = ['DEMO']
default_vlan = 800
num_ips_required = 4
def test_vlan_high_port(self, session):
"""This test creates two VMs, one on each host in the pool
and attempts to send traffic through a VLAN network which is
plugged into the second interface on each VM."""
log.debug("Starting to run testVLANHighPort...")
devices = self.get_pifs_to_use()
#Take just the first available device to test
device = devices[0]
vlans = self.get_vlans(device)
vlans.sort()
vlan_id = vlans.pop()
log.debug("VLAN ID = %d (Alternatives: %s)" % (vlan_id, vlans))
vlan_network_ref = create_network(session, 'testvlan', '', {})
for pif_ref in get_pifs_by_device(session, device):
log.debug("Creating VLAN for PIF %s" % pif_ref)
log.debug("Network ref = %s vlan_id = %s" %
(vlan_network_ref, vlan_id))
create_vlan(session, pif_ref,
vlan_network_ref, vlan_id)
log.debug("VLAN for PIF created")
management_network_ref = get_management_network(session)
network_refs = [management_network_ref, vlan_network_ref]
# We may want to allocate static addresses to the different interfaces
# differently, so collect the static ip managers in a record.
sms = {}
sms[management_network_ref] = self.get_static_manager(management_network_ref)
sms[vlan_network_ref] = self.get_static_manager(vlan_network_ref, vlan=vlan_id)
#Deploy two VMs
vm1_ref, vm2_ref = deploy_two_droid_vms(session, network_refs, sms)
vm1_ip = wait_for_ip(session, vm1_ref, 'eth0')
log.debug("IP address for vm1 is %s" % vm1_ip)
vm2_ip = wait_for_ip(session, vm2_ref, 'eth0')
log.debug("IP address for vm2 is %s" % vm2_ip)
if 'dhcp' in self.config:
if self.config['dhcp'].lower() == 'true':
log.debug("Using DHCP for VMs secondary interface")
dhcp = True
else:
dhcp = False
else:
dhcp = False
log.debug("About to configure network interfaces over SSH")
vm2_eth1_ip = wait_for_ip(session, vm2_ref, 'eth1')
#Make certain the VMs are available
for vm_ref in [vm1_ref, vm2_ref]:
check_vm_ping_response(session, vm_ref)
#Run Ping Command
ping_result = ping(vm1_ip, vm2_eth1_ip, 'eth1')
log.debug("Result: %s" % ping_result)
rec = {}
rec['info'] = ping_result
if "0% packet loss" not in ping_result:
raise TestCaseError("Error: Ping transmittion failed. %s"
% ping_result)
return rec
class BondingTestClass(testbase.NetworkTestClass):
"""A test class for ensuring that hardware
can cope with network bonding correctly"""
required_config = ['device_config']
num_ips_required = 2
def _setup_network(self, session, mode):
"""Util function for creating a pool-wide network,
NIC bond of specified mode on each host"""
log.debug("Setting up %s NIC bond" % mode)
net_ref = create_network(session, 'bond0', '', {})
log.debug("Created network %s" % net_ref)
# Use the first physical interface returned
self.piface = self.get_primary_bond_iface()[0]
# Use the first bondable interface for the specified physical interface above
self.siface = self.get_bondable_ifaces(self.piface)[0]
# Organize the correct PIF ref sets
pifs_ref_set_by_host = []
for host in session.xenapi.host.get_all():
pif1 = get_pifs_by_device(session, self.piface, [host])
pif2 = get_pifs_by_device(session, self.siface, [host])
pifs_ref_set_by_host.append(pif1 + pif2)
# Create nic bond
for pifs_ref_set in pifs_ref_set_by_host:
log.debug("Bonding PIF set %s to network %s" % (pifs_ref_set, net_ref))
create_nic_bond(session, net_ref, pifs_ref_set, '', mode)
#Ensure that hosts come back online after creating these bonds.
wait_for_hosts(session)
management_network_ref = get_management_network(session)
return [management_network_ref, net_ref]
def _setup_vms(self, session, net_refs):
"""Util function for returning VMs to run bonding test on"""
log.debug("Setting up droid vms...")
# Use the static configuration for each interface as defined by the user
# for the physical network ID being used.
sms = {}
for net_ref in net_refs:
sms[net_ref] = self.get_static_manager(net_ref)
return deploy_two_droid_vms(session, net_refs, sms)
def _run_test(self, session, mode):
"""Test control method for configuring the NIC bond,
configuring the test VMs, and testing for an active
network connection while the NIC bond is degraded.
Returns failure if any packet loss."""
net_refs = self._setup_network(session, mode)
vm1_ref, vm2_ref = self._setup_vms(session, net_refs)
vm1_ip = wait_for_ip(session, vm1_ref, 'eth0')
vm2_bondnic_ip = wait_for_ip(session, vm2_ref, 'eth1')
for vm_ref in [vm1_ref, vm2_ref]:
check_vm_ping_response(session,vm_ref)
log.debug("Starting test...")
results = []
#Test healthy bond
results.append(ping(vm1_ip, vm2_bondnic_ip, 'eth1'))
#Test degraded bond
set_nic_device_status(session, self.piface, 'down')
results.append(ping(vm1_ip, vm2_bondnic_ip, 'eth1'))
#Test degraded bond
set_nic_device_status(session, self.piface, 'up')
set_nic_device_status(session, self.siface, 'down')
results.append(ping(vm1_ip, vm2_bondnic_ip, 'eth1'))
set_nic_device_status(session, self.siface, 'up')
rec = {}
rec['data'] = results
rec['config'] = mode
for result in results:
if not valid_ping_response(result, 20):
raise TestCaseError("Error: Ping transmittion failed for bond type: %s. %s"
% (mode, result))
else:
log.debug("Ping Result: %s" % result)
return rec
def test_nic_bond_balance_slb(self, session):
"""NIC bonding test case for balance-slb type bond"""
log.debug("Starting to run test_nic_bond_balance_slb...")
return self._run_test(session, 'balance-slb')
def test_nic_bond_active_backup(self, session):
"""NIC bonding test class for active-backup type bond"""
log.debug("Starting to run test_nic_bond_active_backup...")
return self._run_test(session, 'active-backup')
class IperfTestClass(testbase.NetworkTestClass):
"""A base Iperf class for running iperf
between two VMs. This can be subclassed by other
performance related tests which set/monitor other
properities"""
IPERF_ARGS = {'window_size': '256K',
'format': 'm',
'buffer_length': '256K',
'thread_count': '4'}
required_config = ['device_config']
network_for_test = None
num_ips_required = 2
mode = "vm-vm"
def _setup_network(self, session):
"""Utility method for returning the network reference to be used by VMs"""
management_network_ref = get_management_network(session)
        # Pick a network to use for testing that exercises the device
        # we want to test.
self.network_for_test = self.get_networks()[0]
log.debug("Network for testing with: %s" % self.network_for_test)
return [management_network_ref, self.network_for_test]
def _setup_vms(self, session, network_refs):
"""Util function for returning VMs to run IPerf test on,
can be subclassed to run different configurations"""
log.debug("Setting up VM - VM cross host test")
# Setup default static manager with the available interfaces
sms = {}
for network_ref in network_refs:
sms[network_ref] = self.get_static_manager(network_ref)
return deploy_two_droid_vms(session, network_refs, sms)
def _setup_dom0_to_vm(self, session, network_refs):
log.debug("Get dom0")
vm1_ref = get_master_control_domain(session)
sms = {}
for network_ref in network_refs:
sms[network_ref] = self.get_static_manager(network_ref)
log.debug("Create droid")
vm2_ref = deploy_slave_droid_vm(session, network_refs, sms)
log.debug("droid created")
return vm1_ref, vm2_ref
def _setup_dom0_to_dom0(self, session):
log.debug("Get dom0 for master")
vm1_ref = get_master_control_domain(session)
log.debug("Get dom0 for slave")
vm2_ref = get_slave_control_domain(session)
return vm1_ref, vm2_ref
def _run_test(self, session, direction):
log.debug("Testing with mode %s" % direction)
#Use the first available network to run tests on
network_refs = self._setup_network(session)
if self.mode == "vm-vm":
vm1_ref, vm2_ref = self._setup_vms(session, network_refs)
elif self.mode == "dom0-dom0":
vm1_ref, vm2_ref = self._setup_dom0_to_dom0(session)
elif self.mode == "dom0-vm":
vm1_ref, vm2_ref = self._setup_dom0_to_vm(session, network_refs)
# Determine which reference should be the server and
# which should be the client.
if direction == 'rx':
client = vm2_ref
server = vm1_ref
elif direction == 'tx':
client = vm1_ref
server = vm2_ref
else:
raise Exception("Unkown 'direction' key specified. Expected tx/rx")
log.debug("Client IPerf VM ref: %s" % client)
log.debug("Server IPerf VM ref: %s" % server)
log.debug("About to run iperf test...")
#Run an iperf test - if failure, an exception should be raised.
iperf_data = IperfTest(session, client, server,
self.network_for_test,
self.get_static_manager(self.network_for_test),
config=self.IPERF_ARGS).run()
return {'info': 'Test ran successfully',
'data': iperf_data,
'config': self.IPERF_ARGS }
def test_tx_throughput(self, session):
"""Generic throughput Iperf test"""
direction = 'tx'
return self._run_test(session, direction)
def test_rx_throughput(self, session):
"""Generic throughput Iperf test"""
direction = 'rx'
return self._run_test(session, direction)
class PIFParamTestClass(IperfTestClass):
"""A test calss for ensuring all PIF params
can be set, modified, and op's verrified"""
# Offload configs to be used in tests.
    # If an offload is fixed in the wrong state, log it and continue the tests.
    # If an offload is not fixed and is in the wrong state, the test fails.
OFFLOAD_CONFIG = {'sg': 'on',
'tso': 'on',
'gso': 'on',
'gro': 'off',
'lro': 'off',
'rxvlan': 'on',
'txvlan': 'on'}
num_ips_required = 2
def _set_offload_params(self, session, pif):
""" Set offload setting."""
log.debug(self.OFFLOAD_CONFIG)
device = session.xenapi.PIF.get_device(pif)
log.debug("Device: %s" % device)
for k, v in self.OFFLOAD_CONFIG.iteritems():
set_hw_offload(session, device, k, v)
def _verify_ethtool_offloads(self, session, device):
"""Check that the device specified has the correct
hw offload configuration"""
hw_offloads = get_hw_offloads(session, device)
log.debug("verify offloads...%s" % hw_offloads)
for k, v in self.OFFLOAD_CONFIG.iteritems():
if k not in hw_offloads:
raise Exception("Cannot determine status of %s." % k)
log.debug("Device: %s (%s offload: %s)" % (device, k, hw_offloads[k]))
if not hw_offloads[k].startswith(v):
# Newest ethtool will tell whether the offload setting can be changed.
                # If it is not possible due to a hardware restriction, then ACK should
# ignore this failure and keep running tests.
if '[fixed]' in hw_offloads[k]:
raise FixedOffloadException("Required offload %s is fixed to %s." % (k, hw_offloads[k]))
raise Exception("%s offload was not in the correct state (is %s)" % (k, hw_offloads[k]))
def _setup_pif_params(self, session, network_ref):
pifs = session.xenapi.network.get_PIFs(network_ref)
log.debug("PIFs retrieved %s" % pifs)
#Set argument on PIF
for pif in pifs:
self._set_offload_params(session, pif)
device = session.xenapi.PIF.get_device(pif)
self._verify_ethtool_offloads(session, device)
def _setup_network(self, session):
network_refs = IperfTestClass._setup_network(self, session)
log.debug("Network_refs = %s" % network_refs)
management_network_ref = get_management_network(session)
for network_ref in network_refs:
# Don't configure PIF params for the management NIC
if network_ref != management_network_ref:
#Setup Pif Params
self._setup_pif_params(session, network_ref)
return network_refs
########## Dom0 to VM Iperf Test Classes ##########
class Dom0VMIperfTestClass(PIFParamTestClass):
"""A subclass of the PIFParamTest class, this
class runs the tests between Dom0 and a VM,
rather than just between VMs"""
mode = "dom0-vm"
IPERF_ARGS = {'format': 'm',
'thread_count': '1'}
class Dom0VMBridgeIperfTestClass(Dom0VMIperfTestClass):
"""Subclass that runs the appropriate tests with bridge as the default backend."""
network_backend = "bridge"
order = 5
########## Dom0 to Dom0 PIF parameter test classes #########
class Dom0PIFParamTestClass1(PIFParamTestClass):
"""A class for Dom0 - VM PIF param testing"""
mode = "dom0-dom0"
class Dom0PIFParamTestClass2(Dom0PIFParamTestClass1):
"""A class for Dom0 - VM PIF param testing"""
caps = []
required = False
OFFLOAD_CONFIG = {'sg': 'on',
'tso': 'on',
'gso': 'on',
'gro': 'off',
'lro': 'off',
'rxvlan': 'on',
'txvlan': 'on'}
class Dom0PIFParamTestClass3(Dom0PIFParamTestClass1):
"""A class for Dom0 - VM PIF param testing"""
caps = []
required = False
OFFLOAD_CONFIG = {'sg': 'on',
'tso': 'on',
'gso': 'on',
'gro': 'on',
'lro': 'off',
'rxvlan': 'on',
'txvlan': 'on'}
########## Dom0 to Dom0 *Bridge* PIF parameter test classes #########
class Dom0BridgePIFParamTestClass1(PIFParamTestClass):
"""A class for Dom0 - VM PIF param testing"""
network_backend = "bridge"
mode = "dom0-dom0"
order = 5
class Dom0BridgePIFParamTestClass2(Dom0BridgePIFParamTestClass1):
"""A class for Dom0 - VM PIF param testing"""
caps = []
required = False
OFFLOAD_CONFIG = {'sg': 'on',
'tso': 'on',
'gso': 'on',
'gro': 'off',
'lro': 'off',
'rxvlan': 'on',
'txvlan': 'on'}
class Dom0BridgePIFParamTestClass3(Dom0BridgePIFParamTestClass1):
"""A class for Dom0 - VM PIF param testing"""
caps = []
required = False
OFFLOAD_CONFIG = {'sg': 'on',
'tso': 'on',
'gso': 'on',
'gro': 'on',
'lro': 'off',
'rxvlan': 'on',
'txvlan': 'on'}
########## Jumbo Frames (Large MTU) Test Classes ###########
class MTUPingTestClass(testbase.NetworkTestClass):
"""A test class for ensuring that hardware can cope with
transmitting large MTU. Note, this test case is only
compatible with the open vswitch backend"""
MTU = '9000'
PING_ARGS = {'packet_size': 8000,
'packet_count': 20}
username = 'root'
password = DEFAULT_PASSWORD
num_ips_required = 2
def _setup_network(self, session):
"""Utility method for setting the network MTU and
returning the network reference to be used by VMs"""
net_ref = self.get_networks()[0]
set_network_mtu(session, net_ref, self.MTU)
log.debug("Network created and MTU %s set" % self.MTU)
management_network_ref = get_management_network(session)
return [management_network_ref, net_ref]
def _setup_vms(self, session, net_refs):
"""Util function for returning VMs to run large MTU ping test on"""
log.debug("Setting up VM - VM cross host test")
sms = {}
for net_ref in net_refs:
sms[net_ref] = self.get_static_manager(net_ref)
return deploy_two_droid_vms(session, net_refs, sms)
def _run_test(self, session):
"""Runs a ping test using a set MTU and the -M switch,
for MTU discovery, to verify successful packet delivery to VM2"""
#setup required network
net_refs = self._setup_network(session)
#setup VMs for test
vm1_ref, vm2_ref = self._setup_vms(session, net_refs)
#retrieve VM IPs
vm1_ip = wait_for_ip(session, vm1_ref, 'eth0')
log.debug("VM %s has IP %s (iface: eth0)" % (vm1_ref, vm1_ip))
vm2_ip = wait_for_ip(session, vm2_ref, 'eth0')
log.debug("VM %s has IP %s (iface: eth0)" % (vm2_ref, vm2_ip))
vm1_ip_eth1 = wait_for_ip(session, vm1_ref, 'eth1')
log.debug("VM %s has IP %s (iface: eth1)" % (vm1_ref, vm1_ip_eth1))
vm2_ip_eth1 = wait_for_ip(session, vm2_ref, 'eth1')
log.debug("VM %s has IP %s (iface: eth1)" % (vm2_ref, vm2_ip_eth1))
# Add explicit IP routes to ensure MTU traffic travels
# across the correct interface.
args = {
'vm_ref': vm1_ref,
'dest_ip': vm2_ip_eth1,
'dest_mac': get_vm_device_mac(session, vm2_ref, 'eth1'),
'device': 'eth1',
}
call_ack_plugin(session, 'add_route', args)
args = {
'vm_ref': vm2_ref,
'dest_ip': vm1_ip_eth1,
'dest_mac': get_vm_device_mac(session, vm1_ref, 'eth1'),
'device': 'eth1',
}
call_ack_plugin(session, 'add_route', args)
for vm_ref in [vm1_ref, vm2_ref]:
check_vm_ping_response(session, vm_ref)
ips = [vm1_ip, vm2_ip]
#SSH to vm 'ifconfig ethX mtu XXXX up'
cmd_str = 'ifconfig eth1 mtu %s up' % self.MTU
for vm_ip in ips:
ssh_command(vm_ip, self.username, self.password, cmd_str)
log.debug("Starting large MTU ping test...")
log.debug("Attempt normal ping first...")
ping_result = ping(vm1_ip, vm2_ip_eth1, 'eth1')
log.debug("Moving onto large MTU ping...")
log.debug("Ping Arguments: %s" % self.PING_ARGS)
#set ping args and run cmd
ping_result = ping(vm1_ip, vm2_ip_eth1, 'eth1', self.PING_ARGS['packet_size'], self.PING_ARGS['packet_count'])
log.debug("Result: %s" % ping_result)
rec = {}
rec['data'] = ping_result
rec['config'] = self.PING_ARGS
#Check for failure
if "0% packet loss" not in ping_result:
raise TestCaseError("Error: Large MTU ping transmission failed. %s"
% ping_result)
return rec
def test_ping(self, session):
log.debug("run test...")
return self._run_test(session)
class GROOffloadTestClass(testbase.NetworkTestClass):
""" Check whether GRO can be on. GRO is on by default from XS 6.5 """
REQUIRED_FOR = ">= 1.9.0"
def test_offload_config(self, session):
net_ref = self.get_networks()[0]
pifs = session.xenapi.network.get_PIFs(net_ref)
log.debug("PIFs to test: %s" % pifs)
#Set argument on PIF
for pif in pifs:
device = session.xenapi.PIF.get_device(pif)
set_hw_offload(session, device, 'gro', 'on')
gro_offload = get_hw_offloads(session, device)['gro']
if not gro_offload.startswith('on'):
raise Exception("GRO offload of %s is not set to on" % device)
return {'data': "GRO is set to on properly."}
class GROOffloadBridgeTestClass(GROOffloadTestClass):
""" Check whether GRO can be on with bridge network backend.
GRO is on by default from XS 6.5 """
network_backend = "bridge"
order = 5
|
|
# SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import functools
import json
import logging
import os
import re
from collections import defaultdict
from copy import deepcopy
import junit_xml
from tiny_test_fw import TinyFW, Utility
from .DebugUtils import CustomProcess, GDBBackend, OCDBackend # noqa: export DebugUtils for users
from .IDFApp import UT, ComponentUTApp, Example, IDFApp, LoadableElfTestApp, TestApp # noqa: export all Apps for users
from .IDFDUT import (ESP32C3DUT, ESP32C3FPGADUT, ESP32C6DUT, ESP32DUT, ESP32H2DUT, # noqa: export DUTs for users
ESP32QEMUDUT, ESP32S2DUT, ESP32S3DUT, ESP32S3FPGADUT, ESP8266DUT, IDFDUT)
from .unity_test_parser import TestFormat, TestResults
# pass TARGET_DUT_CLS_DICT to Env.py to avoid circular dependency issue.
TARGET_DUT_CLS_DICT = {
'ESP32': ESP32DUT,
'ESP32S2': ESP32S2DUT,
'ESP32S3': ESP32S3DUT,
'ESP32C3': ESP32C3DUT,
'ESP32C3FPGA': ESP32C3FPGADUT,
'ESP32S3FPGA': ESP32S3FPGADUT,
'ESP32C6': ESP32C6DUT,
'ESP32H2': ESP32H2DUT,
}
try:
string_type = basestring # type: ignore
except NameError:
string_type = str
def upper_list_or_str(text):
"""
    Return the uppercase version of a string or a list of strings, always as
    a list. Any other data type is returned unchanged.
    :param text: list or string; any other instance is returned immediately
    :return: list of uppercase strings
"""
if isinstance(text, string_type):
return [text.upper()]
elif isinstance(text, list):
return [item.upper() for item in text]
else:
return text
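# Illustrative examples (not part of the original module):
#
#     upper_list_or_str('esp32')              -> ['ESP32']
#     upper_list_or_str(['esp32', 'esp32s2'])  -> ['ESP32', 'ESP32S2']
#     upper_list_or_str(None)                 -> None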
def local_test_check(decorator_target):
# Try to get the sdkconfig.json to read the IDF_TARGET value.
# If not set, will set to ESP32.
# For CI jobs, this is a fake procedure, the true target and dut will be
# overwritten by the job config YAML file.
idf_target = 'ESP32' # default if sdkconfig not found or not readable
if os.getenv('CI_JOB_ID'): # Only auto-detect target when running locally
return idf_target
decorator_target = upper_list_or_str(decorator_target)
expected_json_path = os.path.join('build', 'config', 'sdkconfig.json')
if os.path.exists(expected_json_path):
sdkconfig = json.load(open(expected_json_path))
try:
idf_target = sdkconfig['IDF_TARGET'].upper()
except KeyError:
logging.debug('IDF_TARGET not in {}. IDF_TARGET set to esp32'.format(os.path.abspath(expected_json_path)))
else:
logging.debug('IDF_TARGET: {}'.format(idf_target))
else:
logging.debug('{} not found. IDF_TARGET set to esp32'.format(os.path.abspath(expected_json_path)))
if isinstance(decorator_target, list):
if idf_target not in decorator_target:
fpga_target = ''.join((idf_target, 'FPGA'))
if fpga_target not in decorator_target:
raise ValueError('IDF_TARGET set to {}, not in decorator target value'.format(idf_target))
else:
idf_target = fpga_target
else:
if idf_target != decorator_target:
raise ValueError('IDF_TARGET set to {}, not equal to decorator target value'.format(idf_target))
return idf_target
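# Illustrative examples (not part of the original module), assuming CI_JOB_ID
# is not set and build/config/sdkconfig.json reports the given IDF_TARGET:
#
#     IDF_TARGET=esp32s2, decorator target ['ESP32', 'ESP32S2'] -> 'ESP32S2'
#     IDF_TARGET=esp32c3, decorator target ['ESP32C3FPGA']      -> 'ESP32C3FPGA'
#     IDF_TARGET=esp32,   decorator target ['ESP32S2']          -> ValueError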
def get_dut_class(target, dut_class_dict, erase_nvs=None):
if target not in dut_class_dict:
raise Exception('target can only be {%s} (case insensitive)' % ', '.join(dut_class_dict.keys()))
dut = dut_class_dict[target.upper()]
if erase_nvs:
dut.ERASE_NVS = 'erase_nvs'
return dut
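# Example of the lookup above: the decorators below pass erase_nvs=True by
# default, so get_dut_class('ESP32S3', TARGET_DUT_CLS_DICT, True) returns
# ESP32S3DUT with its ERASE_NVS attribute set to 'erase_nvs'.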
def ci_target_check(func):
@functools.wraps(func)
def wrapper(**kwargs):
target = upper_list_or_str(kwargs.get('target', []))
ci_target = upper_list_or_str(kwargs.get('ci_target', []))
if not set(ci_target).issubset(set(target)):
raise ValueError('ci_target must be a subset of target')
return func(**kwargs)
return wrapper
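# Illustration of the subset check above (values are hypothetical): declaring
# target=['ESP32', 'ESP32S2'] with ci_target=['ESP32'] is accepted, whereas
# ci_target=['ESP32C3'] raises ValueError because it is not a declared target.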
def test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run, **kwargs):
target = upper_list_or_str(target)
test_target = local_test_check(target)
if 'additional_duts' in kwargs:
dut_classes = deepcopy(TARGET_DUT_CLS_DICT)
dut_classes.update(kwargs['additional_duts'])
else:
dut_classes = TARGET_DUT_CLS_DICT
dut = get_dut_class(test_target, dut_classes, erase_nvs)
original_method = TinyFW.test_method(
app=app, dut=dut, target=target, ci_target=upper_list_or_str(ci_target),
module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs,
dut_dict=dut_classes, nightly_run=nightly_run, **kwargs
)
test_func = original_method(func)
return test_func
@ci_target_check
def idf_example_test(app=Example, target='ESP32', ci_target=None, module='examples', execution_time=1,
level='example', erase_nvs=True, config_name=None, nightly_run=False, **kwargs):
"""
decorator for testing idf examples (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
    :param ci_target: target(s) to run automatically in CI; if None, then all targets will be tested. None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
    :param erase_nvs: whether to erase NVS in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
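# A minimal usage sketch for the decorator above; the example name, path and
# env_tag are hypothetical, while `env` and `extra_data` are injected by
# tiny_test_fw when the case runs:
#
#     @idf_example_test(env_tag='Example_GENERIC', target=['ESP32', 'ESP32S2'])
#     def test_examples_hello_world(env, extra_data):
#         dut = env.get_dut('hello_world', 'examples/get-started/hello_world')
#         dut.start_app()
#         dut.expect('Hello world!')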
@ci_target_check
def idf_unit_test(app=UT, target='ESP32', ci_target=None, module='unit-test', execution_time=1,
level='unit', erase_nvs=True, nightly_run=False, **kwargs):
"""
decorator for testing idf unit tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
    :param ci_target: target(s) to run automatically in CI; if None, then all targets will be tested. None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
    :param erase_nvs: whether to erase NVS in DUT.start_app()
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
@ci_target_check
def idf_custom_test(app=TestApp, target='ESP32', ci_target=None, module='misc', execution_time=1,
level='integration', erase_nvs=True, config_name=None, nightly_run=False, **kwargs):
"""
decorator for idf custom tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
    :param ci_target: target(s) to run automatically in CI; if None, then all targets will be tested. None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
    :param erase_nvs: whether to erase NVS in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
@ci_target_check
def idf_component_unit_test(app=ComponentUTApp, target='ESP32', ci_target=None, module='misc', execution_time=1,
level='integration', erase_nvs=True, config_name=None, nightly_run=False, **kwargs):
"""
    decorator for idf component unit tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
    :param ci_target: target(s) to run automatically in CI; if None, then all targets will be tested. None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
    :param erase_nvs: whether to erase NVS in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
class ComponentUTResult:
"""
    Helper class that parses component unit test results
"""
results_list = defaultdict(list) # type: dict[str, list[junit_xml.TestSuite]]
"""
    For original unity test cases using the macro "TEST", set "test_format" to "TestFormat.UNITY_FIXTURE_VERBOSE".
    For IDF unity test cases using the macro "TEST_CASE", set "test_format" to "TestFormat.UNITY_BASIC".
"""
@staticmethod
def parse_result(stdout, test_format=TestFormat.UNITY_FIXTURE_VERBOSE):
try:
results = TestResults(stdout, test_format)
except (ValueError, TypeError) as e:
            raise ValueError('Error occurred while parsing the component unit test stdout into a JUnit report: ' + str(e))
group_name = results.tests()[0].group()
ComponentUTResult.results_list[group_name].append(results.to_junit())
with open(os.path.join(os.getenv('LOG_PATH', ''), '{}_XUNIT_RESULT.xml'.format(group_name)), 'w') as fw:
junit_xml.to_xml_report_file(fw, ComponentUTResult.results_list[group_name])
if results.num_failed():
# raise exception if any case fails
err_msg = 'Failed Cases:\n'
for test_case in results.test_iter():
if test_case.result() == 'FAIL':
                    err_msg += '\t{}: {}\n'.format(test_case.name(), test_case.message())
raise AssertionError(err_msg)
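# Usage sketch for ComponentUTResult (the way the unity output is captured is
# schematic): feeding the captured stdout to parse_result() appends a JUnit
# suite to results_list, writes <group>_XUNIT_RESULT.xml under LOG_PATH and
# raises AssertionError if any case failed.
#
#     output = dut.read()  # or however the unity stdout was captured
#     ComponentUTResult.parse_result(output, test_format=TestFormat.UNITY_FIXTURE_VERBOSE)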
def log_performance(item, value):
"""
    Print performance with the pre-defined format to the console
:param item: performance item name
:param value: performance value
"""
performance_msg = '[Performance][{}]: {}'.format(item, value)
Utility.console_log(performance_msg, 'orange')
# update to junit test report
current_junit_case = TinyFW.JunitReport.get_current_test_case()
current_junit_case.stdout += performance_msg + '\r\n'
def check_performance(item, value, target):
"""
    Check if the given IDF performance value meets the pass standard
:param item: performance item name
:param value: performance item value
:param target: target chip
:raise: AssertionError: if check fails
"""
def _find_perf_item(path):
with open(path, 'r') as f:
data = f.read()
match = re.search(r'#define\s+IDF_PERFORMANCE_(MIN|MAX)_{}\s+([\d.]+)'.format(item.upper()), data)
return match.group(1), float(match.group(2))
def _check_perf(op, standard_value):
if op == 'MAX':
ret = value <= standard_value
else:
ret = value >= standard_value
if not ret:
raise AssertionError("[Performance] {} value is {}, doesn't meet pass standard {}"
.format(item, value, standard_value))
path_prefix = os.path.join(IDFApp.get_sdk_path(), 'components', 'idf_test', 'include')
performance_files = (os.path.join(path_prefix, target, 'idf_performance_target.h'),
os.path.join(path_prefix, 'idf_performance.h'))
for performance_file in performance_files:
try:
op, standard = _find_perf_item(performance_file)
except (IOError, AttributeError):
# performance file doesn't exist or match is not found in it
continue
_check_perf(op, standard)
# if no exception was thrown then the performance is met and no need to continue
break
else:
raise AssertionError('Failed to get performance standard for {}'.format(item))
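# check_performance() above scans the idf_performance*.h headers for lines of
# the form below (the define names and values are illustrative); the MIN/MAX
# suffix selects the comparison direction in _check_perf():
#
#     #define IDF_PERFORMANCE_MIN_AES_CBC_THROUGHPUT_MBSEC    40.0
#     #define IDF_PERFORMANCE_MAX_SCHEDULING_TIME             2000
#
# so check_performance('aes_cbc_throughput_mbsec', 43.2, 'esp32') would pass,
# while a value of 35.0 would raise AssertionError.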
MINIMUM_FREE_HEAP_SIZE_RE = re.compile(r'Minimum free heap size: (\d+) bytes')
def print_heap_size(app_name, config_name, target, minimum_free_heap_size):
"""
    Do not change the print output unless you really need to.
    The result is parsed by the ci-dashboard project
"""
print('------ heap size info ------\n'
'[app_name] {}\n'
'[config_name] {}\n'
'[target] {}\n'
'[minimum_free_heap_size] {} Bytes\n'
'------ heap size end ------'.format(app_name,
'' if not config_name else config_name,
target,
minimum_free_heap_size))
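# Sketch of how the helpers above are typically combined in a test case (the
# variable names are illustrative): the regex extracts the figure printed in
# the app's boot log and print_heap_size() re-emits it in the format the
# ci-dashboard parser expects.
#
#     match = MINIMUM_FREE_HEAP_SIZE_RE.search(dut_log)
#     if match:
#         print_heap_size('hello_world', 'default', 'esp32', int(match.group(1)))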
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file system searcher."""
import os
import unittest
from dfvfs.lib import definitions
from dfvfs.helpers import fake_file_system_builder
from dfvfs.helpers import file_system_searcher
from dfvfs.path import fake_path_spec
from dfvfs.path import os_path_spec
from dfvfs.path import qcow_path_spec
from dfvfs.path import tsk_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import os_file_system
from dfvfs.vfs import tsk_file_system
from tests import test_lib as shared_test_lib
class FindSpecTest(shared_test_lib.BaseTestCase):
"""Tests for the find specification."""
# pylint: disable=protected-access
def _CreateTestFileSystem(self):
"""Create a file system for testing.
Returns:
FakeFileSystem: file system for testing.
"""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
test_path = u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py'
test_file_data = b'\n'.join([
b'# -*- coding: utf-8 -*-',
b'"""Digital Forensics Virtual File System (dfVFS).',
b'',
b'dfVFS, or Digital Forensics Virtual File System, is a Python module',
b'that provides read-only access to file-system objects from various',
b'storage media types and file formats.',
b'"""'])
file_system_builder.AddFile(test_path, test_file_data)
return file_system_builder.file_system
def testInitialize(self):
"""Test the __init__ function."""
find_spec = file_system_searcher.FindSpec(location=u'location')
self.assertIsNotNone(find_spec)
find_spec = file_system_searcher.FindSpec(location=[u'location'])
self.assertIsNotNone(find_spec)
with self.assertRaises(TypeError):
find_spec = file_system_searcher.FindSpec(location={})
find_spec = file_system_searcher.FindSpec(location_glob=u'loca?ion')
self.assertIsNotNone(find_spec)
find_spec = file_system_searcher.FindSpec(location_glob=[u'loca?ion'])
self.assertIsNotNone(find_spec)
with self.assertRaises(TypeError):
find_spec = file_system_searcher.FindSpec(location_glob={})
find_spec = file_system_searcher.FindSpec(location_regex=u'loca.ion')
self.assertIsNotNone(find_spec)
find_spec = file_system_searcher.FindSpec(location_regex=[u'loca.ion'])
self.assertIsNotNone(find_spec)
with self.assertRaises(TypeError):
find_spec = file_system_searcher.FindSpec(location_regex={})
with self.assertRaises(ValueError):
find_spec = file_system_searcher.FindSpec(
location=u'location', location_glob=u'loca?ion')
def testCheckFileEntryType(self):
"""Test the _CheckFileEntryType() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckFileEntryType(file_entry)
self.assertTrue(result)
file_entry = file_system.GetRootFileEntry()
result = find_spec._CheckFileEntryType(file_entry)
self.assertFalse(result)
find_spec = file_system_searcher.FindSpec()
result = find_spec._CheckFileEntryType(file_entry)
self.assertIsNone(result)
def testCheckIsAllocated(self):
"""Test the _CheckIsAllocated() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckIsAllocated(file_entry)
self.assertTrue(result)
def testCheckIsDevice(self):
"""Test the _CheckIsDevice() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckIsDevice(file_entry)
self.assertFalse(result)
def testCheckIsDirectory(self):
"""Test the _CheckIsDirectory() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckIsDirectory(file_entry)
self.assertFalse(result)
def testCheckIsFile(self):
"""Test the _CheckIsFile() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckIsFile(file_entry)
self.assertTrue(result)
def testCheckIsLink(self):
"""Test the _CheckIsLink() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckIsLink(file_entry)
self.assertFalse(result)
def testCheckIsPipe(self):
"""Test the _CheckIsPipe() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckIsPipe(file_entry)
self.assertFalse(result)
def testCheckIsSocket(self):
"""Test the _CheckIsSocket() function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
result = find_spec._CheckIsSocket(file_entry)
self.assertFalse(result)
def testCheckLocation(self):
"""Test the _CheckLocation() function."""
file_system = self._CreateTestFileSystem()
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
find_spec = file_system_searcher.FindSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
find_spec.PrepareMatches(file_system)
result = find_spec._CheckLocation(file_entry, 6)
self.assertTrue(result)
result = find_spec._CheckLocation(file_entry, 0)
self.assertTrue(result)
result = find_spec._CheckLocation(file_entry, 5)
self.assertFalse(result)
find_spec = file_system_searcher.FindSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/bogus.py')
find_spec.PrepareMatches(file_system)
result = find_spec._CheckLocation(file_entry, 6)
self.assertFalse(result)
def testConvertLocationGlob2Regex(self):
"""Test the _ConvertLocationGlob2Regex function."""
find_spec = file_system_searcher.FindSpec()
location_regex = find_spec._ConvertLocationGlob2Regex(
u'/tmp/loca?ion')
self.assertEqual(location_regex, u'/tmp/loca.ion')
def testSplitPath(self):
"""Test the _SplitPath function."""
find_spec = file_system_searcher.FindSpec()
path_segments = find_spec._SplitPath(u'/tmp/location', u'/')
self.assertEqual(path_segments, [u'tmp', u'location'])
# TODO: add tests for AtMaximumDepth
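  # A possible sketch for the missing AtMaximumDepth test, kept commented out;
  # it assumes FindSpec.AtMaximumDepth(search_depth) reports True once the
  # search depth reaches the number of location segments set by PrepareMatches:
  #
  #   def testAtMaximumDepth(self):
  #     """Test the AtMaximumDepth() function."""
  #     file_system = self._CreateTestFileSystem()
  #     find_spec = file_system_searcher.FindSpec(
  #         location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
  #     find_spec.PrepareMatches(file_system)
  #     self.assertFalse(find_spec.AtMaximumDepth(1))
  #     self.assertTrue(find_spec.AtMaximumDepth(6))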
def testMatches(self):
"""Test the Matches() function."""
file_system = self._CreateTestFileSystem()
path_spec = fake_path_spec.FakePathSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
find_spec = file_system_searcher.FindSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
find_spec.PrepareMatches(file_system)
result = find_spec.Matches(file_entry, 6)
self.assertEqual(result, (True, True))
result = find_spec.Matches(file_entry, 0)
self.assertEqual(result, (False, True))
find_spec = file_system_searcher.FindSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/bogus.py')
find_spec.PrepareMatches(file_system)
result = find_spec.Matches(file_entry, 6)
self.assertEqual(result, (False, False))
def testPrepareMatches(self):
"""Test the PrepareMatches function."""
file_system = self._CreateTestFileSystem()
find_spec = file_system_searcher.FindSpec(
location=u'/usr/lib/python2.7/site-packages/dfvfs/__init__.py')
self.assertIsNone(find_spec._location_segments)
self.assertEqual(find_spec._number_of_location_segments, 0)
find_spec.PrepareMatches(file_system)
self.assertEqual(find_spec._location_segments, [
u'usr', u'lib', u'python2.7', u'site-packages', u'dfvfs',
u'__init__.py'])
self.assertEqual(find_spec._number_of_location_segments, 6)
@shared_test_lib.skipUnlessHasTestFile([u'password.txt'])
@shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2'])
class FileSystemSearcherTest(shared_test_lib.BaseTestCase):
"""Tests for the file system searcher."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
self._os_path = self._GetTestFilePath([])
self._os_path_spec = os_path_spec.OSPathSpec(location=self._os_path)
self._os_file_system = os_file_system.OSFileSystem(self._resolver_context)
# TODO: add RAW volume only test image.
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._qcow_path_spec = qcow_path_spec.QCOWPathSpec(parent=path_spec)
self._tsk_path_spec = tsk_path_spec.TSKPathSpec(
location=u'/', parent=self._qcow_path_spec)
self._tsk_file_system = tsk_file_system.TSKFileSystem(
self._resolver_context)
self._tsk_file_system.Open(self._tsk_path_spec)
def testFind(self):
"""Test the Find() function."""
searcher = file_system_searcher.FileSystemSearcher(
self._tsk_file_system, self._qcow_path_spec)
# Find all the file entries of type: FILE_ENTRY_TYPE_FILE.
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/$AttrDef',
u'/$BadClus',
u'/$Bitmap',
u'/$Boot',
u'/$Extend/$ObjId',
u'/$Extend/$Quota',
u'/$Extend/$Reparse',
u'/$Extend/$RmMetadata/$Repair',
u'/$Extend/$RmMetadata/$TxfLog/$Tops',
u'/$Extend/$RmMetadata/$TxfLog/$TxfLog.blf',
u'/$Extend/$RmMetadata/$TxfLog/$TxfLogContainer00000000000000000001',
u'/$Extend/$RmMetadata/$TxfLog/$TxfLogContainer00000000000000000002',
u'/$LogFile',
u'/$MFT',
u'/$MFTMirr',
u'/$Secure',
u'/$UpCase',
u'/$Volume',
u'/another_file',
u'/password.txt',
u'/syslog.gz',
u'/System Volume Information/{3808876b-c176-4e48-b7ae-04046e6cc752}',
(u'/System Volume Information/{600f0b69-5bdf-11e3-9d6c-005056c00008}'
u'{3808876b-c176-4e48-b7ae-04046e6cc752}'),
(u'/System Volume Information/{600f0b6d-5bdf-11e3-9d6c-005056c00008}'
u'{3808876b-c176-4e48-b7ae-04046e6cc752}')]
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries of type: FILE_ENTRY_TYPE_DIRECTORY.
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_DIRECTORY])
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/',
u'/$Extend',
u'/$Extend/$RmMetadata',
u'/$Extend/$RmMetadata/$Txf',
u'/$Extend/$RmMetadata/$TxfLog',
u'/System Volume Information',
u'/$OrphanFiles']
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries of type: FILE_ENTRY_TYPE_LINK.
find_spec = file_system_searcher.FindSpec(
file_entry_types=[definitions.FILE_ENTRY_TYPE_LINK])
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = []
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries with a location.
find_spec1 = file_system_searcher.FindSpec(
location=u'/$Extend/$RmMetadata')
find_spec2 = file_system_searcher.FindSpec(
location=[u'$Extend', u'$RmMetadata', u'$TxfLog', u'$TxfLog.blf'])
find_spec3 = file_system_searcher.FindSpec(
location=u'/PASSWORD.TXT')
path_spec_generator = searcher.Find(
find_specs=[find_spec1, find_spec2, find_spec3])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/$Extend/$RmMetadata',
u'/$Extend/$RmMetadata/$TxfLog/$TxfLog.blf']
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries with a case insensitive location.
find_spec = file_system_searcher.FindSpec(
location=u'/PASSWORD.TXT', case_sensitive=False)
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/password.txt']
locations = []
first_path_spec = None
for path_spec in path_spec_generator:
if not first_path_spec:
first_path_spec = path_spec
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
test_relative_path = searcher.GetRelativePath(first_path_spec)
self.assertEqual(test_relative_path, u'/password.txt')
# Find all the file entries with a location glob.
find_spec1 = file_system_searcher.FindSpec(
location_glob=u'/*/$RmMetadata')
find_spec2 = file_system_searcher.FindSpec(
location_glob=[u'$Extend', u'$RmMetadata', u'*', u'*.blf'])
find_spec3 = file_system_searcher.FindSpec(
location_glob=u'/PASSWORD.TXT')
path_spec_generator = searcher.Find(
find_specs=[find_spec1, find_spec2, find_spec3])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/$Extend/$RmMetadata',
u'/$Extend/$RmMetadata/$TxfLog/$TxfLog.blf']
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries with a location regular expression.
find_spec1 = file_system_searcher.FindSpec(
location_regex=r'/.*/\$RmMetadata')
find_spec2 = file_system_searcher.FindSpec(
location_regex=[r'\$Extend', r'\$RmMetadata', u'.*', u'.*[.]blf'])
find_spec3 = file_system_searcher.FindSpec(
location_regex=u'/PASSWORD.TXT')
path_spec_generator = searcher.Find(
find_specs=[find_spec1, find_spec2, find_spec3])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/$Extend/$RmMetadata',
u'/$Extend/$RmMetadata/$TxfLog/$TxfLog.blf']
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries with a case insensitive location glob.
find_spec = file_system_searcher.FindSpec(
location_glob=u'/PASSWORD.TXT', case_sensitive=False)
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/password.txt']
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries with a case insensitive location regular
# expression.
find_spec = file_system_searcher.FindSpec(
location_regex=u'/PASSWORD.TXT', case_sensitive=False)
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = [
u'/password.txt']
locations = []
for path_spec in path_spec_generator:
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(locations, expected_locations)
# Find all the file entries with a location glob.
searcher = file_system_searcher.FileSystemSearcher(
self._os_file_system, self._os_path_spec)
location = u'{0:s}syslog.*'.format(os.path.sep)
find_spec = file_system_searcher.FindSpec(
location_glob=location, case_sensitive=False)
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = sorted([
self._GetTestFilePath([u'syslog.aes']),
self._GetTestFilePath([u'syslog.base16']),
self._GetTestFilePath([u'syslog.base32']),
self._GetTestFilePath([u'syslog.base64']),
self._GetTestFilePath([u'syslog.bin.cpio']),
self._GetTestFilePath([u'syslog.blowfish']),
self._GetTestFilePath([u'syslog.bz2']),
self._GetTestFilePath([u'syslog.crc.cpio']),
self._GetTestFilePath([u'syslog.db']),
self._GetTestFilePath([u'syslog.des3']),
self._GetTestFilePath([u'syslog.gz']),
self._GetTestFilePath([u'syslog.newc.cpio']),
self._GetTestFilePath([u'syslog.lzma']),
self._GetTestFilePath([u'syslog.odc.cpio']),
self._GetTestFilePath([u'syslog.rc4']),
self._GetTestFilePath([u'syslog.tar']),
self._GetTestFilePath([u'syslog.tgz']),
self._GetTestFilePath([u'syslog.xz']),
self._GetTestFilePath([u'syslog.Z']),
self._GetTestFilePath([u'syslog.zip']),
self._GetTestFilePath([u'syslog.zlib'])])
locations = []
first_path_spec = None
for path_spec in path_spec_generator:
if not first_path_spec:
first_path_spec = path_spec
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(sorted(locations), expected_locations)
_, path_separator, relative_path = locations[0].rpartition(os.path.sep)
expected_relative_path = u'{0:s}{1:s}'.format(
path_separator, relative_path)
test_relative_path = searcher.GetRelativePath(first_path_spec)
self.assertEqual(test_relative_path, expected_relative_path)
# Find all the file entries with a location regular expression.
searcher = file_system_searcher.FileSystemSearcher(
self._os_file_system, self._os_path_spec)
if os.path.sep == u'\\':
location = u'\\\\syslog[.].*'
else:
location = u'{0:s}syslog[.].*'.format(os.path.sep)
find_spec = file_system_searcher.FindSpec(
location_regex=location, case_sensitive=False)
path_spec_generator = searcher.Find(find_specs=[find_spec])
self.assertIsNotNone(path_spec_generator)
expected_locations = sorted([
self._GetTestFilePath([u'syslog.aes']),
self._GetTestFilePath([u'syslog.base16']),
self._GetTestFilePath([u'syslog.base32']),
self._GetTestFilePath([u'syslog.base64']),
self._GetTestFilePath([u'syslog.bin.cpio']),
self._GetTestFilePath([u'syslog.blowfish']),
self._GetTestFilePath([u'syslog.bz2']),
self._GetTestFilePath([u'syslog.crc.cpio']),
self._GetTestFilePath([u'syslog.db']),
self._GetTestFilePath([u'syslog.des3']),
self._GetTestFilePath([u'syslog.gz']),
self._GetTestFilePath([u'syslog.newc.cpio']),
self._GetTestFilePath([u'syslog.lzma']),
self._GetTestFilePath([u'syslog.odc.cpio']),
self._GetTestFilePath([u'syslog.rc4']),
self._GetTestFilePath([u'syslog.tar']),
self._GetTestFilePath([u'syslog.tgz']),
self._GetTestFilePath([u'syslog.xz']),
self._GetTestFilePath([u'syslog.Z']),
self._GetTestFilePath([u'syslog.zip']),
self._GetTestFilePath([u'syslog.zlib'])])
locations = []
first_path_spec = None
for path_spec in path_spec_generator:
if not first_path_spec:
first_path_spec = path_spec
locations.append(getattr(path_spec, u'location', u''))
self.assertEqual(sorted(locations), expected_locations)
_, path_separator, relative_path = locations[0].rpartition(os.path.sep)
expected_relative_path = u'{0:s}{1:s}'.format(
path_separator, relative_path)
test_relative_path = searcher.GetRelativePath(first_path_spec)
self.assertEqual(test_relative_path, expected_relative_path)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
import os
from random import shuffle
import numpy as np
from tensorforce import TensorforceError, util
from tensorforce.agents import Agent
from tensorforce.core import ArrayDict
from tensorforce.core.models import TensorforceModel
class TensorforceAgent(Agent):
"""
Tensorforce agent (specification key: `tensorforce`).
Highly configurable agent and basis for a broad class of deep reinforcement learning agents,
which act according to a policy parametrized by a neural network, leverage a memory module for
periodic updates based on batches of experience, and optionally employ a baseline/critic/target
policy for improved reward estimation.
Args:
states (specification): States specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create()`), arbitrarily nested dictionary of state
descriptions (usually taken from `Environment.states()`) with the following attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – state data type
(<span style="color:#00C000"><b>default</b></span>: "float").</li>
<li><b>shape</b> (<i>int | iter[int]</i>) – state shape
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete state values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum state value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
actions (specification): Actions specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create()`), arbitrarily nested dictionary of
action descriptions (usually taken from `Environment.actions()`) with the following
attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – action data type
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>shape</b> (<i>int > 0 | iter[int > 0]</i>) – action shape
(<span style="color:#00C000"><b>default</b></span>: scalar).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete action values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum action value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
        max_episode_timesteps (int > 0): Upper bound for number of timesteps per episode
(<span style="color:#00C000"><b>default</b></span>: not given, better implicitly
specified via `environment` argument for `Agent.create()`).
policy (specification): Policy configuration, see [networks](../modules/networks.html) and
[policies documentation](../modules/policies.html)
(<span style="color:#00C000"><b>default</b></span>: action distributions or value
functions parametrized by an automatically configured network).
memory (int | specification): Replay memory capacity, or memory configuration, see the
[memories documentation](../modules/memories.html)
(<span style="color:#00C000"><b>default</b></span>: minimum capacity recent memory).
update (int | specification): Model update configuration with the following attributes
(<span style="color:#C00000"><b>required</b>,
<span style="color:#00C000"><b>default</b></span>: timesteps batch size</span>):
<ul>
<li><b>unit</b> (<i>"timesteps" | "episodes"</i>) – unit for update attributes
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>batch_size</b>
(<i><a href="../modules/parameters.html">parameter</a>, int > 0</i>) –
size of update batch in number of units
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>frequency</b>
(<i>"never" | <a href="../modules/parameters.html">parameter</a>, int > 0 | 0.0 < float <= 1.0</i>) –
frequency of updates, relative to batch_size if float
(<span style="color:#00C000"><b>default</b></span>: batch_size).</li>
<li><b>start</b>
(<i><a href="../modules/parameters.html">parameter</a>, int >= batch_size</i>) –
number of units before first update
(<span style="color:#00C000"><b>default</b></span>: none).</li>
</ul>
optimizer (specification): Optimizer configuration, see the
[optimizers documentation](../modules/optimizers.html)
(<span style="color:#00C000"><b>default</b></span>: Adam optimizer).
objective (specification): Optimization objective configuration, see the
[objectives documentation](../modules/objectives.html)
(<span style="color:#C00000"><b>required</b></span>).
reward_estimation (specification): Reward estimation configuration with the following
attributes (<span style="color:#C00000"><b>required</b></span>):
<ul>
<li><b>horizon</b>
(<i>"episode" | <a href="../modules/parameters.html">parameter</a>, int >= 1</i>)
– Horizon of discounted-sum return estimation
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>discount</b>
(<i><a href="../modules/parameters.html">parameter</a>, 0.0 <= float <= 1.0</i>) –
Discount factor of future rewards for discounted-sum return estimation
(<span style="color:#00C000"><b>default</b></span>: 1.0).</li>
<li><b>predict_horizon_values</b> (<i>false | "early" | "late"</i>) – Whether to
include a baseline prediction of the horizon value as part of the return estimation, and
if so, whether to compute the horizon value prediction "early" when experiences are
stored to memory, or "late" when batches of experience are retrieved for the update
(<span style="color:#00C000"><b>default</b></span>: "late" if baseline_policy or
baseline_objective are specified, else false).</li>
<li><b>estimate_advantage</b> (<i>bool | "early" | "late"</i>) – Whether to use an
estimate of the advantage (return minus baseline value prediction) instead of the return
as learning signal, and whether to do so late after the baseline update (default) or
early before the baseline update
(<span style="color:#00C000"><b>default</b></span>: false, unless baseline_policy is
specified but baseline_objective/optimizer are not).</li>
<li><b>predict_action_values</b> (<i>bool</i>) – Whether to predict state-action-
instead of state-values as horizon values and for advantage estimation
(<span style="color:#00C000"><b>default</b></span>: false).</li>
<li><b>return_processing</b> (<i>specification</i>) – Return processing as layer
or list of layers, see the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no return processing).</li>
<li><b>advantage_processing</b> (<i>specification</i>) – Advantage processing as
layer or list of layers, see the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no advantage processing).</li>
<li><b>predict_terminal_values</b> (<i>bool</i>) – Whether to predict the value
of terminal states, usually not required since max_episode_timesteps terminals are
handled separately
(<span style="color:#00C000"><b>default</b></span>: false).</li>
</ul>
baseline (specification): Baseline configuration, policy will be used as baseline if none,
see [networks](../modules/networks.html) and potentially
[policies documentation](../modules/policies.html)
(<span style="color:#00C000"><b>default</b></span>: none).
baseline_optimizer (specification | <a href="../modules/parameters.html">parameter</a>, float > 0.0):
Baseline optimizer configuration, see the
[optimizers documentation](../modules/optimizers.html),
main optimizer will be used for baseline if none, a float implies none and specifies a
custom weight for the baseline loss
(<span style="color:#00C000"><b>default</b></span>: none).
baseline_objective (specification): Baseline optimization objective configuration, see the
[objectives documentation](../modules/objectives.html),
required if baseline optimizer is specified, main objective will be used for baseline if
baseline objective and optimizer are not specified
(<span style="color:#00C000"><b>default</b></span>: none).
l2_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
L2 regularization loss weight
(<span style="color:#00C000"><b>default</b></span>: no L2 regularization).
entropy_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Entropy regularization loss weight, to discourage the policy distribution from being
"too certain"
(<span style="color:#00C000"><b>default</b></span>: no entropy regularization).
state_preprocessing (dict[specification]): State preprocessing as layer or list of layers,
see the [preprocessing documentation](../modules/preprocessing.html),
specified per state-type or -name
(<span style="color:#00C000"><b>default</b></span>: linear normalization of bounded
float states to [-2.0, 2.0]).
reward_preprocessing (specification): Reward preprocessing as layer or list of layers,
see the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no reward preprocessing).
exploration (<a href="../modules/parameters.html">parameter</a> | dict[<a href="../modules/parameters.html">parameter</a>], float >= 0.0):
Exploration, defined as the probability for uniformly random output in case of `bool`
and `int` actions, and the standard deviation of Gaussian noise added to every output in
case of `float` actions, specified globally or per action-type or -name
(<span style="color:#00C000"><b>default</b></span>: no exploration).
variable_noise (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Add Gaussian noise with given standard deviation to all trainable variables, as
alternative exploration mechanism
(<span style="color:#00C000"><b>default</b></span>: no variable noise).
parallel_interactions (int > 0): Maximum number of parallel interactions to support,
for instance, to enable multiple parallel episodes, environments or agents within an
environment
(<span style="color:#00C000"><b>default</b></span>: 1).
config (specification): Additional configuration options:
<ul>
<li><b>name</b> (<i>string</i>) – Agent name, used e.g. for TensorFlow scopes and
saver default filename
(<span style="color:#00C000"><b>default</b></span>: "agent").
<li><b>device</b> (<i>string</i>) – Device name
(<span style="color:#00C000"><b>default</b></span>: CPU). Different from (un)supervised
deep learning, RL does not always benefit from running on a GPU, depending on
environment and agent configuration. In particular for RL-typical environments with
low-dimensional state spaces (i.e., no images), one usually gets better performance by
running on CPU only. Consequently, Tensorforce is configured to run on CPU by default,
which can be changed, for instance, by setting this value to 'GPU' instead.
<li><b>seed</b> (<i>int</i>) – Random seed to set for Python, NumPy (both set
globally!) and TensorFlow, environment seed may have to be set separately for fully
deterministic execution
(<span style="color:#00C000"><b>default</b></span>: none).</li>
<li><b>buffer_observe</b> (<i>false | "episode" | int > 0</i>) – Number of
timesteps within an episode to buffer before calling the internal observe function, to
reduce calls to TensorFlow for improved performance
(<span style="color:#00C000"><b>default</b></span>: configuration-specific maximum
number which can be buffered without affecting performance).</li>
<li><b>enable_int_action_masking</b> (<i>bool</i>) – Whether int action options
can be masked via an optional "[ACTION-NAME]_mask" state input
(<span style="color:#00C000"><b>default</b></span>: true).</li>
<li><b>create_tf_assertions</b> (<i>bool</i>) – Whether to create internal
TensorFlow assertion operations
(<span style="color:#00C000"><b>default</b></span>: true).</li>
<li><b>eager_mode</b> (<i>bool</i>) – Whether to run functions eagerly instead of
running as a traced graph function, can be helpful for debugging
(<span style="color:#00C000"><b>default</b></span>: false).</li>
<li><b>tf_log_level</b> (<i>int >= 0</i>) – TensorFlow log level, additional C++
logging messages can be enabled by setting os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"/"2"
before importing Tensorforce/TensorFlow
(<span style="color:#00C000"><b>default</b></span>: 40, only error and critical).</li>
</ul>
saver (path | specification): TensorFlow checkpoints directory, or checkpoint manager
configuration with the following attributes, for periodic implicit saving as alternative
to explicit saving via agent.save()
(<span style="color:#00C000"><b>default</b></span>: no saver):
<ul>
<li><b>directory</b> (<i>path</i>) – checkpoint directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>filename</b> (<i>string</i>) – checkpoint filename
(<span style="color:#00C000"><b>default</b></span>: agent name).</li>
<li><b>frequency</b> (<i>int > 0</i>) – how frequently to save a checkpoint
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>unit</b> (<i>"timesteps" | "episodes" | "updates"</i>) – frequency unit
(<span style="color:#00C000"><b>default</b></span>: updates).</li>
<li><b>max_checkpoints</b> (<i>int > 0</i>) – maximum number of checkpoints to
keep (<span style="color:#00C000"><b>default</b></span>: 10).</li>
<li><b>max_hour_frequency</b> (<i>int > 0</i>) – ignoring max-checkpoints,
definitely keep a checkpoint in given hour frequency
(<span style="color:#00C000"><b>default</b></span>: none).</li>
</ul>
summarizer (path | specification): TensorBoard summaries directory, or summarizer
configuration with the following attributes
(<span style="color:#00C000"><b>default</b></span>: no summarizer):
<ul>
<li><b>directory</b> (<i>path</i>) – summarizer directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>filename</b> (<i>path</i>) – summarizer filename, max_summaries does not
apply if name specified
(<span style="color:#00C000"><b>default</b></span>: "summary-%Y%m%d-%H%M%S").</li>
<li><b>max_summaries</b> (<i>int > 0</i>) – maximum number of (generically-named)
summaries to keep
(<span style="color:#00C000"><b>default</b></span>: 7, number of different colors in
Tensorboard).</li>
<li><b>flush</b> (<i>int > 0</i>) – how frequently in seconds to flush the
summary writer (<span style="color:#00C000"><b>default</b></span>: 10).</li>
<li><b>summaries</b> (<i>"all" | iter[string]</i>) – which summaries to record,
"all" implies all numerical summaries, so all summaries except "graph"
(<span style="color:#00C000"><b>default</b></span>: "all"):</li>
<li>"action-value": value of each action (timestep-based)</li>
<li>"distribution": distribution parameters like probabilities or mean and stddev
(timestep-based)</li>
<li>"entropy": entropy of (per-action) policy distribution(s) (timestep-based)</li>
<li>"graph": computation graph</li>
<li>"kl-divergence": KL-divergence of previous and updated (per-action) policy
distribution(s) (update-based)</li>
<li>"loss": policy and baseline loss plus loss components (update-based)</li>
<li>"parameters": parameter values (according to parameter unit)</li>
<li>"reward": reward per timestep, episode length and reward, plus intermediate
reward/return/advantage estimates and processed values
(timestep/episode/update-based)</li>
<li>"update-norm": global norm of update (update-based)</li>
<li>"updates": mean and variance of update tensors per variable (update-based)</li>
<li>"variables": mean of trainable variables tensors (update-based)</li>
</ul>
tracking ("all" | iter[string]): Which tensors to track, available values are a subset of
the values of summarizer[summaries] above
(<span style="color:#00C000"><b>default</b></span>: no tracking).
The current value of tracked tensors can be retrieved via tracked_tensors() at any time,
however, note that tensor values change at different timescales (timesteps, episodes,
updates).
recorder (path | specification): Traces recordings directory, or recorder configuration with
the following attributes (see
[record-and-pretrain script](https://github.com/tensorforce/tensorforce/blob/master/examples/record_and_pretrain.py)
for example application)
(<span style="color:#00C000"><b>default</b></span>: no recorder):
<ul>
<li><b>directory</b> (<i>path</i>) – recorder directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>frequency</b> (<i>int > 0</i>) – how frequently in episodes to record
traces (<span style="color:#00C000"><b>default</b></span>: every episode).</li>
<li><b>start</b> (<i>int >= 0</i>) – how many episodes to skip before starting to
record traces (<span style="color:#00C000"><b>default</b></span>: 0).</li>
<li><b>max-traces</b> (<i>int > 0</i>) – maximum number of traces to keep
(<span style="color:#00C000"><b>default</b></span>: all).</li>
"""
def __init__(
# Required
self, states, actions, update, optimizer, objective, reward_estimation,
# Environment
max_episode_timesteps=None,
# Agent
policy='auto', memory=None,
# Baseline
baseline=None, baseline_optimizer=None, baseline_objective=None,
# Regularization
l2_regularization=0.0, entropy_regularization=0.0,
# Preprocessing
state_preprocessing='linear_normalization', reward_preprocessing=None,
# Exploration
exploration=0.0, variable_noise=0.0,
# Parallel interactions
parallel_interactions=1,
# Config, saver, summarizer, tracking, recorder
config=None, saver=None, summarizer=None, tracking=None, recorder=None,
# Deprecated
**kwargs
):
if 'estimate_actions' in reward_estimation:
raise TensorforceError.deprecated(
name='Agent', argument='reward_estimation[estimate_actions]',
replacement='reward_estimation[predict_action_values]'
)
if 'estimate_terminal' in reward_estimation:
raise TensorforceError.deprecated(
name='Agent', argument='reward_estimation[estimate_terminal]',
replacement='reward_estimation[predict_terminal_values]'
)
if summarizer is not None and 'labels' in summarizer:
raise TensorforceError.deprecated(
name='Agent', argument='summarizer[labels]', replacement='summarizer[summaries]'
)
if 'baseline_policy' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='baseline_policy', replacement='baseline'
)
if 'name' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='name', replacement='config[name]'
)
if 'buffer_observe' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='buffer_observe', replacement='config[buffer_observe]'
)
if 'device' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='device', replacement='config[device]'
)
if 'seed' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='seed', replacement='config[seed]'
)
if len(kwargs) > 0:
raise TensorforceError.invalid(name='Agent', argument=', '.join(kwargs))
if not hasattr(self, 'spec'):
self.spec = OrderedDict(
agent='tensorforce',
# Environment
states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
# Agent
policy=policy, memory=memory, update=update, optimizer=optimizer,
objective=objective, reward_estimation=reward_estimation,
# Baseline
baseline=baseline, baseline_optimizer=baseline_optimizer,
baseline_objective=baseline_objective,
# Regularization
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
# Preprocessing
state_preprocessing=state_preprocessing, reward_preprocessing=reward_preprocessing,
# Exploration
exploration=exploration, variable_noise=variable_noise,
# Parallel interactions
parallel_interactions=parallel_interactions,
# Config, saver, summarizer, recorder
config=config, saver=saver, summarizer=summarizer, tracking=tracking,
recorder=recorder
)
if memory is None:
memory = dict(type='recent')
if isinstance(update, int):
update = dict(unit='timesteps', batch_size=update)
if config is None:
config = dict()
else:
config = dict(config)
# TODO: should this change if summarizer is specified?
if parallel_interactions > 1:
if 'buffer_observe' not in config:
if max_episode_timesteps is None:
raise TensorforceError.required(
name='Agent', argument='max_episode_timesteps',
condition='parallel_interactions > 1'
)
config['buffer_observe'] = 'episode'
# elif config['buffer_observe'] < max_episode_timesteps:
# raise TensorforceError.value(
# name='Agent', argument='config[buffer_observe]',
# hint='< max_episode_timesteps', condition='parallel_interactions > 1'
# )
elif update['unit'] == 'timesteps':
update_frequency = update.get('frequency', update['batch_size'])
if 'buffer_observe' not in config:
if isinstance(update_frequency, int):
config['buffer_observe'] = update_frequency
else:
config['buffer_observe'] = 1
elif isinstance(update_frequency, int) and (
config['buffer_observe'] == 'episode' or config['buffer_observe'] > update_frequency
):
raise TensorforceError.value(
name='Agent', argument='config[buffer_observe]', value=config['buffer_observe'],
hint='> update[frequency]', condition='update[unit] = "timesteps"'
)
elif update['unit'] == 'episodes':
if 'buffer_observe' not in config:
config['buffer_observe'] = 'episode'
# reward_estimation = dict(reward_estimation)
# if reward_estimation['horizon'] == 'episode':
# if max_episode_timesteps is None:
# raise TensorforceError.required(
# name='Agent', argument='max_episode_timesteps',
# condition='reward_estimation[horizon] = "episode"'
# )
# reward_estimation['horizon'] = max_episode_timesteps
super().__init__(
states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
parallel_interactions=parallel_interactions, config=config, recorder=recorder
)
self.model = TensorforceModel(
states=self.states_spec, actions=self.actions_spec,
max_episode_timesteps=self.max_episode_timesteps,
policy=policy, memory=memory, update=update, optimizer=optimizer, objective=objective,
reward_estimation=reward_estimation,
baseline=baseline, baseline_optimizer=baseline_optimizer,
baseline_objective=baseline_objective,
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
state_preprocessing=state_preprocessing, reward_preprocessing=reward_preprocessing,
exploration=exploration, variable_noise=variable_noise,
parallel_interactions=self.parallel_interactions,
config=self.config, saver=saver, summarizer=summarizer, tracking=tracking
)
def experience(self, states, actions, terminal, reward, internals=None):
"""
Feed experience traces.
See the [act-experience-update script](https://github.com/tensorforce/tensorforce/blob/master/examples/act_experience_update_interface.py)
for an example application as part of the act-experience-update interface, which is an
alternative to the act-observe interaction pattern.
Args:
states (dict[array[state]]): Dictionary containing arrays of states
(<span style="color:#C00000"><b>required</b></span>).
actions (dict[array[action]]): Dictionary containing arrays of actions
(<span style="color:#C00000"><b>required</b></span>).
terminal (array[bool]): Array of terminals
(<span style="color:#C00000"><b>required</b></span>).
reward (array[float]): Array of rewards
(<span style="color:#C00000"><b>required</b></span>).
internals (dict[state]): Dictionary containing arrays of internal agent states
(<span style="color:#C00000"><b>required</b></span> if agent has internal states).
"""
if not all(len(buffer) == 0 for buffer in self.terminal_buffer):
raise TensorforceError(message="Calling agent.experience is not possible mid-episode.")
# Process states input and infer batching structure
states, batched, num_instances, is_iter_of_dicts = self._process_states_input(
states=states, function_name='Agent.experience'
)
if is_iter_of_dicts:
# Input structure iter[dict[input]]
# Internals
if internals is None:
internals = ArrayDict(self.initial_internals())
internals = internals.fmap(function=(lambda x: np.repeat(np.expand_dims(x, axis=0), repeats=num_instances, axis=0)))
elif not isinstance(internals, (tuple, list)):
raise TensorforceError.type(
name='Agent.experience', argument='internals', dtype=type(internals),
hint='is not tuple/list'
)
else:
internals = [ArrayDict(internal) for internal in internals]
internals = internals[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=internals[1:]
)
# Actions
if isinstance(actions, np.ndarray):
actions = ArrayDict(singleton=actions)
elif not isinstance(actions, (tuple, list)):
raise TensorforceError.type(
name='Agent.experience', argument='actions', dtype=type(actions),
hint='is not tuple/list'
)
elif not isinstance(actions[0], dict):
actions = ArrayDict(singleton=np.asarray(actions))
else:
actions = [ArrayDict(action) for action in actions]
actions = actions[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=actions[1:]
)
else:
# Input structure dict[iter[input]]
# Internals
if internals is None:
internals = ArrayDict(self.initial_internals())
internals = internals.fmap(function=(lambda x: np.tile(np.expand_dims(x, axis=0), reps=(num_instances,))))
elif not isinstance(internals, dict):
raise TensorforceError.type(
name='Agent.experience', argument='internals', dtype=type(internals),
hint='is not dict'
)
else:
internals = ArrayDict(internals)
# Actions
            if isinstance(actions, np.ndarray):
actions = ArrayDict(singleton=actions)
elif not isinstance(actions, dict):
raise TensorforceError.type(
name='Agent.experience', argument='actions', dtype=type(actions),
hint='is not dict'
)
else:
actions = ArrayDict(actions)
# Expand inputs if not batched
if not batched:
internals = internals.fmap(function=(lambda x: np.expand_dims(x, axis=0)))
actions = actions.fmap(function=(lambda x: np.expand_dims(x, axis=0)))
terminal = np.asarray([terminal])
reward = np.asarray([reward])
else:
terminal = np.asarray(terminal)
reward = np.asarray(reward)
# Check number of inputs
for name, internal in internals.items():
if internal.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(internals[{}])'.format(name),
value=internal.shape[0], hint='!= len(states)'
)
for name, action in actions.items():
if action.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(actions[{}])'.format(name),
value=action.shape[0], hint='!= len(states)'
)
if terminal.shape[0] != num_instances:
raise TensorforceError.value(
                name='Agent.experience', argument='len(terminal)',
value=terminal.shape[0], hint='!= len(states)'
)
if reward.shape[0] != num_instances:
raise TensorforceError.value(
                name='Agent.experience', argument='len(reward)',
value=reward.shape[0], hint='!= len(states)'
)
def function(name, spec):
auxiliary = ArrayDict()
if self.config.enable_int_action_masking and spec.type == 'int' and \
spec.num_values is not None:
if name is None:
name = 'action'
# Mask, either part of states or default all true
auxiliary['mask'] = states.pop(name + '_mask', np.ones(
shape=(num_instances,) + spec.shape + (spec.num_values,), dtype=spec.np_type()
))
return auxiliary
auxiliaries = self.actions_spec.fmap(function=function, cls=ArrayDict, with_names=True)
if self.states_spec.is_singleton() and not states.is_singleton():
states[None] = states.pop('state')
# Convert terminal to int if necessary
if terminal.dtype is util.np_dtype(dtype='bool'):
zeros = np.zeros_like(terminal, dtype=util.np_dtype(dtype='int'))
ones = np.ones_like(terminal, dtype=util.np_dtype(dtype='int'))
terminal = np.where(terminal, ones, zeros)
if terminal[-1] == 0:
raise TensorforceError(message="Agent.experience() requires full episodes as input.")
# Batch experiences split into episodes and at most size buffer_observe
last = 0
for index in range(1, len(terminal) + 1):
if terminal[index - 1] == 0:
continue
function = (lambda x: x[last: index])
states_batch = states.fmap(function=function)
internals_batch = internals.fmap(function=function)
auxiliaries_batch = auxiliaries.fmap(function=function)
actions_batch = actions.fmap(function=function)
terminal_batch = function(terminal)
reward_batch = function(reward)
last = index
# Inputs to tensors
states_batch = self.states_spec.to_tensor(
value=states_batch, batched=True, name='Agent.experience states'
)
internals_batch = self.internals_spec.to_tensor(
value=internals_batch, batched=True, recover_empty=True,
name='Agent.experience internals'
)
auxiliaries_batch = self.auxiliaries_spec.to_tensor(
value=auxiliaries_batch, batched=True, name='Agent.experience auxiliaries'
)
actions_batch = self.actions_spec.to_tensor(
value=actions_batch, batched=True, name='Agent.experience actions'
)
terminal_batch = self.terminal_spec.to_tensor(
value=terminal_batch, batched=True, name='Agent.experience terminal'
)
reward_batch = self.reward_spec.to_tensor(
value=reward_batch, batched=True, name='Agent.experience reward'
)
# Model.experience()
timesteps, episodes = self.model.experience(
states=states_batch, internals=internals_batch, auxiliaries=auxiliaries_batch,
actions=actions_batch, terminal=terminal_batch, reward=reward_batch
)
self.timesteps = timesteps.numpy().item()
self.episodes = episodes.numpy().item()
if self.model.saver is not None:
self.model.save()
def update(self, query=None, **kwargs):
"""
Perform an update.
See the [act-experience-update script](https://github.com/tensorforce/tensorforce/blob/master/examples/act_experience_update_interface.py)
for an example application as part of the act-experience-update interface, which is an
alternative to the act-observe interaction pattern.
"""
updates = self.model.update()
self.updates = updates.numpy().item()
if self.model.saver is not None:
self.model.save()
def pretrain(self, directory, num_iterations, num_traces=1, num_updates=1, extension='.npz'):
"""
Simple pretraining approach as a combination of `experience()` and `update`, akin to
behavioral cloning, using experience traces obtained e.g. via recording agent interactions
([see documentation](https://tensorforce.readthedocs.io/en/latest/basics/features.html#record-pretrain)).
For the given number of iterations, load the given number of trace files (which each contain
recorder[frequency] episodes), feed the experience to the agent's internal memory, and
subsequently trigger the given number of updates (which will use the experience in the
internal memory, fed in this or potentially previous iterations).
See the [record-and-pretrain script](https://github.com/tensorforce/tensorforce/blob/master/examples/record_and_pretrain.py)
for an example application.
Args:
directory (path): Directory with experience traces, e.g. obtained via recorder; episode
length has to be consistent with agent configuration
(<span style="color:#C00000"><b>required</b></span>).
num_iterations (int > 0): Number of iterations consisting of loading new traces and
performing multiple updates
(<span style="color:#C00000"><b>required</b></span>).
num_traces (int > 0): Number of traces to load per iteration; has to at least satisfy
the update batch size
(<span style="color:#00C000"><b>default</b></span>: 1).
num_updates (int > 0): Number of updates per iteration
(<span style="color:#00C000"><b>default</b></span>: 1).
extension (str): Traces file extension to filter the given directory for
(<span style="color:#00C000"><b>default</b></span>: ".npz").
"""
if not os.path.isdir(directory):
raise TensorforceError.value(
name='agent.pretrain', argument='directory', value=directory
)
files = sorted(
os.path.join(directory, f) for f in os.listdir(directory)
if os.path.isfile(os.path.join(directory, f)) and os.path.splitext(f)[1] == extension
)
indices = list(range(len(files)))
for _ in range(num_iterations):
shuffle(indices)
if num_traces is None:
selection = indices
else:
selection = indices[:num_traces]
batch = None
for index in selection:
trace = ArrayDict(np.load(files[index]))
if batch is None:
batch = trace
else:
batch = batch.fmap(
function=(lambda x, y: np.concatenate([x, y], axis=0)), zip_values=(trace,)
)
for name, value in batch.pop('auxiliaries', dict()).items():
assert name.endswith('/mask')
batch['states'][name[:-5] + '_mask'] = value
self.experience(**batch.to_kwargs())
for _ in range(num_updates):
self.update()
# TODO: self.obliviate()
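# Example (hedged sketch, not part of the class above): how experience(),
# update() and pretrain() are typically combined in the act-experience-update
# interface.  `agent` is assumed to be an already-constructed agent instance
# and `traces_dir` a directory of recorded .npz traces as described in the
# pretrain() docstring; both names are placeholders.
def _example_pretrain_usage(agent, traces_dir):
    # Behavioral-cloning style pretraining from recorded traces.
    agent.pretrain(directory=traces_dir, num_iterations=10, num_traces=5, num_updates=2)
    # Alternatively, feed one full recorded episode manually and then update:
    #   agent.experience(states=states, actions=actions, terminal=terminal, reward=reward)
    #   agent.update()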
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
NetBIOS over TCP/IP
[RFC 1001/1002]
"""
import struct
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import UDP,TCP
from scapy.layers.l2 import SourceMACField
class NetBIOS_DS(Packet):
name = "NetBIOS datagram service"
fields_desc = [
ByteEnumField("type",17, {17:"direct_group"}),
ByteField("flags",0),
XShortField("id",0),
IPField("src","127.0.0.1"),
ShortField("sport",138),
ShortField("len",None),
ShortField("ofs",0),
NetBIOSNameField("srcname",""),
NetBIOSNameField("dstname",""),
]
def post_build(self, p, pay):
p += pay
if self.len is None:
            l = len(p) - 14  # LENGTH counts all bytes after the 14-byte datagram header
            p = p[:10] + struct.pack("!H", l) + p[12:]
return p
# ShortField("length",0),
# ShortField("Delimitor",0),
# ByteField("command",0),
# ByteField("data1",0),
# ShortField("data2",0),
# ShortField("XMIt",0),
# ShortField("RSPCor",0),
# StrFixedLenField("dest","",16),
# StrFixedLenField("source","",16),
#
# ]
#
#NetBIOS
# Name Query Request
# Node Status Request
class NBNSQueryRequest(Packet):
name="NBNS query request"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x0110),
ShortField("QDCOUNT",1),
ShortField("ANCOUNT",0),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("QUESTION_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"})]
# Name Registration Request
# Name Refresh Request
# Name Release Request or Demand
class NBNSRequest(Packet):
name="NBNS request"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x2910),
ShortField("QDCOUNT",1),
ShortField("ANCOUNT",0),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",1),
NetBIOSNameField("QUESTION_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
ShortEnumField("RR_NAME",0xC00C,{0xC00C:"Label String Pointer to QUESTION_NAME"}),
ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL", 0),
ShortField("RDLENGTH", 6),
BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
BitEnumField("OWNER_NODE_TYPE",00,2,{0:"B node",1:"P node",2:"M node",3:"H node"}),
BitEnumField("UNUSED",0,13,{0:"Unused"}),
IPField("NB_ADDRESS", "127.0.0.1")]
# Name Query Response
# Name Registration Response
class NBNSQueryResponse(Packet):
name="NBNS query response"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x8500),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
IntField("TTL", 0x493e0),
ShortField("RDLENGTH", 6),
ShortField("NB_FLAGS", 0),
IPField("NB_ADDRESS", "127.0.0.1")]
# Name Query Response (negative)
# Name Release Response
class NBNSQueryResponseNegative(Packet):
name="NBNS query response (negative)"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x8506),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL",0),
ShortField("RDLENGTH",6),
BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
BitEnumField("OWNER_NODE_TYPE",00,2,{0:"B node",1:"P node",2:"M node",3:"H node"}),
BitEnumField("UNUSED",0,13,{0:"Unused"}),
IPField("NB_ADDRESS", "127.0.0.1")]
# Node Status Response
class NBNSNodeStatusResponse(Packet):
name="NBNS Node Status Response"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x8500),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("RR_TYPE",0x21, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL",0),
ShortField("RDLENGTH",83),
ByteField("NUM_NAMES",1)]
# Service for Node Status Response
class NBNSNodeStatusResponseService(Packet):
name="NBNS Node Status Response Service"
fields_desc = [StrFixedLenField("NETBIOS_NAME","WINDOWS ",15),
ByteEnumField("SUFFIX",0,{0:"workstation",0x03:"messenger service",0x20:"file server service",0x1b:"domain master browser",0x1c:"domain controller", 0x1e:"browser election service"}),
ByteField("NAME_FLAGS",0x4),
ByteEnumField("UNUSED",0,{0:"unused"})]
# End of Node Status Response packet
class NBNSNodeStatusResponseEnd(Packet):
name="NBNS Node Status Response"
fields_desc = [SourceMACField("MAC_ADDRESS"),
BitField("STATISTICS",0,57*8)]
# Wait for Acknowledgement Response
class NBNSWackResponse(Packet):
name="NBNS Wait for Acknowledgement Response"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0xBC07),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL", 2),
ShortField("RDLENGTH",2),
BitField("RDATA",10512,16)] #10512=0010100100010000
class NBTDatagram(Packet):
name="NBT Datagram Packet"
fields_desc= [ByteField("Type", 0x10),
ByteField("Flags", 0x02),
ShortField("ID", 0),
IPField("SourceIP", "127.0.0.1"),
ShortField("SourcePort", 138),
ShortField("Length", 272),
ShortField("Offset", 0),
NetBIOSNameField("SourceName","windows"),
ShortEnumField("SUFFIX1",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
NetBIOSNameField("DestinationName","windows"),
ShortEnumField("SUFFIX2",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0)]
class NBTSession(Packet):
name="NBT Session Packet"
fields_desc= [ByteEnumField("TYPE",0,{0x00:"Session Message",0x81:"Session Request",0x82:"Positive Session Response",0x83:"Negative Session Response",0x84:"Retarget Session Response",0x85:"Session Keepalive"}),
BitField("RESERVED",0x00,7),
BitField("LENGTH",0,17)]
bind_layers( UDP, NBNSQueryRequest, dport=137)
bind_layers( UDP, NBNSRequest, dport=137)
bind_layers( UDP, NBNSQueryResponse, sport=137)
bind_layers( UDP, NBNSQueryResponseNegative, sport=137)
bind_layers( UDP, NBNSNodeStatusResponse, sport=137)
bind_layers( NBNSNodeStatusResponse, NBNSNodeStatusResponseService, )
bind_layers( NBNSNodeStatusResponseService, NBNSNodeStatusResponseService, )
bind_layers( NBNSNodeStatusResponseService, NBNSNodeStatusResponseEnd, )
bind_layers( UDP, NBNSWackResponse, sport=137)
bind_layers( UDP, NBTDatagram, dport=138)
bind_layers( TCP, NBTSession, dport=139)
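# Example (hedged sketch): building an NBNS name query for the NetBIOS name
# "WORKSTATION1" with the classes defined above.  The destination address is a
# placeholder; actually sending the packet would use Scapy's send()/sr1().
def _example_nbns_query(dst="192.168.1.255"):
    from scapy.layers.inet import IP
    return (IP(dst=dst) /
            UDP(sport=137, dport=137) /
            NBNSQueryRequest(QUESTION_NAME="WORKSTATION1"))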
|
|
from __future__ import division
import numpy as np
from numpy.fft import fft2, ifft2
import pyfftw
from .utils import fft_shift_phasor_2d, yxoffset
from .psffuncs import gaussian_moffat_psf
__all__ = ["TabularPSF", "GaussianMoffatPSF"]
class PSFBase(object):
"""Base class for 3-d PSFs."""
def __init__(self, A):
"""Set up arrays and FFTs for convolution.
Parameters
----------
A : ndarray (3-d)
PSF, assumed to be centered in the array at the
"reference wavelength."
"""
self.nw, self.ny, self.nx = A.shape
# The attribute `fftconv` stores the Fourier-space array
# necessary to convolve another array by the PSF. This is done
        # by multiplying the input array by `fftconv` in Fourier
# space.
#
# We shift the PSF so that instead of being exactly centered
# in the array, it is exactly centered on the lower left
# pixel. (For convolution in Fourier space, the (0, 0)
# element of the kernel is effectively the "center.")
# Note that this shifting is different than simply
# creating the PSF centered at the lower left pixel to begin
# with, due to wrap-around.
#
        # `ifft2(fftconv).real` would be the PSF in
# real space, shifted to be centered on the lower-left pixel.
shift = -(self.ny - 1) / 2., -(self.nx - 1) / 2.
fshift = fft_shift_phasor_2d((self.ny, self.nx), shift)
fftconv = fft2(A) * fshift
# align on SIMD boundary.
self.fftconv = pyfftw.n_byte_align(fftconv, pyfftw.simd_alignment,
dtype=np.complex128)
# set up input and output arrays for FFTs.
self.fftin = pyfftw.n_byte_align_empty(A.shape,
pyfftw.simd_alignment,
dtype=np.complex128)
self.fftout = pyfftw.n_byte_align_empty(A.shape,
pyfftw.simd_alignment,
dtype=np.complex128)
# Set up forward and backward FFTs.
self.fft = pyfftw.FFTW(self.fftin, self.fftout, axes=(1, 2),
threads=1)
self.ifft = pyfftw.FFTW(self.fftout, self.fftin, axes=(1, 2),
threads=1, direction='FFTW_BACKWARD')
self.fftnorm = 1. / (self.ny * self.nx)
def evaluate_galaxy(self, galmodel, shape, ctr, grad=False):
"""convolve, shift and sample the galaxy model"""
# shift necessary to put model onto data coordinates
offset = yxoffset((self.ny, self.nx), shape, ctr)
fshift = fft_shift_phasor_2d((self.ny, self.nx),
(-offset[0], -offset[1]), grad=grad)
if grad:
fshift, fshiftgrad = fshift
fshiftgrad *= -1. # make derivatives w.r.t. `ctr`.
# calculate `fft(galmodel) * fftconv`
np.copyto(self.fftin, galmodel) # copy input array to complex array
self.fft.execute() # populates self.fftout
self.fftout *= self.fftconv
if grad:
fftgal = np.copy(self.fftout) # cache result for use in gradient.
self.fftout *= fshift
self.ifft.execute() # populates self.fftin
self.fftin *= self.fftnorm
gal = np.copy(self.fftin.real[:, 0:shape[0], 0:shape[1]])
if grad:
galgrad = np.empty((2,) + gal.shape, dtype=np.float64)
for i in (0, 1):
np.copyto(self.fftout, fftgal)
self.fftout *= fshiftgrad[i]
self.ifft.execute() # populates self.fftin
self.fftin *= self.fftnorm
galgrad[i] = self.fftin.real[:, 0:shape[0], 0:shape[1]]
return gal, galgrad
else:
return gal
def gradient_helper(self, x, shape, ctr):
"""Not sure exactly what this does yet.
Parameters
----------
i_t : int
Epoch index.
x : np.ndarray (3-d)
Same shape as *data* for single epoch (nw, ny, nx).
xcoords : np.ndarray (1-d)
ycoords : np.ndarray (1-d)
Returns
-------
x : np.ndarray (3-d)
Shape is (nw, len(ycoords), len(xcoords)).
"""
# shift necessary to put model onto data coordinates
offset = yxoffset((self.ny, self.nx), shape, ctr)
fshift = fft_shift_phasor_2d((self.ny, self.nx),
(-offset[0], -offset[1]))
fshift = np.asarray(fshift, dtype=np.complex128)
# create output array
out = np.zeros((self.nw, self.ny, self.nx), dtype=np.float64)
out[:, :x.shape[1], :x.shape[2]] = x
for i in range(self.nw):
tmp = ifft2(np.conj(self.fftconv[i, :, :] * fshift) *
fft2(out[i, :, :]))
out[i, :, :] = tmp.real
return out
class TabularPSF(PSFBase):
"""PSF represented by an array."""
def point_source(self, pos, shape, ctr, grad=False):
"""Evaluate a point source at the given position.
If grad is True, return a 2-tuple, with the second item being
a 4-d array of gradient with respect to
ctr[0], ctr[1], pos[0], pos[1].
"""
# shift necessary to put model onto data coordinates
offset = yxoffset((self.ny, self.nx), shape, ctr)
yshift, xshift = -offset[0], -offset[1]
# Add shift to move point source from the lower left in the model array
# to `pos` in model *coordinates*. Note that in model coordinates,
# (0, 0) corresponds to array position (ny-1)/2., (nx-1)/2.
yshift += (self.ny - 1) / 2. + pos[0]
xshift += (self.nx - 1) / 2. + pos[1]
fshift = fft_shift_phasor_2d((self.ny, self.nx), (yshift, xshift),
grad=grad)
if grad:
fshift, fshiftgrad = fshift
fshiftgrad *= -1. # make derivatives w.r.t. `ctr`.
# following block is like ifft2(fftconv * fshift)
np.copyto(self.fftout, self.fftconv)
self.fftout *= self.fftnorm * fshift
self.ifft.execute()
s = np.copy(self.fftin.real[:, 0:shape[0], 0:shape[1]])
if grad:
sgrad = np.empty((4,) + s.shape, dtype=np.float64)
for i in (0, 1):
np.copyto(self.fftout, self.fftconv)
self.fftout *= self.fftnorm * fshiftgrad[i]
self.ifft.execute()
sgrad[i] = self.fftin.real[:, 0:shape[0], 0:shape[1]]
sgrad[2:4] = -sgrad[0:2]
return s, sgrad
else:
return s
class GaussianMoffatPSF(PSFBase):
"""A Gaussian plus Moffat function 3-d point spread function.
This describes a separate analytic PSF at multiple (discrete) wavelengths.
At each wavelength, the PSF is described by two parameters: ellipticity
and alpha. These in turn determine the Gaussian and Moffat function
parameters.
Parameters
----------
ellipticity : ndarray (1-d)
alpha : ndarray (1-d)
"""
def __init__(self, sigma, alpha, beta, ellipticity, eta, yctr, xctr,
shape, subpix=1):
if not (len(sigma) == len(alpha) == len(beta) == len(ellipticity) ==
len(eta) == len(yctr) == len(xctr)):
raise ValueError("length of input arrays must match")
if not np.all(beta > 1.):
raise ValueError("beta must be > 1")
self.sigma = sigma
self.alpha = alpha
self.beta = beta
self.ellipticity = ellipticity
self.eta = eta
self.yctr = yctr
self.xctr = xctr
self.subpix = subpix
# Set up tabular PSF for galaxy convolution
A = gaussian_moffat_psf(sigma, alpha, beta, ellipticity, eta,
yctr, xctr, shape, subpix=subpix)
super(GaussianMoffatPSF, self).__init__(A)
def point_source(self, pos, shape, ctr, grad=False):
yctr = self.yctr + pos[0] - ctr[0]
xctr = self.xctr + pos[1] - ctr[1]
res = gaussian_moffat_psf(self.sigma, self.alpha, self.beta,
self.ellipticity, self.eta, yctr, xctr,
shape, subpix=self.subpix, grad=grad)
if grad:
s, sgrad_pos = res
sgrad = np.empty((4,) + s.shape, dtype=np.float64)
sgrad[0:2] = -sgrad_pos
sgrad[2:4] = sgrad_pos
return s, sgrad
else:
return res
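# Example (hedged sketch): constructing a GaussianMoffatPSF for `nw` wavelength
# slices and evaluating a point source.  The parameter values are placeholders
# chosen only to satisfy the constraints checked in __init__ (equal-length
# arrays, beta > 1); they are not physically meaningful.
def _example_gaussian_moffat_psf(nw=3, shape=(32, 32)):
    ones = np.ones(nw)
    psf = GaussianMoffatPSF(sigma=1.0 * ones, alpha=2.0 * ones, beta=2.5 * ones,
                            ellipticity=1.0 * ones, eta=1.0 * ones,
                            yctr=np.zeros(nw), xctr=np.zeros(nw),
                            shape=shape, subpix=1)
    # Point source at model coordinates (0, 0), sampled on a data grid of the
    # same shape (here the data center `ctr` is assumed to coincide with the
    # model center).
    return psf.point_source(pos=(0., 0.), shape=shape, ctr=(0., 0.))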
|
|
import pytest
import mock
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
ForkFactory
)
from rest_framework import exceptions
from website import mails
from website.util import permissions
from api.nodes.serializers import NodeForksSerializer
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeForksList:
@pytest.fixture()
def pointer(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def private_project(self, user, pointer):
private_project = ProjectFactory()
private_project.add_contributor(user, permissions=[permissions.READ, permissions.WRITE])
private_project.add_pointer(pointer, auth=Auth(user), save=True)
private_project.save()
return private_project
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(is_public=True, creator=user)
@pytest.fixture()
def private_component(self, user, private_project):
return NodeFactory(parent=private_project, creator=user)
@pytest.fixture()
def public_component(self, user, public_project):
return NodeFactory(parent=public_project, creator=user, is_public=True)
@pytest.fixture()
def private_fork(self, user, private_project):
return ForkFactory(project=private_project, user=user)
@pytest.fixture()
def public_fork(self, user, public_project):
return ForkFactory(project=public_project, user=user)
@pytest.fixture()
def private_project_url(self, private_project):
return '/{}nodes/{}/forks/'.format(API_BASE, private_project._id)
@pytest.fixture()
def public_project_url(self, public_project):
return '/{}nodes/{}/forks/'.format(API_BASE, public_project._id)
def test_can_access_public_node_forks_list_when_unauthenticated(self, app, public_project, public_fork, public_project_url):
res = app.get(public_project_url)
assert res.status_code == 200
assert len(res.json['data']) == 0
# Fork defaults to private
assert public_fork.is_public == False
public_fork.is_public = True
public_fork.save()
res = app.get(public_project_url)
assert len(res.json['data']) == 1
assert public_fork.is_public == True
data = res.json['data'][0]
assert data['attributes']['title'] == 'Fork of ' + public_project.title
assert data['id'] == public_fork._id
assert data['attributes']['registration'] == False
assert data['attributes']['fork'] == True
def test_can_access_public_node_forks_list_authenticated_contributor(self, app, user, public_project, public_fork, public_project_url):
res = app.get(public_project_url, auth=user.auth)
assert res.status_code == 200
assert public_fork.is_public == False
assert len(res.json['data']) == 1
data = res.json['data'][0]
assert data['attributes']['title'] == 'Fork of ' + public_project.title
assert data['id'] == public_fork._id
assert data['attributes']['registration'] == False
assert data['attributes']['fork'] == True
def test_can_access_public_node_forks_list_authenticated_non_contributor(self, app, public_project, public_fork, public_project_url):
non_contrib = AuthUserFactory()
res = app.get(public_project_url, auth=non_contrib.auth)
assert res.status_code == 200
assert len(res.json['data']) == 0
# Fork defaults to private
assert public_fork.is_public == False
public_fork.is_public = True
public_fork.save()
res = app.get(public_project_url)
assert len(res.json['data']) == 1
assert public_fork.is_public == True
data = res.json['data'][0]
assert data['attributes']['title'] == 'Fork of ' + public_project.title
assert data['id'] == public_fork._id
assert data['attributes']['registration'] == False
assert data['attributes']['fork'] == True
def test_authenticated_contributor_can_access_private_node_forks_list(self, app, user, private_project, private_component, private_fork, pointer, private_project_url):
res = app.get(private_project_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
data = res.json['data'][0]
assert data['attributes']['title'] == 'Fork of ' + private_project.title
assert data['id'] == private_fork._id
fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data']
assert fork_contributors['attributes']['family_name'] == user.family_name
assert fork_contributors['id'] == user._id
forked_children = data['embeds']['children']['data'][0]
assert forked_children['id'] == private_component.forks.first()._id
assert forked_children['attributes']['title'] == private_component.title
forked_node_links = data['embeds']['node_links']['data'][0]['embeds']['target_node']['data']
assert forked_node_links['id'] == pointer._id
assert forked_node_links['attributes']['title'] == pointer.title
auth = Auth(user)
expected_logs = list(private_project.get_aggregate_logs_queryset(auth).values_list('action', flat=True))
expected_logs.append('node_forked')
forked_logs = data['embeds']['logs']['data']
forked_log_actions = [log['attributes']['action'] for log in forked_logs]
assert set(expected_logs) == set(forked_log_actions)
assert len(set(forked_log_actions)) == len(set(expected_logs))
forked_from = data['embeds']['forked_from']['data']
assert forked_from['id'] == private_project._id
def test_node_forks_list_errors(self, app, private_project_url):
# test_cannot_access_private_node_forks_list_unauthenticated
res = app.get(private_project_url, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_authenticated_non_contributor_cannot_access_private_node_forks_list
non_contrib = AuthUserFactory()
res = app.get(private_project_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_forks_list_does_not_show_registrations_of_forks(self, app, public_project, public_fork, public_project_url):
reg = RegistrationFactory(project=public_fork, is_public=True)
# confirm registration shows up in node forks
assert reg in public_project.forks.all()
res = app.get(public_project_url)
# confirm registration of fork does not show up in public data
assert len(res.json['data']) == 0
@pytest.mark.django_db
class TestNodeForkCreate:
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def private_project(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(is_public=True, creator=user)
@pytest.fixture()
def private_project_url(self, private_project):
return '/{}nodes/{}/forks/'.format(API_BASE, private_project._id)
@pytest.fixture()
def public_project_url(self, public_project):
return '/{}nodes/{}/forks/'.format(API_BASE, public_project._id)
@pytest.fixture()
def fork_data(self):
return {
'data': {
'type': 'nodes'
}
}
@pytest.fixture()
def fork_data_with_title(self):
return {
'data': {
'type': 'nodes',
'attributes':
{'title': 'My Forked Project'}
}
}
def test_create_fork_from_public_project_with_new_title(self, app, user, public_project, fork_data_with_title, public_project_url):
res = app.post_json_api(public_project_url, fork_data_with_title, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == public_project.forks.first()._id
assert res.json['data']['attributes']['title'] == fork_data_with_title['data']['attributes']['title']
def test_create_fork_from_private_project_with_new_title(self, app, user, private_project, fork_data_with_title, private_project_url):
res = app.post_json_api(private_project_url, fork_data_with_title, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == private_project.forks.first()._id
assert res.json['data']['attributes']['title'] == fork_data_with_title['data']['attributes']['title']
def test_can_fork_public_node_logged_in(self, app, public_project, fork_data, public_project_url):
non_contrib = AuthUserFactory()
res = app.post_json_api(public_project_url, fork_data, auth=non_contrib.auth)
assert res.status_code == 201
assert res.json['data']['id'] == public_project.forks.first()._id
assert res.json['data']['attributes']['title'] == 'Fork of ' + public_project.title
def test_cannot_fork_errors(self, app, fork_data, public_project_url, private_project_url):
# test_cannot_fork_public_node_logged_out
res = app.post_json_api(public_project_url, fork_data, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_cannot_fork_private_node_logged_out
res = app.post_json_api(private_project_url, fork_data, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_cannot_fork_private_node_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.post_json_api(private_project_url, fork_data, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_can_fork_public_node_logged_in_contributor(self, app, user, public_project, fork_data, public_project_url):
res = app.post_json_api(public_project_url, fork_data, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == public_project.forks.first()._id
assert res.json['data']['attributes']['title'] == 'Fork of ' + public_project.title
def test_can_fork_private_node_logged_in_contributor(self, app, user, private_project, fork_data, private_project_url):
res = app.post_json_api(private_project_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', fork_data, auth=user.auth)
assert res.status_code == 201
data = res.json['data']
assert data['attributes']['title'] == 'Fork of ' + private_project.title
fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data']
assert fork_contributors['attributes']['family_name'] == user.family_name
assert fork_contributors['id'] == user._id
forked_from = data['embeds']['forked_from']['data']
assert forked_from['id'] == private_project._id
def test_fork_private_components_no_access(self, app, user_two, public_project, fork_data, public_project_url):
user_three = AuthUserFactory()
url = public_project_url + '?embed=children'
private_component = NodeFactory(parent=public_project, creator=user_two, is_public=False)
res = app.post_json_api(url, fork_data, auth=user_three.auth)
assert res.status_code == 201
# Private components that you do not have access to are not forked
assert res.json['data']['embeds']['children']['links']['meta']['total'] == 0
def test_fork_components_you_can_access(self, app, user, private_project, fork_data, private_project_url):
url = private_project_url + '?embed=children'
new_component = NodeFactory(parent=private_project, creator=user)
res = app.post_json_api(url, fork_data, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['embeds']['children']['links']['meta']['total'] == 1
assert res.json['data']['embeds']['children']['data'][0]['id'] == new_component.forks.first()._id
assert res.json['data']['embeds']['children']['data'][0]['attributes']['title'] == new_component.title
def test_fork_private_node_links(self, app, user, user_two, private_project, fork_data, private_project_url):
private_pointer = ProjectFactory(creator=user_two)
actual_pointer = private_project.add_pointer(private_pointer, auth=Auth(user_two), save=True)
url = private_project_url + '?embed=node_links'
# Node link is forked, but shows up as a private node link
res = app.post_json_api(url, fork_data, auth=user.auth)
assert res.status_code == 201
assert (res.json['data']['embeds']['node_links']['data'][0]['embeds']['target_node']['errors'][0]['detail'] ==
exceptions.PermissionDenied.default_detail)
assert res.json['data']['embeds']['node_links']['links']['meta']['total'] == 1
private_project.rm_pointer(actual_pointer, auth=Auth(user_two))
def test_fork_node_links_you_can_access(self, app, user, user_two, private_project, fork_data, private_project_url):
pointer = ProjectFactory(creator=user)
private_project.add_pointer(pointer, auth=Auth(user_two), save=True)
url = private_project_url + '?embed=node_links'
res = app.post_json_api(url, fork_data, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['embeds']['node_links']['data'][0]['embeds']['target_node']['data']['id'] == pointer._id
assert res.json['data']['embeds']['node_links']['links']['meta']['total'] == 1
def test_can_fork_registration(self, app, user, private_project, fork_data):
registration = RegistrationFactory(project=private_project, user=user)
url = '/{}registrations/{}/forks/'.format(API_BASE, registration._id)
res = app.post_json_api(url, fork_data, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == registration.forks.first()._id
assert res.json['data']['attributes']['title'] == 'Fork of ' + registration.title
def test_read_only_contributor_can_fork_private_registration(self, app, private_project, fork_data, private_project_url):
read_contrib = AuthUserFactory()
private_project.add_contributor(read_contrib, permissions=[permissions.READ], save=True)
res = app.post_json_api(private_project_url, fork_data, auth=read_contrib.auth)
assert res.status_code == 201
assert res.json['data']['id'] == private_project.forks.first()._id
assert res.json['data']['attributes']['title'] == 'Fork of ' + private_project.title
def test_send_email_success(self, app, user, public_project_url, fork_data_with_title, public_project):
with mock.patch.object(mails, 'send_mail', return_value=None) as mock_send_mail:
res = app.post_json_api(public_project_url, fork_data_with_title, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == public_project.forks.first()._id
mock_send_mail.assert_called_with(user.email,
mails.FORK_COMPLETED,
title=public_project.title,
guid=res.json['data']['id'],
mimetype='html',
can_change_preferences=False)
def test_send_email_failed(self, app, user, public_project_url, fork_data_with_title, public_project):
with mock.patch.object(NodeForksSerializer, 'save', side_effect=Exception()):
with mock.patch.object(mails, 'send_mail', return_value=None) as mock_send_mail:
with pytest.raises(Exception):
app.post_json_api(public_project_url, fork_data_with_title, auth=user.auth)
mock_send_mail.assert_called_with(user.email,
mails.FORK_FAILED,
title=public_project.title,
guid=public_project._id,
mimetype='html',
can_change_preferences=False)
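# Quick reference (hedged summary of the behaviour exercised above): forks are
# created by POSTing a JSON-API payload to '/{API_BASE}nodes/<node_id>/forks/',
# e.g. {'data': {'type': 'nodes', 'attributes': {'title': 'My Forked Project'}}};
# the 'attributes' block is optional and the title then defaults to
# "Fork of <original title>".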
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import uuid
import warnings
if sys.version > '3':
basestring = str
unicode = str
from pyspark import SparkContext, since
from pyspark.ml.common import inherit_doc
def _jvm():
"""
Returns the JVM view associated with SparkContext. Must be called
after SparkContext is initialized.
"""
jvm = SparkContext._jvm
if jvm:
return jvm
else:
raise AttributeError("Cannot load _jvm from SparkContext. Is SparkContext initialized?")
class Identifiable(object):
"""
Object with a unique ID.
"""
def __init__(self):
#: A unique id for the object.
self.uid = self._randomUID()
def __repr__(self):
return self.uid
@classmethod
def _randomUID(cls):
"""
Generate a unique unicode id for the object. The default implementation
        concatenates the class name, "_", and the last 20 hex characters of a
        random UUID (``uuid.uuid4().hex[12:]``), e.g. "Identifiable_<20 hex chars>".
"""
return unicode(cls.__name__ + "_" + uuid.uuid4().hex[12:])
@inherit_doc
class MLWriter(object):
"""
Utility class that can save ML instances.
.. versionadded:: 2.0.0
"""
def save(self, path):
"""Save the ML instance to the input path."""
raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
def overwrite(self):
"""Overwrites if the output path already exists."""
raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
def context(self, sqlContext):
"""
Sets the SQL context to use for saving.
.. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
"""
raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
def session(self, sparkSession):
"""Sets the Spark Session to use for saving."""
raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
@inherit_doc
class JavaMLWriter(MLWriter):
"""
(Private) Specialization of :py:class:`MLWriter` for :py:class:`JavaParams` types
"""
def __init__(self, instance):
super(JavaMLWriter, self).__init__()
_java_obj = instance._to_java()
self._jwrite = _java_obj.write()
def save(self, path):
"""Save the ML instance to the input path."""
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
self._jwrite.save(path)
def overwrite(self):
"""Overwrites if the output path already exists."""
self._jwrite.overwrite()
return self
def context(self, sqlContext):
"""
Sets the SQL context to use for saving.
.. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
"""
warnings.warn("Deprecated in 2.1 and will be removed in 2.2, use session instead.")
self._jwrite.context(sqlContext._ssql_ctx)
return self
def session(self, sparkSession):
"""Sets the Spark Session to use for saving."""
self._jwrite.session(sparkSession._jsparkSession)
return self
@inherit_doc
class MLWritable(object):
"""
Mixin for ML instances that provide :py:class:`MLWriter`.
.. versionadded:: 2.0.0
"""
def write(self):
"""Returns an MLWriter instance for this ML instance."""
raise NotImplementedError("MLWritable is not yet implemented for type: %r" % type(self))
def save(self, path):
"""Save this ML instance to the given path, a shortcut of `write().save(path)`."""
self.write().save(path)
@inherit_doc
class JavaMLWritable(MLWritable):
"""
(Private) Mixin for ML instances that provide :py:class:`JavaMLWriter`.
"""
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@inherit_doc
class MLReader(object):
"""
Utility class that can load ML instances.
.. versionadded:: 2.0.0
"""
def load(self, path):
"""Load the ML instance from the input path."""
raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
def context(self, sqlContext):
"""
Sets the SQL context to use for loading.
.. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
"""
raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
def session(self, sparkSession):
"""Sets the Spark Session to use for loading."""
raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
@inherit_doc
class JavaMLReader(MLReader):
"""
(Private) Specialization of :py:class:`MLReader` for :py:class:`JavaParams` types
"""
def __init__(self, clazz):
self._clazz = clazz
self._jread = self._load_java_obj(clazz).read()
def load(self, path):
"""Load the ML instance from the input path."""
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
java_obj = self._jread.load(path)
if not hasattr(self._clazz, "_from_java"):
raise NotImplementedError("This Java ML type cannot be loaded into Python currently: %r"
% self._clazz)
return self._clazz._from_java(java_obj)
def context(self, sqlContext):
"""
Sets the SQL context to use for loading.
.. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
"""
warnings.warn("Deprecated in 2.1 and will be removed in 2.2, use session instead.")
self._jread.context(sqlContext._ssql_ctx)
return self
def session(self, sparkSession):
"""Sets the Spark Session to use for loading."""
self._jread.session(sparkSession._jsparkSession)
return self
@classmethod
def _java_loader_class(cls, clazz):
"""
Returns the full class name of the Java ML instance. The default
implementation replaces "pyspark" by "org.apache.spark" in
the Python full class name.
"""
java_package = clazz.__module__.replace("pyspark", "org.apache.spark")
if clazz.__name__ in ("Pipeline", "PipelineModel"):
# Remove the last package name "pipeline" for Pipeline and PipelineModel.
java_package = ".".join(java_package.split(".")[0:-1])
return java_package + "." + clazz.__name__
@classmethod
def _load_java_obj(cls, clazz):
"""Load the peer Java object of the ML instance."""
java_class = cls._java_loader_class(clazz)
java_obj = _jvm()
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
return java_obj
@inherit_doc
class MLReadable(object):
"""
Mixin for instances that provide :py:class:`MLReader`.
.. versionadded:: 2.0.0
"""
@classmethod
def read(cls):
"""Returns an MLReader instance for this class."""
raise NotImplementedError("MLReadable.read() not implemented for type: %r" % cls)
@classmethod
def load(cls, path):
"""Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
return cls.read().load(path)
@inherit_doc
class JavaMLReadable(MLReadable):
"""
(Private) Mixin for instances that provide JavaMLReader.
"""
@classmethod
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@inherit_doc
class JavaPredictionModel():
"""
(Private) Java Model for prediction tasks (regression and classification).
    To be mixed in with :class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numFeatures(self):
"""
        Returns the number of features the model was trained on. If unknown, returns -1.
"""
return self._call_java("numFeatures")
|
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import re
from pytest import raises
from jenkinsflow.flow import serial, parallel, FailedChildJobException
from .cfg import ApiType
from .framework import api_select
from .framework.utils import lines_in, replace_host_port, result_msg, build_started_msg
def test_reporting_job_status(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j11', max_fails=0, expect_invocations=1, expect_order=1)
api.job('j12', max_fails=0, expect_invocations=1, invocation_delay=1.0, exec_time=1.5, initial_buildno=7, expect_order=2, serial=True)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup) as ctrl1:
ctrl1.invoke('j11')
ctrl1.invoke('j12')
sout, _ = capsys.readouterr()
if api.api_type == ApiType.MOCK:
repr_not_invoked = "job: 'jenkinsflow_test__reporting_job_status__j11' Status IDLE - latest build: "
assert repr_not_invoked in sout, repr_not_invoked + "\n - NOT FOUND IN:\n" + sout
assert lines_in(api_type, sout, "job: 'jenkinsflow_test__reporting_job_status__j12' Status IDLE - latest build: #7")
assert lines_in(api_type, sout, "'jenkinsflow_test__reporting_job_status__j12' Status QUEUED - Why am I queued?")
assert lines_in(api_type, sout, "'jenkinsflow_test__reporting_job_status__j12' Status RUNNING - build: #8")
assert lines_in(api_type, sout, "'jenkinsflow_test__reporting_job_status__j12' Status IDLE - build: #8")
else:
# TODO: know if we cleaned jobs and check the 'repr_not_invoked' above
assert "'jenkinsflow_test__reporting_job_status__j12' Status RUNNING - build: " in sout
# assert "'jenkinsflow_test__reporting_job_status__j12' Status IDLE - build: " in sout
def test_reporting_invocation_serial(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j11', max_fails=0, expect_invocations=1, expect_order=1)
api.job('j12', max_fails=0, expect_invocations=1, invocation_delay=1.0, exec_time=1.5, initial_buildno=7, expect_order=2, serial=True)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup) as ctrl1:
ctrl1.invoke('j11')
ctrl1.invoke('j12')
sout, _ = capsys.readouterr()
empty_re = re.compile("^$")
assert lines_in(
api_type, sout,
"^Defined Invocation http://x.x/job/jenkinsflow_test__reporting_invocation_serial__j11",
"^Defined Invocation http://x.x/job/jenkinsflow_test__reporting_invocation_serial__j12",
empty_re,
"--- Starting flow ---",
empty_re,
"^Flow Invocation (1/1,1/1): ['jenkinsflow_test__reporting_invocation_serial__j11', 'jenkinsflow_test__reporting_invocation_serial__j12']",
"^Job Invocation (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_invocation_serial__j11",
build_started_msg(api, "jenkinsflow_test__reporting_invocation_serial__j11", 1),
"^Job Invocation (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_invocation_serial__j12",
build_started_msg(api, "jenkinsflow_test__reporting_invocation_serial__j12", 8),
)
def test_reporting_invocation_parallel(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j11', max_fails=0, expect_invocations=1, expect_order=1)
api.job('j12', max_fails=0, expect_invocations=1, invocation_delay=1.0, exec_time=1.5, initial_buildno=7, expect_order=2)
with parallel(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup) as ctrl1:
ctrl1.invoke('j11')
ctrl1.invoke('j12')
sout, _ = capsys.readouterr()
assert lines_in(
api_type, sout,
"^Flow Invocation (1/1,1/1): ('jenkinsflow_test__reporting_invocation_parallel__j11', 'jenkinsflow_test__reporting_invocation_parallel__j12')",
"^Job Invocation (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_invocation_parallel__j11",
"^Job Invocation (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_invocation_parallel__j12",
)
assert lines_in(
api_type, sout,
"^Job Invocation (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_invocation_parallel__j11",
build_started_msg(api, "jenkinsflow_test__reporting_invocation_parallel__j11", 1),
)
assert lines_in(
api_type, sout,
"^Job Invocation (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_invocation_parallel__j12",
build_started_msg(api, "jenkinsflow_test__reporting_invocation_parallel__j12", 8)
)
def test_reporting_multiple_invocations(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j11', max_fails=0, expect_invocations=3, expect_order=None)
api.job('j12', max_fails=0, expect_invocations=1, invocation_delay=1.0, exec_time=1.5, initial_buildno=7, expect_order=4, serial=True)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup) as ctrl1:
ctrl1.invoke('j11')
ctrl1.invoke('j11')
ctrl1.invoke('j11')
ctrl1.invoke('j12')
sout, _ = capsys.readouterr()
empty_re = re.compile("^$")
assert lines_in(
api_type, sout,
"^Defined Invocation-1 http://x.x/job/jenkinsflow_test__reporting_multiple_invocations__j11",
"^Defined Invocation-2 http://x.x/job/jenkinsflow_test__reporting_multiple_invocations__j11",
"^Defined Invocation-3 http://x.x/job/jenkinsflow_test__reporting_multiple_invocations__j11",
"^Defined Invocation http://x.x/job/jenkinsflow_test__reporting_multiple_invocations__j12",
empty_re,
"--- Starting flow ---",
empty_re,
"^Flow Invocation (1/1,1/1): ['jenkinsflow_test__reporting_multiple_invocations__j11', 'jenkinsflow_test__reporting_multiple_invocations__j11', 'jenkinsflow_test__reporting_multiple_invocations__j11', 'jenkinsflow_test__reporting_multiple_invocations__j12']",
"^Job Invocation-1 (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_multiple_invocations__j11",
build_started_msg(api, "jenkinsflow_test__reporting_multiple_invocations__j11", 1, invocation_number=1),
"^Job Invocation (1/1,1/1): http://x.x/job/jenkinsflow_test__reporting_multiple_invocations__j12",
build_started_msg(api, "jenkinsflow_test__reporting_multiple_invocations__j12", 8),
)
def test_reporting_retry(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j11_fail', max_fails=1, expect_invocations=2, expect_order=1)
api.job('j12', max_fails=0, expect_invocations=1, expect_order=2, serial=True)
api.job('j21', max_fails=0, expect_invocations=1, expect_order=3, serial=True)
api.job('j22_fail', max_fails=2, expect_invocations=3, expect_order=3)
api.job('j31_fail', max_fails=3, expect_invocations=4, expect_order=3)
api.job('j32', max_fails=0, expect_invocations=1, expect_order=3, serial=True)
api.job('j23', max_fails=0, expect_invocations=1, expect_order=3)
api.job('j13', max_fails=0, expect_invocations=1, expect_order=4, serial=True)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, max_tries=2) as ctrl1:
ctrl1.invoke('j11_fail')
ctrl1.invoke('j12')
with ctrl1.parallel(timeout=70, max_tries=3) as ctrl2:
ctrl2.invoke('j21')
ctrl2.invoke('j22_fail')
with ctrl2.serial(timeout=70, max_tries=2) as ctrl3:
ctrl3.invoke('j31_fail')
ctrl3.invoke('j32')
ctrl2.invoke('j23')
ctrl1.invoke('j13')
sout, _ = capsys.readouterr()
outer_flow_repr = "['jenkinsflow_test__reporting_retry__j11_fail', 'jenkinsflow_test__reporting_retry__j12', " \
"('jenkinsflow_test__reporting_retry__j21', 'jenkinsflow_test__reporting_retry__j22_fail', " \
"['jenkinsflow_test__reporting_retry__j31_fail', 'jenkinsflow_test__reporting_retry__j32'], " \
"'jenkinsflow_test__reporting_retry__j23'), 'jenkinsflow_test__reporting_retry__j13']"
assert lines_in(
api_type, sout,
"^Flow Invocation (1/2,1/2): " + outer_flow_repr,
"^Job Invocation (1/2,1/2): http://x.x/job/jenkinsflow_test__reporting_retry__j11_fail",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j11_fail", 1),
"^FAILURE: 'jenkinsflow_test__reporting_retry__j11_fail'",
"^RETRY: job: 'jenkinsflow_test__reporting_retry__j11_fail' failed, retrying child jobs from beginning. Up to 1 more times in current flow",
"^Job Invocation (2/2,2/2): http://x.x/job/jenkinsflow_test__reporting_retry__j11_fail",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j11_fail", 2),
"^SUCCESS: 'jenkinsflow_test__reporting_retry__j11_fail'",
"^Job Invocation (1/3,1/6): http://x.x/job/jenkinsflow_test__reporting_retry__j23",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j23", 1),
"^SUCCESS: 'jenkinsflow_test__reporting_retry__j23'",
"^Job Invocation (1/2,1/2): http://.x.x/job/jenkinsflow_test__reporting_retry__j13",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j13", 1),
"^Flow SUCCESS " + outer_flow_repr
)
assert lines_in(
api_type, sout,
"^Job Invocation (1/2,1/12): http://x.x/job/jenkinsflow_test__reporting_retry__j31_fail",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j31_fail", 1),
"^FAILURE: 'jenkinsflow_test__reporting_retry__j31_fail'",
"^RETRY: job: 'jenkinsflow_test__reporting_retry__j31_fail' failed, retrying child jobs from beginning. Up to 1 more times in current flow",
"^Job Invocation (2/2,2/12): http://.x.x/job/jenkinsflow_test__reporting_retry__j31_fail",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j31_fail", 2),
"^FAILURE: 'jenkinsflow_test__reporting_retry__j31_fail'",
"^RETRY: job: 'jenkinsflow_test__reporting_retry__j31_fail' failed, retrying child jobs from beginning. Up to 10 more times through outer flow",
"^Job Invocation (1/2,3/12): http://x.x/job/jenkinsflow_test__reporting_retry__j31_fail",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j31_fail", 3),
"^FAILURE: 'jenkinsflow_test__reporting_retry__j31_fail'",
"^RETRY: job: 'jenkinsflow_test__reporting_retry__j31_fail' failed, retrying child jobs from beginning. Up to 1 more times in current flow",
"^Job Invocation (2/2,4/12): http://x.x/job/jenkinsflow_test__reporting_retry__j31_fail",
build_started_msg(api, "jenkinsflow_test__reporting_retry__j31_fail", 4),
"^SUCCESS: 'jenkinsflow_test__reporting_retry__j31_fail'"
)
def test_reporting_result_unchecked(api_type, capsys):
with api_select.api(__file__, api_type, login=True) as api:
api.flow_job()
api.job('j11', max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21_unchecked', max_fails=0, expect_invocations=1, invocation_delay=0, exec_time=50, initial_buildno=7, expect_order=None, unknown_result=True, serial=True)
api.job('j22', max_fails=0, expect_invocations=1, invocation_delay=0, exec_time=1.5, initial_buildno=7, expect_order=2)
api.job('j31', max_fails=0, expect_invocations=1, invocation_delay=0, exec_time=1.5, initial_buildno=7, expect_order=3)
api.job('j32_unchecked_fail', max_fails=1, expect_invocations=1, invocation_delay=0, exec_time=1.5, initial_buildno=7, expect_order=3)
api.job('j41', max_fails=0, expect_invocations=1, invocation_delay=0, exec_time=1.5, initial_buildno=7, expect_order=3)
api.job('j42_unchecked', max_fails=0, expect_invocations=1, invocation_delay=0, exec_time=1.5, initial_buildno=7, expect_order=3, serial=True)
api.job('j23', max_fails=0, expect_invocations=1, invocation_delay=0, exec_time=1.5, initial_buildno=7, expect_order=4)
api.job('j12', max_fails=0, expect_invocations=1, invocation_delay=0, exec_time=5, initial_buildno=7, expect_order=5)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.serial() as ctrl2:
ctrl2.invoke_unchecked('j21_unchecked')
ctrl2.invoke('j22')
with ctrl2.parallel() as ctrl3:
ctrl3.invoke('j31')
ctrl3.invoke_unchecked('j32_unchecked_fail')
with ctrl3.serial() as ctrl4:
ctrl4.invoke('j41')
ctrl4.invoke_unchecked('j42_unchecked')
ctrl2.invoke('j23')
ctrl1.invoke('j12')
sout, _ = capsys.readouterr()
assert lines_in(
api_type, sout,
"^UNCHECKED FAILURE: " + result_msg(api, "jenkinsflow_test__reporting_result_unchecked__j32_unchecked_fail"),
"^UNCHECKED SUCCESS: " + result_msg(api, "jenkinsflow_test__reporting_result_unchecked__j42_unchecked", 8),
)
def test_reporting_defined_job_parameters(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j1', max_fails=0, expect_invocations=1, invocation_delay=1.0, exec_time=1.5, initial_buildno=7, expect_order=1, serial=True,
params=(('s1', '', 'desc'), ('c1', 'what', 'desc'), ('i1', 1, 'integer'), ('b1', False, 'boolean')))
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup) as ctrl1:
ctrl1.invoke('j1', s1="hi", c1='why?', i1=2, b1=True)
sout, _ = capsys.readouterr()
assert lines_in(
api_type, sout,
"^Defined Invocation http://x.x/job/jenkinsflow_test__reporting_defined_job_parameters__j1 - parameters:",
" i1 = '2'",
)
assert " s1 = 'hi'" in sout
assert " c1 = 'why?'" in sout
assert " b1 = 'true'" in sout
ordered_params_output = """
Defined Invocation http://x.x/job/jenkinsflow_test__reporting_ordered_job_parameters__j1 - parameters:
s1 = 'hi'
s2 = 'not-last'
c1 = 'why?'
i1 = '2'
b1 = 'true'
s4 = 'was last'
aaa = '3'
unknown1 = 'Hello'
unknown2 = 'true'
s3 = 'last'
"""
def test_reporting_ordered_job_parameters(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j1', max_fails=0, expect_invocations=1, invocation_delay=1.0, exec_time=1.5, initial_buildno=7, expect_order=1, serial=True,
params=(('s1', '', 'desc'), ('c1', 'what', 'desc'), ('i1', 1, 'integer'), ('b1', False, 'boolean'), ('s2', 't', 'd'), ('s3', 't2', 'd2'),
('unknown1', 'Hello', 'd'), ('aaa', 17, 'd'), ('unknown2', False, 'd')))
order = ['s1', 's2', 'c1', 'i1', 'b1', 's4', '*', 's3']
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup, params_display_order=order) as ctrl1:
ctrl1.invoke('j1', s1="hi", c1='why?', i1=2, b1=True, s2='not-last', s3='last', unknown1='Hello', aaa=3, unknown2=True, s4='was last')
sout, _ = capsys.readouterr()
assert replace_host_port(api_type, ordered_params_output.strip()) in replace_host_port(api_type, sout)
def test_reporting_defined_non_existing(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.flow_job()
# TODO
with raises(FailedChildJobException):
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=0.5/api.speedup, allow_missing_jobs=True) as ctrl1:
ctrl1.invoke('j1', a="b", c='d')
sout, _ = capsys.readouterr()
assert lines_in(
api_type, sout,
"Defined Invocation 'jenkinsflow_test__reporting_defined_non_existing__j1' - MISSING JOB",
" a = 'b'",
)
assert " c = 'd'" in sout
|
|
import argparse
import asyncio
import gc
import os.path
import pathlib
import socket
import ssl
PRINT = 0
async def echo_server(loop, address, unix):
if unix:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
sock.setblocking(False)
if PRINT:
print('Server listening at', address)
with sock:
while True:
client, addr = await loop.sock_accept(sock)
if PRINT:
print('Connection from', addr)
loop.create_task(echo_client(loop, client))
async def echo_client(loop, client):
try:
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, NameError):
pass
with client:
while True:
data = await loop.sock_recv(client, 1000000)
if not data:
break
await loop.sock_sendall(client, data)
if PRINT:
print('Connection closed')
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.read(1000000)
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
class EchoProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
def connection_lost(self, exc):
self.transport = None
def data_received(self, data):
self.transport.write(data)
class EchoBufferedProtocol(asyncio.BufferedProtocol):
def connection_made(self, transport):
self.transport = transport
        # The receive buffer is reused across reads; buffer_updated() copies
        # the received bytes out (via a slice) before echoing, so the outgoing
        # data is not overwritten by the next read.
self.buffer = bytearray(256 * 1024)
def connection_lost(self, exc):
self.transport = None
def get_buffer(self, sizehint):
return self.buffer
def buffer_updated(self, nbytes):
self.transport.write(self.buffer[:nbytes])
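# Note on the buffered path above (based on the asyncio BufferedProtocol API,
# available since Python 3.7): the transport writes incoming bytes directly
# into the bytearray returned by get_buffer(), and buffer_updated() is only
# told how many bytes arrived. This avoids allocating a fresh bytes object
# for every receive, which is the point of comparing it against EchoProtocol.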
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--streams', default=False, action='store_true')
parser.add_argument('--proto', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
parser.add_argument('--ssl', default=False, action='store_true')
parser.add_argument('--buffered', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
import uvloop
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('serving on: {}'.format(addr))
server_context = None
if args.ssl:
print('with SSL')
if hasattr(ssl, 'PROTOCOL_TLS'):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
else:
server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_context.load_cert_chain(
(pathlib.Path(__file__).parent.parent.parent /
'tests' / 'certs' / 'ssl_cert.pem'),
(pathlib.Path(__file__).parent.parent.parent /
'tests' / 'certs' / 'ssl_key.pem'))
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
if args.streams:
if args.proto:
            print('cannot use --streams and --proto simultaneously')
exit(1)
if args.buffered:
            print('cannot use --streams and --buffered simultaneously')
exit(1)
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr,
ssl=server_context)
else:
coro = asyncio.start_server(echo_client_streams,
*addr,
ssl=server_context)
srv = loop.run_until_complete(coro)
elif args.proto:
if args.streams:
            print('cannot use --streams and --proto simultaneously')
exit(1)
if args.buffered:
print('using buffered protocol')
protocol = EchoBufferedProtocol
else:
print('using simple protocol')
protocol = EchoProtocol
if unix:
coro = loop.create_unix_server(protocol, addr,
ssl=server_context)
else:
coro = loop.create_server(protocol, *addr,
ssl=server_context)
srv = loop.run_until_complete(coro)
else:
if args.ssl:
print('cannot use SSL for loop.sock_* methods')
exit(1)
print('using sock_recv/sock_sendall')
loop.create_task(echo_server(loop, addr, unix))
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
|
|
import asyncio
import logging
import re
import traceback
from contextlib import suppress
from http import HTTPStatus
from urllib.parse import parse_qs
import aiohttp_jinja2
import jinja2
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound, HTTPPermanentRedirect
from aiohttp.web_middlewares import normalize_path_middleware
from airflow._vendor.connexion.apis.abstract import AbstractAPI
from airflow._vendor.connexion.exceptions import ProblemException
from airflow._vendor.connexion.handlers import AuthErrorHandler
from airflow._vendor.connexion.jsonifier import JSONEncoder, Jsonifier
from airflow._vendor.connexion.lifecycle import ConnexionRequest, ConnexionResponse
from airflow._vendor.connexion.problem import problem
from airflow._vendor.connexion.utils import yamldumper
from werkzeug.exceptions import HTTPException as werkzeug_HTTPException
logger = logging.getLogger('connexion.apis.aiohttp_api')
def _generic_problem(http_status: HTTPStatus, exc: Exception = None):
extra = None
if exc is not None:
loop = asyncio.get_event_loop()
if loop.get_debug():
tb = None
with suppress(Exception):
tb = traceback.format_exc()
if tb:
extra = {"traceback": tb}
return problem(
status=http_status.value,
title=http_status.phrase,
detail=http_status.description,
ext=extra,
)
@web.middleware
async def problems_middleware(request, handler):
try:
response = await handler(request)
except ProblemException as exc:
response = problem(status=exc.status, detail=exc.detail, title=exc.title,
type=exc.type, instance=exc.instance, headers=exc.headers, ext=exc.ext)
except (werkzeug_HTTPException, _HttpNotFoundError) as exc:
response = problem(status=exc.code, title=exc.name, detail=exc.description)
except web.HTTPError as exc:
if exc.text == "{}: {}".format(exc.status, exc.reason):
detail = HTTPStatus(exc.status).description
else:
detail = exc.text
response = problem(status=exc.status, title=exc.reason, detail=detail)
except (
web.HTTPException, # eg raised HTTPRedirection or HTTPSuccessful
asyncio.CancelledError, # skipped in default web_protocol
):
# leave this to default handling in aiohttp.web_protocol.RequestHandler.start()
raise
except asyncio.TimeoutError as exc:
# overrides 504 from aiohttp.web_protocol.RequestHandler.start()
logger.debug('Request handler timed out.', exc_info=exc)
response = _generic_problem(HTTPStatus.GATEWAY_TIMEOUT, exc)
except Exception as exc:
# overrides 500 from aiohttp.web_protocol.RequestHandler.start()
logger.exception('Error handling request', exc_info=exc)
response = _generic_problem(HTTPStatus.INTERNAL_SERVER_ERROR, exc)
if isinstance(response, ConnexionResponse):
response = await AioHttpApi.get_response(response)
return response
class AioHttpApi(AbstractAPI):
def __init__(self, *args, **kwargs):
# NOTE we use HTTPPermanentRedirect (308) because
# clients sometimes turn POST requests into GET requests
# on 301, 302, or 303
# see https://tools.ietf.org/html/rfc7538
trailing_slash_redirect = normalize_path_middleware(
append_slash=True,
redirect_class=HTTPPermanentRedirect
)
self.subapp = web.Application(
middlewares=[
problems_middleware,
trailing_slash_redirect
]
)
AbstractAPI.__init__(self, *args, **kwargs)
aiohttp_jinja2.setup(
self.subapp,
loader=jinja2.FileSystemLoader(
str(self.options.openapi_console_ui_from_dir)
)
)
middlewares = self.options.as_dict().get('middlewares', [])
self.subapp.middlewares.extend(middlewares)
def _set_base_path(self, base_path):
AbstractAPI._set_base_path(self, base_path)
self._api_name = AioHttpApi.normalize_string(self.base_path)
@staticmethod
def normalize_string(string):
return re.sub(r'[^a-zA-Z0-9]', '_', string.strip('/'))
def _base_path_for_prefix(self, request):
"""
returns a modified basePath which includes the incoming request's
path prefix.
"""
base_path = self.base_path
if not request.path.startswith(self.base_path):
prefix = request.path.split(self.base_path)[0]
base_path = prefix + base_path
return base_path
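    # For example (illustrative values): with base_path '/api/v1' and a
    # reverse proxy forwarding request.path '/gateway/api/v1/pets', the path
    # does not start with base_path, the split yields prefix '/gateway', and
    # this method returns '/gateway/api/v1', which _spec_for_prefix() then
    # embeds in the served spec.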
def _spec_for_prefix(self, request):
"""
returns a spec with a modified basePath / servers block
which corresponds to the incoming request path.
This is needed when behind a path-altering reverse proxy.
"""
base_path = self._base_path_for_prefix(request)
return self.specification.with_base_path(base_path).raw
def add_openapi_json(self):
"""
Adds openapi json to {base_path}/openapi.json
(or {base_path}/swagger.json for swagger2)
"""
logger.debug('Adding spec json: %s/%s', self.base_path,
self.options.openapi_spec_path)
self.subapp.router.add_route(
'GET',
self.options.openapi_spec_path,
self._get_openapi_json
)
def add_openapi_yaml(self):
"""
        Adds openapi yaml to {base_path}/openapi.yaml
        (or {base_path}/swagger.yaml for swagger2)
"""
if not self.options.openapi_spec_path.endswith("json"):
return
openapi_spec_path_yaml = \
self.options.openapi_spec_path[:-len("json")] + "yaml"
logger.debug('Adding spec yaml: %s/%s', self.base_path,
openapi_spec_path_yaml)
self.subapp.router.add_route(
'GET',
openapi_spec_path_yaml,
self._get_openapi_yaml
)
async def _get_openapi_json(self, request):
return web.Response(
status=200,
content_type='application/json',
body=self.jsonifier.dumps(self._spec_for_prefix(request))
)
async def _get_openapi_yaml(self, request):
return web.Response(
status=200,
content_type='text/yaml',
body=yamldumper(self._spec_for_prefix(request))
)
def add_swagger_ui(self):
"""
Adds swagger ui to {base_path}/ui/
"""
console_ui_path = self.options.openapi_console_ui_path.strip().rstrip('/')
logger.debug('Adding swagger-ui: %s%s/',
self.base_path,
console_ui_path)
for path in (
console_ui_path + '/',
console_ui_path + '/index.html',
):
self.subapp.router.add_route(
'GET',
path,
self._get_swagger_ui_home
)
if self.options.openapi_console_ui_config is not None:
self.subapp.router.add_route(
'GET',
console_ui_path + '/swagger-ui-config.json',
self._get_swagger_ui_config
)
# we have to add an explicit redirect instead of relying on the
# normalize_path_middleware because we also serve static files
# from this dir (below)
async def redirect(request):
raise web.HTTPMovedPermanently(
location=self.base_path + console_ui_path + '/'
)
self.subapp.router.add_route(
'GET',
console_ui_path,
redirect
)
# this route will match and get a permission error when trying to
# serve index.html, so we add the redirect above.
self.subapp.router.add_static(
console_ui_path,
path=str(self.options.openapi_console_ui_from_dir),
name='swagger_ui_static'
)
@aiohttp_jinja2.template('index.j2')
async def _get_swagger_ui_home(self, req):
base_path = self._base_path_for_prefix(req)
template_variables = {
'openapi_spec_url': (base_path + self.options.openapi_spec_path)
}
if self.options.openapi_console_ui_config is not None:
template_variables['configUrl'] = 'swagger-ui-config.json'
return template_variables
async def _get_swagger_ui_config(self, req):
return web.Response(
status=200,
content_type='text/json',
body=self.jsonifier.dumps(self.options.openapi_console_ui_config)
)
def add_auth_on_not_found(self, security, security_definitions):
"""
        Adds a 404 error handler that authenticates the request and only exposes the 404 status if the security validation passes.
"""
logger.debug('Adding path not found authentication')
not_found_error = AuthErrorHandler(
self, _HttpNotFoundError(),
security=security,
security_definitions=security_definitions
)
endpoint_name = "{}_not_found".format(self._api_name)
self.subapp.router.add_route(
'*',
'/{not_found_path}',
not_found_error.function,
name=endpoint_name
)
def _add_operation_internal(self, method, path, operation):
method = method.upper()
operation_id = operation.operation_id or path
logger.debug('... Adding %s -> %s', method, operation_id,
extra=vars(operation))
handler = operation.function
endpoint_name = '{}_{}_{}'.format(
self._api_name,
AioHttpApi.normalize_string(path),
method.lower()
)
self.subapp.router.add_route(
method, path, handler, name=endpoint_name
)
if not path.endswith('/'):
self.subapp.router.add_route(
method, path + '/', handler, name=endpoint_name + '_'
)
@classmethod
async def get_request(cls, req):
"""Convert aiohttp request to connexion
:param req: instance of aiohttp.web.Request
:return: connexion request instance
:rtype: ConnexionRequest
"""
url = str(req.url)
logger.debug('Getting data and status code',
extra={'has_body': req.has_body, 'url': url})
query = parse_qs(req.rel_url.query_string)
headers = req.headers
body = None
if req.body_exists:
body = await req.read()
return ConnexionRequest(url=url,
method=req.method.lower(),
path_params=dict(req.match_info),
query=query,
headers=headers,
body=body,
json_getter=lambda: cls.jsonifier.loads(body),
files={},
context=req)
@classmethod
async def get_response(cls, response, mimetype=None, request=None):
"""Get response.
This method is used in the lifecycle decorators
:type response: aiohttp.web.StreamResponse | (Any,) | (Any, int) | (Any, dict) | (Any, int, dict)
:rtype: aiohttp.web.Response
"""
while asyncio.iscoroutine(response):
response = await response
url = str(request.url) if request else ''
return cls._get_response(response, mimetype=mimetype, extra_context={"url": url})
@classmethod
def _is_framework_response(cls, response):
""" Return True if `response` is a framework response class """
return isinstance(response, web.StreamResponse)
@classmethod
def _framework_to_connexion_response(cls, response, mimetype):
""" Cast framework response class to ConnexionResponse used for schema validation """
body = None
if hasattr(response, "body"): # StreamResponse and FileResponse don't have body
body = response.body
return ConnexionResponse(
status_code=response.status,
mimetype=mimetype,
content_type=response.content_type,
headers=response.headers,
body=body
)
@classmethod
def _connexion_to_framework_response(cls, response, mimetype, extra_context=None):
""" Cast ConnexionResponse to framework response class """
return cls._build_response(
mimetype=response.mimetype or mimetype,
status_code=response.status_code,
content_type=response.content_type,
headers=response.headers,
data=response.body,
extra_context=extra_context,
)
@classmethod
def _build_response(cls, data, mimetype, content_type=None, headers=None, status_code=None, extra_context=None):
if cls._is_framework_response(data):
raise TypeError("Cannot return web.StreamResponse in tuple. Only raw data can be returned in tuple.")
data, status_code, serialized_mimetype = cls._prepare_body_and_status_code(data=data, mimetype=mimetype, status_code=status_code, extra_context=extra_context)
if isinstance(data, str):
text = data
body = None
else:
text = None
body = data
content_type = content_type or mimetype or serialized_mimetype
return web.Response(body=body, text=text, headers=headers, status=status_code, content_type=content_type)
@classmethod
def _set_jsonifier(cls):
cls.jsonifier = Jsonifier(cls=JSONEncoder)
class _HttpNotFoundError(HTTPNotFound):
def __init__(self):
self.name = 'Not Found'
self.description = (
'The requested URL was not found on the server. '
'If you entered the URL manually please check your spelling and '
'try again.'
)
self.code = type(self).status_code
self.empty_body = True
HTTPNotFound.__init__(self, reason=self.name)
|
|
from __future__ import print_function
from acq4.util import Qt
import acq4.Manager
import acq4.pyqtgraph as pg
import acq4.pyqtgraph.opengl as gl
import numpy as np
import acq4.util.functions as fn
import re
man = acq4.Manager.getManager()
## update DB field to reflect dir meta info
#for i in db.select('Cell', ['rowid']):
#d = db.getDir('Cell', i[0])
#typ = d.info().get('type', '')
#db.update('Cell', {'type': typ}, rowid=i[0])
#print d, typ
global eventView, siteView, cells
eventView = 'events_view'
siteView = 'sites_view'
firstRun = False
if 'events' not in locals():
global events
events = {}
firstRun = True
win = Qt.QMainWindow()
#cw = Qt.QWidget()
layout = pg.LayoutWidget()
#layout = Qt.QGridLayout()
#layout.setContentsMargins(0,0,0,0)
#layout.setSpacing(0)
#cw.setLayout(layout)
win.setCentralWidget(layout)
cellCombo = Qt.QComboBox()
cellCombo.setSizeAdjustPolicy(cellCombo.AdjustToContents)
layout.addWidget(cellCombo)
reloadBtn = Qt.QPushButton('reload')
layout.addWidget(reloadBtn)
separateCheck = Qt.QCheckBox("color pre/post")
layout.addWidget(separateCheck)
colorCheck = Qt.QCheckBox("color y position")
layout.addWidget(colorCheck)
errLimitSpin = pg.SpinBox(value=0.7, step=0.1)
layout.addWidget(errLimitSpin)
lengthRatioLimitSpin = pg.SpinBox(value=1.5, step=0.1)
layout.addWidget(lengthRatioLimitSpin)
postRgnStartSpin = pg.SpinBox(value=0.500, step=0.01, siPrefix=True, suffix='s')
layout.addWidget(postRgnStartSpin)
postRgnStopSpin = pg.SpinBox(value=0.700, step=0.01, siPrefix=True, suffix='s')
layout.addWidget(postRgnStopSpin)
spl1 = Qt.QSplitter()
spl1.setOrientation(Qt.Qt.Vertical)
layout.addWidget(spl1, row=1, col=0, rowspan=1, colspan=8)
pw1 = pg.PlotWidget()
spl1.addWidget(pw1)
pw1.setLabel('left', 'Amplitude', 'A')
pw1.setLabel('bottom', 'Decay Tau', 's')
spl2 = Qt.QSplitter()
spl2.setOrientation(Qt.Qt.Horizontal)
spl1.addWidget(spl2)
pw2 = pg.PlotWidget(labels={'bottom': ('time', 's')})
spl2.addWidget(pw2)
tab = Qt.QTabWidget()
spl2.addWidget(tab)
## For viewing cell morphology
gv = pg.GraphicsView()
gv.setBackgroundBrush(pg.mkBrush('w'))
image = pg.ImageItem()
gv.addItem(image)
gv.enableMouse()
gv.setAspectLocked(True)
tab.addTab(gv, 'Morphology')
## 3D atlas
import acq4.analysis.atlas.CochlearNucleus as CN
atlas = CN.CNAtlasDisplayWidget()
atlas.showLabel('DCN')
atlas.showLabel('AVCN')
atlas.showLabel('PVCN')
tab.addTab(atlas, 'Atlas')
atlasPoints = gl.GLScatterPlotItem()
atlas.addItem(atlasPoints)
win.show()
win.resize(1000,800)
sp1 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(200,200,255,70), identical=True, size=8)
sp2 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(255,200,200,70), identical=True, size=8)
sp3 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(100,255,100,70), identical=True, size=8)
sp4 = pw1.scatterPlot([], pen=pg.mkPen(None), size=8)
print("Reading cell list...")
#import os, pickle
#md = os.path.abspath(os.path.split(__file__)[0])
#cacheFile = os.path.join(md, 'eventCache.p')
#if os.path.isfile(cacheFile):
#print "Read from cache..."
#ev = pickle.load(open(cacheFile, 'r'))
#else:
#pickle.dump(ev, open(cacheFile, 'w'))
## create views that link cell information to events/sites
db = man.getModule('Data Manager').currentDatabase()
if not db.hasTable(siteView):
print("Creating DB views.")
db.createView(siteView, ['photostim_sites', 'DirTable_Protocol', 'DirTable_Cell']) ## seems to be unused.
if not db.hasTable(eventView):
db.createView(eventView, ['photostim_events', 'DirTable_Protocol', 'DirTable_Cell'])
cells = db.select(siteView, ['CellDir'], distinct=True)
cells = [c['CellDir'] for c in cells]
cells.sort(key=lambda c: c.name())
cellCombo.addItem('')
for c in cells:
cellCombo.addItem(c.name(relativeTo=man.baseDir))
#cellSpin.setMaximum(len(cells)-1)
print("Done.")
def loadCell(cell, reloadData=False):
global events
if reloadData:
events.pop(cell, None)
if cell in events:
return
db = man.getModule('Data Manager').currentDatabase()
mod = man.dataModel
allEvents = []
hvals = {}
nEv = 0
positionCache = {}
tcache = {}
print("Loading all events for cell", cell)
tot = db.select(eventView, 'count()', where={'CellDir': cell})[0]['count()']
print(tot, "total events..")
with pg.ProgressDialog('Loading event data...', maximum=tot, wait=0) as dlg:
for ev in db.iterSelect(eventView, ['ProtocolSequenceDir', 'SourceFile', 'fitAmplitude', 'fitTime', 'fitDecayTau', 'fitRiseTau', 'fitTimeToPeak', 'fitLengthOverDecay', 'fitFractionalError', 'userTransform', 'CellType', 'CellDir', 'ProtocolDir'], where={'CellDir': cell}, toArray=True, chunkSize=200):
extra = np.empty(ev.shape, dtype=[('right', float), ('anterior', float), ('dorsal', float), ('holding', float)])
## insert holding levels
for i in range(len(ev)):
sd = ev[i]['ProtocolSequenceDir']
if sd not in hvals:
cf = ev[i]['SourceFile']
hvals[sd] = mod.getClampHoldingLevel(cf)
#print hvals[sd], cf
extra[i]['holding'] = hvals[sd]
## insert positions
for i in range(len(ev)):
protoDir = ev[i]['SourceFile'].parent()
key = protoDir
#key = (ev[i]['ProtocolSequenceDir'], ev[i]['SourceFile'])
if key not in positionCache:
#try:
#dh = ev[i]['ProtocolDir']
#p1 = pg.Point(dh.info()['Scanner']['position'])
#if key[0] not in tcache:
#tr = pg.SRTTransform()
#tr.restoreState(dh.parent().info()['userTransform'])
#tcache[key[0]] = tr
#trans = tcache[key[0]]
#p2 = trans.map(p1)
#pcache[key] = (p2.x(),p2.y())
#except:
#print key
#raise
rec = db.select('CochlearNucleus_Protocol', where={'ProtocolDir': protoDir})
if len(rec) == 0:
pos = (None, None, None)
elif len(rec) == 1:
pos = (rec[0]['right'], rec[0]['anterior'], rec[0]['dorsal'])
                    elif len(rec) > 1:
raise Exception("Multiple position records for %s!" % str(protoDir))
positionCache[key] = pos
extra[i]['right'] = positionCache[key][0]
extra[i]['anterior'] = positionCache[key][1]
extra[i]['dorsal'] = positionCache[key][2]
ev = fn.concatenateColumns([ev, extra])
allEvents.append(ev)
nEv += len(ev)
dlg.setValue(nEv)
if dlg.wasCanceled():
raise Exception('Canceled by user.')
ev = np.concatenate(allEvents)
numExSites = 0
numInSites = 0
for site in db.select(siteView, 'ProtocolSequenceDir', where={'CellDir': cell}):
h = hvals.get(site['ProtocolSequenceDir'],None)
if h is None:
continue
if h > -0.02:
numInSites += 1
elif h < -0.04:
numExSites += 1
events[cell] = (ev, numExSites, numInSites)
def init():
if not firstRun:
return
cellCombo.currentIndexChanged.connect(showCell)
separateCheck.toggled.connect(showCell)
colorCheck.toggled.connect(showCell)
errLimitSpin.valueChanged.connect(showCell)
lengthRatioLimitSpin.valueChanged.connect(showCell)
reloadBtn.clicked.connect(reloadCell)
for s in [sp1, sp2, sp3, sp4]:
s.sigPointsClicked.connect(plotClicked)
def plotClicked(plt, pts):
pt = pts[0]
#(id, fn, time) = pt.data
#[['SourceFile', 'ProtocolSequenceDir', 'fitTime']]
#fh = db.getDir('ProtocolSequence', id)[fn]
fh = pt.data()['SourceFile']
id = pt.data()['ProtocolSequenceDir']
time = pt.data()['fitTime']
data = fh.read()['Channel':'primary']
data = fn.besselFilter(data, 8e3)
p = pw2.plot(data, clear=True)
pos = time / data.xvals('Time')[-1]
arrow = pg.CurveArrow(p, pos=pos)
xr = pw2.viewRect().left(), pw2.viewRect().right()
if time < xr[0] or time > xr[1]:
w = xr[1]-xr[0]
pw2.setXRange(time-w/5., time+4*w/5., padding=0)
fitLen = pt.data()['fitDecayTau']*pt.data()['fitLengthOverDecay']
x = np.linspace(time, time+fitLen, fitLen * 50e3)
v = [pt.data()['fitAmplitude'], pt.data()['fitTime'], pt.data()['fitRiseTau'], pt.data()['fitDecayTau']]
y = fn.pspFunc(v, x, risePower=2.0) + data[np.argwhere(data.xvals('Time')>time)[0]-1]
pw2.plot(x, y, pen='b')
#plot.addItem(arrow)
def select(ev, ex=True):
#if source is not None:
#ev = ev[ev['CellDir']==source]
if ex:
ev = ev[ev['holding'] < -0.04] # excitatory events
ev = ev[(ev['fitAmplitude'] < 0) * (ev['fitAmplitude'] > -2e-10)]
else:
ev = ev[(ev['holding'] >= -0.02) * (ev['holding'] <= 0.01)] ## inhibitory events
ev = ev[(ev['fitAmplitude'] > 0) * (ev['fitAmplitude'] < 2e-10)]
ev = ev[(0 < ev['fitDecayTau']) * (ev['fitDecayTau'] < 0.2)] # select decay region
ev = ev[ev['fitFractionalError'] < errLimitSpin.value()]
ev = ev[ev['fitLengthOverDecay'] > lengthRatioLimitSpin.value()]
return ev
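# Note on select() above: the holding levels appear to be expressed in volts,
# so recordings held below -40 mV are treated as excitatory (negative-amplitude,
# inward events) and those held between -20 mV and +10 mV as inhibitory
# (positive-amplitude, outward events), consistent with the site counting in
# loadCell().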
def reloadCell():
showCell(reloadData=True)
def showCell(**kwds):
pw2.clear()
reloadData = kwds.get('reloadData', False)
#global lock
#if lock:
#return
#lock = True
Qt.QApplication.processEvents() ## prevents double-spin
#lock = False
cell = cells[cellCombo.currentIndex()-1]
dh = cell #db.getDir('Cell', cell)
loadCell(dh, reloadData=reloadData)
try:
image.setImage(dh['morphology.png'].read())
gv.setRange(image.sceneBoundingRect())
except:
image.setImage(np.zeros((2,2)))
ev, numExSites, numInSites = events[cell]
ev2 = select(ev, ex=True)
ev3 = select(ev, ex=False)
if colorCheck.isChecked():
sp1.hide()
sp2.hide()
sp3.hide()
sp4.show()
start = postRgnStart()
stop = postRgnStop()
ev2post = ev2[(ev2['fitTime']>start) * (ev2['fitTime']<stop)]
ev3post = ev3[(ev3['fitTime']>start) * (ev3['fitTime']<stop)]
ev4 = np.concatenate([ev2post, ev3post])
yMax = ev4['dorsal'].max()
yMin = ev4['dorsal'].min()
brushes = []
for i in range(len(ev4)):
hue = 0.6*((ev4[i]['dorsal']-yMin) / (yMax-yMin))
brushes.append(pg.hsvColor(hue, 1.0, 1.0, 0.3))
#pts.append({
#'pos': (ev4[i]['fitDecayTau'], ev4[i]['fitAmplitude']),
#'brush': pg.hsvColor(hue, 1, 1, 0.3),
#'data': ev4[i]
#})
sp4.setData(x=ev4['fitDecayTau'], y=ev4['fitAmplitude'], symbolBrush=brushes, data=ev4)
else:
sp1.show()
sp2.show()
#sp3.show()
sp4.hide()
## excitatory
if separateCheck.isChecked():
pre = ev2[ev2['fitTime']< preRgnStop()]
post = ev2[(ev2['fitTime'] > postRgnStart()) * (ev2['fitTime'] < postRgnStop())]
else:
pre = ev2
        sp1.setData(x=pre['fitDecayTau'], y=pre['fitAmplitude'], data=pre)
#print "Cell ", cell
#print " excitatory:", np.median(ev2['fitDecayTau']), np.median(ev2['fitAmplitude'])
## inhibitory
if separateCheck.isChecked():
pre = ev3[ev3['fitTime']< preRgnStop()]
post2 = ev3[(ev3['fitTime'] > postRgnStart()) * (ev3['fitTime'] < postRgnStop())]
post = np.concatenate([post, post2])
else:
pre = ev3
        sp2.setData(x=pre['fitDecayTau'], y=pre['fitAmplitude'], data=pre)
#print " inhibitory:", np.median(ev2['fitDecayTau']), np.median(ev2['fitAmplitude'])
if separateCheck.isChecked():
sp3.setData(x=post['fitDecayTau'], y=post['fitAmplitude'], data=post)
sp3.show()
else:
sp3.hide()
try:
typ = ev2[0]['CellType']
except:
typ = ev3[0]['CellType']
sr = spontRate(ev2, numExSites)
sri = spontRate(ev3, numInSites)
title = "%s -- %s --- <span style='color: #99F;'>ex:</span> %s %s %s %0.1fHz --- <span style='color: #F99;'>in:</span> %s %s %s %0.1fHz" % (
dh.name(relativeTo=dh.parent().parent().parent()),
typ,
pg.siFormat(np.median(ev2['fitTimeToPeak']), error=np.std(ev2['fitTimeToPeak']), space=False, suffix='s'),
pg.siFormat(np.median(ev2['fitDecayTau']), error=np.std(ev2['fitDecayTau']), space=False, suffix='s'),
pg.siFormat(np.median(ev2['fitAmplitude']), error=np.std(ev2['fitAmplitude']), space=False, suffix='A'),
sr,
pg.siFormat(np.median(ev3['fitTimeToPeak']), error=np.std(ev3['fitTimeToPeak']), space=False, suffix='s'),
pg.siFormat(np.median(ev3['fitDecayTau']), error=np.std(ev3['fitDecayTau']), space=False, suffix='s'),
pg.siFormat(np.median(ev3['fitAmplitude']), error=np.std(ev3['fitAmplitude']), space=False, suffix='A'),
sri)
print(re.sub(r'<[^>]+>', '', title))
pw1.setTitle(title)
### show cell in atlas
#rec = db.select('CochlearNucleus_Cell', where={'CellDir': cell})
#pts = []
#if len(rec) > 0:
#pos = (rec[0]['right'], rec[0]['anterior'], rec[0]['dorsal'])
#pts = [{'pos': pos, 'size': 100e-6, 'color': (0.7, 0.7, 1.0, 1.0)}]
### show event positions
evSpots = {}
for rec in ev:
p = (rec['right'], rec['anterior'], rec['dorsal'])
evSpots[p] = None
pos = np.array(list(evSpots.keys()))
atlasPoints.setData(pos=pos, )
def spontRate(ev, n):
## This is broken. It does not take into account recordings that had no events.
ev = ev[ev['fitTime'] < preRgnStop()]
#count = {}
#dirs = set()
#for i in range(len(ev)):
#key = (ev[i]['ProtocolSequenceDir'], ev[i]['SourceFile'])
#dirs.add(set)
#if key not in count:
#count[key] = 0
#count[key] += 1
#sr = np.mean([v/(preRgnStop()) for v in count.itervalues()])
if n == 0:
return 0
return len(ev) / (preRgnStop() * n)
def preRgnStop():
return postRgnStartSpin.value() - 0.002
def postRgnStart():
return postRgnStartSpin.value() + 0.002
def postRgnStop():
return postRgnStopSpin.value()
init()
|
|
# Copyright (c) 2014 Mirantis, Inc.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cgi
import jsonschema
import os
import tempfile
from oslo.config import cfg
from oslo.db import exception as db_exc
from webob import exc
import murano.api.v1
from murano.api.v1 import schemas
from murano.common import policy
from murano.common import wsgi
from murano.db.catalog import api as db_api
from murano.openstack.common import exception
from murano.openstack.common.gettextutils import _ # noqa
from murano.openstack.common import log as logging
from murano.packages import exceptions as pkg_exc
from murano.packages import load_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SUPPORTED_PARAMS = murano.api.v1.SUPPORTED_PARAMS
LIST_PARAMS = murano.api.v1.LIST_PARAMS
ORDER_VALUES = murano.api.v1.ORDER_VALUES
PKG_PARAMS_MAP = murano.api.v1.PKG_PARAMS_MAP
def _check_content_type(req, content_type):
try:
req.get_content_type((content_type,))
except exception.InvalidContentType:
msg = _("Content-Type must be '{0}'").format(content_type)
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _get_filters(query_params):
filters = {}
for param_pair in query_params:
k, v = param_pair
if k not in SUPPORTED_PARAMS:
LOG.warning(_("Search by parameter '{name}' "
"is not supported. Skipping it.").format(name=k))
continue
if k in LIST_PARAMS:
filters.setdefault(k, []).append(v)
else:
filters[k] = v
order_by = filters.get('order_by', [])
for i in order_by[:]:
if ORDER_VALUES and i not in ORDER_VALUES:
filters['order_by'].remove(i)
LOG.warning(_("Value of 'order_by' parameter is not valid. "
"Allowed values are: {0}. Skipping it.").format(
", ".join(ORDER_VALUES)))
return filters
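# Illustrative example (parameter names assumed to be in SUPPORTED_PARAMS, with
# 'category' in LIST_PARAMS): query parameters such as
#   ?type=Application&category=Databases&category=Web
# would yield {'type': 'Application', 'category': ['Databases', 'Web']}, and
# any 'order_by' value not listed in ORDER_VALUES is dropped with a warning.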
def _validate_body(body):
"""Check multipart/form-data has two parts: text (which is json string and
    should be parsed into a dictionary by the serializer) and file, which is
    stored as a cgi.FieldStorage instance. Also validate that the file size
    doesn't exceed the limit: seek to the end of the file, read the EOF
    position and reset the file position to the beginning
"""
def check_file_size(f):
mb_limit = CONF.packages_opts.package_size_limit
pkg_size_limit = mb_limit * 1024 * 1024
f.seek(0, 2)
size = f.tell()
f.seek(0)
if size > pkg_size_limit:
            raise exc.HTTPBadRequest('Uploaded file is too large.'
                                     ' The limit is {0} Mb'.format(mb_limit))
if len(body.keys()) > 2:
msg = _("'multipart/form-data' request body should contain "
"1 or 2 parts: json string and zip archive. Current body "
"consists of {0} part(s)").format(len(body.keys()))
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
file_obj = None
package_meta = None
for part in body.values():
if isinstance(part, cgi.FieldStorage):
file_obj = part
check_file_size(file_obj.file)
if isinstance(part, dict):
package_meta = part
if file_obj is None:
msg = _('There is no file package with application description')
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
return file_obj, package_meta
class Controller(object):
"""WSGI controller for application catalog resource in Murano v1 API."""
def update(self, req, body, package_id):
"""List of allowed changes:
{ "op": "add", "path": "/tags", "value": [ "foo", "bar" ] }
{ "op": "add", "path": "/categories", "value": [ "foo", "bar" ] }
{ "op": "remove", "path": "/tags" }
{ "op": "remove", "path": "/categories" }
{ "op": "replace", "path": "/tags", "value": ["foo", "bar"] }
{ "op": "replace", "path": "/is_public", "value": true }
{ "op": "replace", "path": "/description",
"value":"New description" }
{ "op": "replace", "path": "/name", "value": "New name" }
"""
policy.check("update_package", req.context, {'package_id': package_id})
_check_content_type(req, 'application/murano-packages-json-patch')
if not isinstance(body, list):
msg = _('Request body must be a JSON array of operation objects.')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
package = db_api.package_update(package_id, body, req.context)
return package.to_dict()
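    # Illustrative request body for update(): a JSON array of the operation
    # objects listed in the docstring above, e.g.
    #   [{"op": "replace", "path": "/is_public", "value": true},
    #    {"op": "add", "path": "/tags", "value": ["db", "backup"]}]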
def get(self, req, package_id):
policy.check("get_package", req.context, {'package_id': package_id})
package = db_api.package_get(package_id, req.context)
return package.to_dict()
def search(self, req):
def _validate_limit(value):
if value is None:
return
try:
value = int(value)
except ValueError:
msg = _("limit param must be an integer")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if value <= 0:
msg = _("limit param must be positive")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return value
policy.check("search_packages", req.context)
filters = _get_filters(req.GET.items())
limit = _validate_limit(filters.get('limit'))
if limit is None:
limit = CONF.packages_opts.limit_param_default
limit = min(CONF.packages_opts.api_limit_max, limit)
result = {}
packages = db_api.package_search(filters, req.context, limit)
if len(packages) == limit:
result['next_marker'] = packages[-1].id
result['packages'] = [package.to_dict() for package in packages]
return result
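    # Pagination note: when exactly `limit` packages come back, the id of the
    # last one is returned as 'next_marker' so a client can continue the
    # listing from that point on a subsequent request.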
def upload(self, req, body=None):
"""Upload new file archive for the new package
together with package metadata.
"""
policy.check("upload_package", req.context)
_check_content_type(req, 'multipart/form-data')
file_obj, package_meta = _validate_body(body)
if package_meta:
try:
jsonschema.validate(package_meta, schemas.PKG_UPLOAD_SCHEMA)
except jsonschema.ValidationError as e:
LOG.exception(e)
raise exc.HTTPBadRequest(explanation=e.message)
else:
package_meta = {}
with tempfile.NamedTemporaryFile(delete=False) as tempf:
LOG.debug("Storing package archive in a temporary file")
content = file_obj.file.read()
if not content:
                msg = _("Uploaded file can't be empty")
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
tempf.write(content)
package_meta['archive'] = content
try:
pkg_to_upload = load_utils.load_from_file(
tempf.name, target_dir=None, drop_dir=True)
except pkg_exc.PackageLoadError as e:
LOG.exception(e)
raise exc.HTTPBadRequest(e)
finally:
LOG.debug("Deleting package archive temporary file")
os.remove(tempf.name)
        # extend the dictionary with package attributes before updating the db
for k, v in PKG_PARAMS_MAP.iteritems():
if hasattr(pkg_to_upload, k):
package_meta[v] = getattr(pkg_to_upload, k)
if req.params.get('is_public', '').lower() == 'true':
policy.check('publicize_image', req.context)
package_meta['is_public'] = True
try:
package = db_api.package_upload(package_meta, req.context.tenant)
except db_exc.DBDuplicateEntry:
msg = _('Package with specified full name is already registered')
LOG.exception(msg)
raise exc.HTTPServerError(msg)
return package.to_dict()
def get_ui(self, req, package_id):
target = {'package_id': package_id}
policy.check("get_package_ui", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.ui_definition
def get_logo(self, req, package_id):
target = {'package_id': package_id}
policy.check("get_package_logo", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.logo
def get_supplier_logo(self, req, package_id):
package = db_api.package_get(package_id, req.context)
return package.supplier_logo
def download(self, req, package_id):
target = {'package_id': package_id}
policy.check("download_package", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.archive
def delete(self, req, package_id):
target = {'package_id': package_id}
policy.check("delete_package", req.context, target)
db_api.package_delete(package_id, req.context)
def show_categories(self, req):
policy.check("show_categories", req.context)
categories = db_api.categories_list()
return {'categories': [category.name for category in categories]}
class PackageSerializer(wsgi.ResponseSerializer):
def serialize(self, action_result, accept, action):
if action == 'get_ui':
accept = 'text/plain'
elif action in ('download', 'get_logo', 'get_supplier_logo'):
accept = 'application/octet-stream'
return super(PackageSerializer, self).serialize(action_result,
accept,
action)
def create_resource():
serializer = PackageSerializer()
return wsgi.Resource(Controller(), serializer=serializer)
|
|
from octopy.utils import *
import java.lang
import jarray
from ru.parallel.octotron.generators.tmpl import VarTemplate
OCTO_PACKAGE = "ru.parallel.octotron"
class Rule(object):
def __init__(self, args):
self.args = args
def GetOcto(self):
c = java.lang.Class.forName(OCTO_PACKAGE + ".rules." + type(self).__name__)
cons = c.getConstructors()[0]
return cons.newInstance(*self.args)
def GetPlainOcto(self):
c = java.lang.Class.forName(OCTO_PACKAGE + ".rules.plain." + type(self).__name__)
cons = c.getConstructors()[0]
return cons.newInstance(*self.args)
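# Usage sketch (hypothetical attribute name and threshold): a rule such as
# GT("free", 10) only stores its arguments; GetOcto()/GetPlainOcto() then
# reflectively instantiate the matching Java class
# (ru.parallel.octotron.rules[.plain].GT) via the first constructor returned
# by getConstructors(). VarsFromDict() below tries the non-plain package first
# and falls back to the plain one on ClassNotFoundException.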
# ASoft
class ASoftDoubleSum(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.String)))
class ASoftLongSum(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.String)))
class ASoftMatchCount(Rule):
def __init__(self, arg1, arg2, *arg3):
Rule.__init__(self, (arg1, arg2, jarray.array(arg3, java.lang.String)))
class ASoftNotMatchCount(Rule):
def __init__(self, arg1, arg2, *arg3):
Rule.__init__(self, (arg1, arg2, jarray.array(arg3, java.lang.String)))
# AStrict
class AStrictDoubleSum(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.String)))
class AStrictLongSum(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.String)))
class AStrictMatchCount(Rule):
def __init__(self, arg1, arg2, *arg3):
Rule.__init__(self, (arg1, arg2, jarray.array(arg3, java.lang.String)))
class AStrictNotMatchCount(Rule):
def __init__(self, arg1, arg2, *arg3):
Rule.__init__(self, (arg1, arg2, jarray.array(arg3, java.lang.String)))
# valid/invalid
class AValidCount(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.String)))
class AInvalidCount(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.String)))
class RequireSomeValid(Rule):
def __init__(self, arg1, arg2, arg3, *arg4):
Rule.__init__(self, (arg1, arg2, arg3, jarray.array(arg4, java.lang.String)))
class RequireAllValid(Rule):
def __init__(self, arg1, arg2, *arg3):
Rule.__init__(self, (arg1, arg2, jarray.array(arg3, java.lang.String)))
class ValueIfSomeValid(Rule):
def __init__(self, arg1, arg2, arg3, *arg4):
Rule.__init__(self, (arg1, arg2, arg3, jarray.array(arg4, java.lang.String)))
class ValueIfAllValid(Rule):
def __init__(self, arg1, arg2, *arg3):
Rule.__init__(self, (arg1, arg2, jarray.array(arg3, java.lang.String)))
# logical
class StrictLogicalAnd(Rule):
def __init__(self, *arg1):
Rule.__init__(self, (jarray.array(arg1, java.lang.String),))
class StrictLogicalOr(Rule):
def __init__(self, *arg1):
Rule.__init__(self, (jarray.array(arg1, java.lang.String),))
class SoftLogicalAnd(Rule):
def __init__(self, *arg1):
Rule.__init__(self, (jarray.array(arg1, java.lang.String),))
class SoftLogicalOr(Rule):
def __init__(self, *arg1):
Rule.__init__(self, (jarray.array(arg1, java.lang.String),))
# plain
class Manual(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class GT(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class GTArg(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class GE(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class GEArg(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class LinkedMatch(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class LinkedNotMatch(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class LT(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class LTArg(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class LE(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class LEArg(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class Match(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class MatchAprx(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class MatchArg(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class MatchArgAprx(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class NotMatch(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class NotMatchArg(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
# single
class Speed(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class ToPct(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class ToArgPct(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class Changed(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class ContainsString(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class Interval(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.Object),))
class CheckedInterval(Rule):
def __init__(self, arg1, *arg2):
Rule.__init__(self, (arg1, jarray.array(arg2, java.lang.Object),))
class MirrorBoolean(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class MirrorDouble(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class MirrorLong(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
class MirrorString(Rule):
def __init__(self, *args):
Rule.__init__(self, args)
# utils
def VarsFromDict(varyings_dict):
res = []
for name, rule in varyings_dict.items():
if len(rule) > 1:
raise RuntimeError("duplicated var: " + name + " : " + str(rule))
try:
res.append(VarTemplate(name, rule[0].GetOcto()))
except java.lang.ClassNotFoundException:
res.append(VarTemplate(name, rule[0].GetPlainOcto()))
return res
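# Hypothetical usage: VarsFromDict({"free_pct": [ToPct("free", "total")]})
# returns a single VarTemplate named "free_pct" wrapping the reflected rule;
# supplying more than one rule under a name raises the "duplicated var" error.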
def ConvertVars(var):
return VarsFromDict(MergeDicts(var))
|
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Tests for swift.common.swob"
import datetime
import unittest
import re
import time
from StringIO import StringIO
from urllib import quote
import swift.common.swob
from swift.common import utils, exceptions
class TestHeaderEnvironProxy(unittest.TestCase):
def test_proxy(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assertEquals(
proxy.environ, {'CONTENT_LENGTH': '20',
'CONTENT_TYPE': 'text/plain',
'HTTP_SOMETHING_ELSE': 'somevalue'})
self.assertEquals(proxy['content-length'], '20')
self.assertEquals(proxy['content-type'], 'text/plain')
self.assertEquals(proxy['something-else'], 'somevalue')
def test_del(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
del proxy['Content-Length']
del proxy['Content-Type']
del proxy['Something-Else']
self.assertEquals(proxy.environ, {})
def test_contains(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assert_('content-length' in proxy)
self.assert_('content-type' in proxy)
self.assert_('something-else' in proxy)
def test_keys(self):
environ = {}
proxy = swift.common.swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assertEquals(
set(proxy.keys()),
set(('Content-Length', 'Content-Type', 'Something-Else')))
class TestHeaderKeyDict(unittest.TestCase):
def test_case_insensitive(self):
headers = swift.common.swob.HeaderKeyDict()
headers['Content-Length'] = 0
headers['CONTENT-LENGTH'] = 10
headers['content-length'] = 20
self.assertEquals(headers['Content-Length'], '20')
self.assertEquals(headers['content-length'], '20')
self.assertEquals(headers['CONTENT-LENGTH'], '20')
def test_setdefault(self):
headers = swift.common.swob.HeaderKeyDict()
# it gets set
headers.setdefault('x-rubber-ducky', 'the one')
self.assertEquals(headers['X-Rubber-Ducky'], 'the one')
# it has the right return value
ret = headers.setdefault('x-boat', 'dinghy')
self.assertEquals(ret, 'dinghy')
ret = headers.setdefault('x-boat', 'yacht')
self.assertEquals(ret, 'dinghy')
# shouldn't crash
headers.setdefault('x-sir-not-appearing-in-this-request', None)
def test_del_contains(self):
headers = swift.common.swob.HeaderKeyDict()
headers['Content-Length'] = 0
self.assert_('Content-Length' in headers)
del headers['Content-Length']
self.assert_('Content-Length' not in headers)
def test_update(self):
headers = swift.common.swob.HeaderKeyDict()
headers.update({'Content-Length': '0'})
headers.update([('Content-Type', 'text/plain')])
self.assertEquals(headers['Content-Length'], '0')
self.assertEquals(headers['Content-Type'], 'text/plain')
def test_get(self):
headers = swift.common.swob.HeaderKeyDict()
headers['content-length'] = 20
self.assertEquals(headers.get('CONTENT-LENGTH'), '20')
self.assertEquals(headers.get('something-else'), None)
self.assertEquals(headers.get('something-else', True), True)
def test_keys(self):
headers = swift.common.swob.HeaderKeyDict()
headers['content-length'] = 20
headers['cOnTent-tYpe'] = 'text/plain'
headers['SomeThing-eLse'] = 'somevalue'
self.assertEquals(
set(headers.keys()),
set(('Content-Length', 'Content-Type', 'Something-Else')))
class TestRange(unittest.TestCase):
def test_range(self):
range = swift.common.swob.Range('bytes=1-7')
self.assertEquals(range.ranges[0], (1, 7))
def test_upsidedown_range(self):
range = swift.common.swob.Range('bytes=5-10')
self.assertEquals(range.ranges_for_length(2), [])
def test_str(self):
for range_str in ('bytes=1-7', 'bytes=1-', 'bytes=-1',
'bytes=1-7,9-12', 'bytes=-7,9-'):
range = swift.common.swob.Range(range_str)
self.assertEquals(str(range), range_str)
def test_ranges_for_length(self):
range = swift.common.swob.Range('bytes=1-7')
self.assertEquals(range.ranges_for_length(10), [(1, 8)])
self.assertEquals(range.ranges_for_length(5), [(1, 5)])
self.assertEquals(range.ranges_for_length(None), None)
def test_ranges_for_large_length(self):
range = swift.common.swob.Range('bytes=-1000000000000000000000000000')
self.assertEquals(range.ranges_for_length(100), [(0, 100)])
def test_ranges_for_length_no_end(self):
range = swift.common.swob.Range('bytes=1-')
self.assertEquals(range.ranges_for_length(10), [(1, 10)])
self.assertEquals(range.ranges_for_length(5), [(1, 5)])
self.assertEquals(range.ranges_for_length(None), None)
# This used to freak out:
range = swift.common.swob.Range('bytes=100-')
self.assertEquals(range.ranges_for_length(5), [])
self.assertEquals(range.ranges_for_length(None), None)
range = swift.common.swob.Range('bytes=4-6,100-')
self.assertEquals(range.ranges_for_length(5), [(4, 5)])
def test_ranges_for_length_no_start(self):
range = swift.common.swob.Range('bytes=-7')
self.assertEquals(range.ranges_for_length(10), [(3, 10)])
self.assertEquals(range.ranges_for_length(5), [(0, 5)])
self.assertEquals(range.ranges_for_length(None), None)
range = swift.common.swob.Range('bytes=4-6,-100')
self.assertEquals(range.ranges_for_length(5), [(4, 5), (0, 5)])
def test_ranges_for_length_multi(self):
range = swift.common.swob.Range('bytes=-20,4-')
self.assertEquals(len(range.ranges_for_length(200)), 2)
# the actual length greater than each range element
self.assertEquals(range.ranges_for_length(200), [(180, 200), (4, 200)])
range = swift.common.swob.Range('bytes=30-150,-10')
self.assertEquals(len(range.ranges_for_length(200)), 2)
# the actual length lands in the middle of a range
self.assertEquals(range.ranges_for_length(90), [(30, 90), (80, 90)])
# the actual length greater than any of the range
self.assertEquals(range.ranges_for_length(200),
[(30, 151), (190, 200)])
self.assertEquals(range.ranges_for_length(None), None)
def test_ranges_for_length_edges(self):
range = swift.common.swob.Range('bytes=0-1, -7')
self.assertEquals(range.ranges_for_length(10),
[(0, 2), (3, 10)])
range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEquals(range.ranges_for_length(10),
[(3, 10), (0, 2)])
range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEquals(range.ranges_for_length(5),
[(0, 5), (0, 2)])
def test_ranges_for_length_overlapping(self):
# Fewer than 3 overlaps is okay
range = swift.common.swob.Range('bytes=10-19,15-24')
self.assertEquals(range.ranges_for_length(100),
[(10, 20), (15, 25)])
range = swift.common.swob.Range('bytes=10-19,15-24,20-29')
self.assertEquals(range.ranges_for_length(100),
[(10, 20), (15, 25), (20, 30)])
# Adjacent ranges, though suboptimal, don't overlap
range = swift.common.swob.Range('bytes=10-19,20-29,30-39')
self.assertEquals(range.ranges_for_length(100),
[(10, 20), (20, 30), (30, 40)])
# Ranges that share a byte do overlap
range = swift.common.swob.Range('bytes=10-20,20-30,30-40,40-50')
self.assertEquals(range.ranges_for_length(100), [])
# With suffix byte range specs (e.g. bytes=-2), make sure that we
# correctly determine overlapping-ness based on the entity length
range = swift.common.swob.Range('bytes=10-15,15-20,30-39,-9')
self.assertEquals(range.ranges_for_length(100),
[(10, 16), (15, 21), (30, 40), (91, 100)])
self.assertEquals(range.ranges_for_length(20), [])
def test_ranges_for_length_nonascending(self):
few_ranges = ("bytes=100-109,200-209,300-309,500-509,"
"400-409,600-609,700-709")
many_ranges = few_ranges + ",800-809"
range = swift.common.swob.Range(few_ranges)
self.assertEquals(range.ranges_for_length(100000),
[(100, 110), (200, 210), (300, 310), (500, 510),
(400, 410), (600, 610), (700, 710)])
range = swift.common.swob.Range(many_ranges)
self.assertEquals(range.ranges_for_length(100000), [])
def test_ranges_for_length_too_many(self):
at_the_limit_ranges = (
"bytes=" + ",".join("%d-%d" % (x * 1000, x * 1000 + 10)
for x in range(50)))
too_many_ranges = at_the_limit_ranges + ",10000000-10000009"
rng = swift.common.swob.Range(at_the_limit_ranges)
self.assertEquals(len(rng.ranges_for_length(1000000000)), 50)
rng = swift.common.swob.Range(too_many_ranges)
self.assertEquals(rng.ranges_for_length(1000000000), [])
def test_range_invalid_syntax(self):
def _check_invalid_range(range_value):
try:
swift.common.swob.Range(range_value)
return False
except ValueError:
return True
"""
        All of the following cases should raise a ValueError:
        1. the value does not start with 'bytes='
        2. the range start is greater than the end, e.g. bytes=5-3
        3. the range is missing both start and end, e.g. bytes=-
        4. the range has no hyphen, e.g. bytes=45
        5. the range value is non-numeric
        6. any combination of the above
"""
self.assert_(_check_invalid_range('nonbytes=foobar,10-2'))
self.assert_(_check_invalid_range('bytes=5-3'))
self.assert_(_check_invalid_range('bytes=-'))
self.assert_(_check_invalid_range('bytes=45'))
self.assert_(_check_invalid_range('bytes=foo-bar,3-5'))
self.assert_(_check_invalid_range('bytes=4-10,45'))
self.assert_(_check_invalid_range('bytes=foobar,3-5'))
self.assert_(_check_invalid_range('bytes=nonumber-5'))
self.assert_(_check_invalid_range('bytes=nonumber'))
class TestMatch(unittest.TestCase):
def test_match(self):
match = swift.common.swob.Match('"a", "b"')
self.assertEquals(match.tags, set(('a', 'b')))
self.assert_('a' in match)
self.assert_('b' in match)
self.assert_('c' not in match)
def test_match_star(self):
match = swift.common.swob.Match('"a", "*"')
self.assert_('a' in match)
self.assert_('b' in match)
self.assert_('c' in match)
def test_match_noquote(self):
match = swift.common.swob.Match('a, b')
self.assertEquals(match.tags, set(('a', 'b')))
self.assert_('a' in match)
self.assert_('b' in match)
self.assert_('c' not in match)
class TestAccept(unittest.TestCase):
def test_accept_json(self):
for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/json;q=1.0', 'application/*',
'text/*,application/json', 'application/*,text/*',
'application/json,text/xml'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/json',
'application/xml', 'text/xml'])
self.assertEquals(match, 'application/json')
def test_accept_plain(self):
for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=0.8', '*/*',
'text/plain,application/xml'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/json',
'application/xml', 'text/xml'])
self.assertEquals(match, 'text/plain')
def test_accept_xml(self):
for accept in ('application/xml', 'application/xml;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=1.0',
'application/xml;charset=UTF-8',
'application/xml;charset=UTF-8;qws="quoted with space"',
'application/xml; q=0.99 ; qws="quoted with space"'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/xml',
'text/xml'])
self.assertEquals(match, 'application/xml')
def test_accept_invalid(self):
for accept in ('*', 'text/plain,,', 'some stuff',
'application/xml;q=1.0;q=1.1', 'text/plain,*',
'text /plain', 'text\x7f/plain',
'text/plain;a=b=c',
'text/plain;q=1;q=2',
'text/plain; ubq="unbalanced " quotes"'):
acc = swift.common.swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/xml',
'text/xml'])
self.assertEquals(match, None)
def test_repr(self):
acc = swift.common.swob.Accept("application/json")
self.assertEquals(repr(acc), "application/json")
class TestRequest(unittest.TestCase):
def test_blank(self):
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'}, body='hi')
self.assertEquals(req.path_info, '/')
self.assertEquals(req.body, 'hi')
self.assertEquals(req.headers['Content-Type'], 'text/plain')
self.assertEquals(req.method, 'POST')
def test_blank_req_environ_property_args(self):
blank = swift.common.swob.Request.blank
req = blank('/', method='PATCH')
self.assertEquals(req.method, 'PATCH')
self.assertEquals(req.environ['REQUEST_METHOD'], 'PATCH')
req = blank('/', referer='http://example.com')
self.assertEquals(req.referer, 'http://example.com')
self.assertEquals(req.referrer, 'http://example.com')
self.assertEquals(req.environ['HTTP_REFERER'], 'http://example.com')
self.assertEquals(req.headers['Referer'], 'http://example.com')
req = blank('/', script_name='/application')
self.assertEquals(req.script_name, '/application')
self.assertEquals(req.environ['SCRIPT_NAME'], '/application')
req = blank('/', host='www.example.com')
self.assertEquals(req.host, 'www.example.com')
self.assertEquals(req.environ['HTTP_HOST'], 'www.example.com')
self.assertEquals(req.headers['Host'], 'www.example.com')
req = blank('/', remote_addr='127.0.0.1')
self.assertEquals(req.remote_addr, '127.0.0.1')
self.assertEquals(req.environ['REMOTE_ADDR'], '127.0.0.1')
req = blank('/', remote_user='username')
self.assertEquals(req.remote_user, 'username')
self.assertEquals(req.environ['REMOTE_USER'], 'username')
req = blank('/', user_agent='curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEquals(req.user_agent, 'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEquals(req.environ['HTTP_USER_AGENT'],
'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEquals(req.headers['User-Agent'],
'curl/7.22.0 (x86_64-pc-linux-gnu)')
req = blank('/', query_string='a=b&c=d')
self.assertEquals(req.query_string, 'a=b&c=d')
self.assertEquals(req.environ['QUERY_STRING'], 'a=b&c=d')
req = blank('/', if_match='*')
self.assertEquals(req.environ['HTTP_IF_MATCH'], '*')
self.assertEquals(req.headers['If-Match'], '*')
# multiple environ property kwargs
req = blank('/', method='PATCH', referer='http://example.com',
script_name='/application', host='www.example.com',
remote_addr='127.0.0.1', remote_user='username',
user_agent='curl/7.22.0 (x86_64-pc-linux-gnu)',
query_string='a=b&c=d', if_match='*')
self.assertEquals(req.method, 'PATCH')
self.assertEquals(req.referer, 'http://example.com')
self.assertEquals(req.script_name, '/application')
self.assertEquals(req.host, 'www.example.com')
self.assertEquals(req.remote_addr, '127.0.0.1')
self.assertEquals(req.remote_user, 'username')
self.assertEquals(req.user_agent, 'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEquals(req.query_string, 'a=b&c=d')
self.assertEquals(req.environ['QUERY_STRING'], 'a=b&c=d')
def test_invalid_req_environ_property_args(self):
# getter only property
try:
swift.common.swob.Request.blank('/', params={'a': 'b'})
except TypeError as e:
self.assertEquals("got unexpected keyword argument 'params'",
str(e))
else:
self.assert_(False, "invalid req_environ_property "
"didn't raise error!")
# regular attribute
try:
swift.common.swob.Request.blank('/', _params_cache={'a': 'b'})
except TypeError as e:
self.assertEquals("got unexpected keyword "
"argument '_params_cache'", str(e))
else:
self.assert_(False, "invalid req_environ_property "
"didn't raise error!")
# non-existent attribute
try:
swift.common.swob.Request.blank('/', params_cache={'a': 'b'})
except TypeError as e:
self.assertEquals("got unexpected keyword "
"argument 'params_cache'", str(e))
else:
self.assert_(False, "invalid req_environ_property "
"didn't raise error!")
# method
try:
swift.common.swob.Request.blank(
'/', as_referer='GET http://example.com')
except TypeError as e:
self.assertEquals("got unexpected keyword "
"argument 'as_referer'", str(e))
else:
self.assert_(False, "invalid req_environ_property "
"didn't raise error!")
def test_blank_path_info_precedence(self):
blank = swift.common.swob.Request.blank
req = blank('/a')
self.assertEquals(req.path_info, '/a')
req = blank('/a', environ={'PATH_INFO': '/a/c'})
self.assertEquals(req.path_info, '/a/c')
req = blank('/a', environ={'PATH_INFO': '/a/c'}, path_info='/a/c/o')
self.assertEquals(req.path_info, '/a/c/o')
req = blank('/a', path_info='/a/c/o')
self.assertEquals(req.path_info, '/a/c/o')
def test_blank_body_precedence(self):
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': StringIO('')},
headers={'Content-Type': 'text/plain'}, body='hi')
self.assertEquals(req.path_info, '/')
self.assertEquals(req.body, 'hi')
self.assertEquals(req.headers['Content-Type'], 'text/plain')
self.assertEquals(req.method, 'POST')
body_file = StringIO('asdf')
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': StringIO('')},
headers={'Content-Type': 'text/plain'}, body='hi',
body_file=body_file)
self.assert_(req.body_file is body_file)
req = swift.common.swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': StringIO('')},
headers={'Content-Type': 'text/plain'}, body='hi',
content_length=3)
self.assertEquals(req.content_length, 3)
self.assertEquals(len(req.body), 2)
def test_blank_parsing(self):
req = swift.common.swob.Request.blank('http://test.com/')
self.assertEquals(req.environ['wsgi.url_scheme'], 'http')
self.assertEquals(req.environ['SERVER_PORT'], '80')
self.assertEquals(req.environ['SERVER_NAME'], 'test.com')
req = swift.common.swob.Request.blank('https://test.com:456/')
self.assertEquals(req.environ['wsgi.url_scheme'], 'https')
self.assertEquals(req.environ['SERVER_PORT'], '456')
req = swift.common.swob.Request.blank('test.com/')
self.assertEquals(req.environ['wsgi.url_scheme'], 'http')
self.assertEquals(req.environ['SERVER_PORT'], '80')
self.assertEquals(req.environ['PATH_INFO'], 'test.com/')
self.assertRaises(TypeError, swift.common.swob.Request.blank,
'ftp://test.com/')
def test_params(self):
req = swift.common.swob.Request.blank('/?a=b&c=d')
self.assertEquals(req.params['a'], 'b')
self.assertEquals(req.params['c'], 'd')
def test_timestamp_missing(self):
req = swift.common.swob.Request.blank('/')
self.assertRaises(exceptions.InvalidTimestamp,
getattr, req, 'timestamp')
def test_timestamp_invalid(self):
req = swift.common.swob.Request.blank(
'/', headers={'X-Timestamp': 'asdf'})
self.assertRaises(exceptions.InvalidTimestamp,
getattr, req, 'timestamp')
def test_timestamp(self):
req = swift.common.swob.Request.blank(
'/', headers={'X-Timestamp': '1402447134.13507_00000001'})
expected = utils.Timestamp('1402447134.13507', offset=1)
self.assertEqual(req.timestamp, expected)
self.assertEqual(req.timestamp.normal, expected.normal)
self.assertEqual(req.timestamp.internal, expected.internal)
def test_path(self):
req = swift.common.swob.Request.blank('/hi?a=b&c=d')
self.assertEquals(req.path, '/hi')
req = swift.common.swob.Request.blank(
'/', environ={'SCRIPT_NAME': '/hi', 'PATH_INFO': '/there'})
self.assertEquals(req.path, '/hi/there')
def test_path_question_mark(self):
req = swift.common.swob.Request.blank('/test%3Ffile')
# This tests that .blank unquotes the path when setting PATH_INFO
self.assertEquals(req.environ['PATH_INFO'], '/test?file')
# This tests that .path requotes it
self.assertEquals(req.path, '/test%3Ffile')
def test_path_info_pop(self):
req = swift.common.swob.Request.blank('/hi/there')
self.assertEquals(req.path_info_pop(), 'hi')
self.assertEquals(req.path_info, '/there')
self.assertEquals(req.script_name, '/hi')
def test_bad_path_info_pop(self):
req = swift.common.swob.Request.blank('blahblah')
self.assertEquals(req.path_info_pop(), None)
def test_path_info_pop_last(self):
req = swift.common.swob.Request.blank('/last')
self.assertEquals(req.path_info_pop(), 'last')
self.assertEquals(req.path_info, '')
self.assertEquals(req.script_name, '/last')
def test_path_info_pop_none(self):
req = swift.common.swob.Request.blank('/')
self.assertEquals(req.path_info_pop(), '')
self.assertEquals(req.path_info, '')
self.assertEquals(req.script_name, '/')
def test_copy_get(self):
req = swift.common.swob.Request.blank(
'/hi/there', environ={'REQUEST_METHOD': 'POST'})
self.assertEquals(req.method, 'POST')
req2 = req.copy_get()
self.assertEquals(req2.method, 'GET')
def test_get_response(self):
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body, 'hi')
def test_401_unauthorized(self):
# No request environment
resp = swift.common.swob.HTTPUnauthorized()
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
# Request environment
req = swift.common.swob.Request.blank('/')
resp = swift.common.swob.HTTPUnauthorized(request=req)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
def test_401_valid_account_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['hi']
# Request environment contains valid account in path
req = swift.common.swob.Request.blank('/v1/account-name')
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Swift realm="account-name"',
resp.headers['Www-Authenticate'])
# Request environment contains valid account/container in path
req = swift.common.swob.Request.blank('/v1/account-name/c')
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Swift realm="account-name"',
resp.headers['Www-Authenticate'])
def test_401_invalid_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['hi']
# Request environment contains bad path
req = swift.common.swob.Request.blank('/random')
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
def test_401_non_keystone_auth_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['no creds in request']
# Request to get token
req = swift.common.swob.Request.blank('/v1.0/auth')
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
# Other form of path
req = swift.common.swob.Request.blank('/auth/v1.0')
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
def test_401_www_authenticate_exists(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', {
'Www-Authenticate': 'Me realm="whatever"'})
return ['no creds in request']
# Auth middleware sets own Www-Authenticate
req = swift.common.swob.Request.blank('/auth/v1.0')
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Me realm="whatever"',
resp.headers['Www-Authenticate'])
def test_401_www_authenticate_is_quoted(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return ['hi']
hacker = 'account-name\n\n<b>foo<br>' # url injection test
quoted_hacker = quote(hacker)
req = swift.common.swob.Request.blank('/v1/' + hacker)
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Swift realm="%s"' % quoted_hacker,
resp.headers['Www-Authenticate'])
req = swift.common.swob.Request.blank('/v1/' + quoted_hacker)
resp = req.get_response(test_app)
self.assertEquals(resp.status_int, 401)
self.assert_('Www-Authenticate' in resp.headers)
self.assertEquals('Swift realm="%s"' % quoted_hacker,
resp.headers['Www-Authenticate'])
def test_not_401(self):
# Other status codes should not have WWW-Authenticate in response
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
resp = req.get_response(test_app)
self.assert_('Www-Authenticate' not in resp.headers)
def test_properties(self):
req = swift.common.swob.Request.blank('/hi/there', body='hi')
self.assertEquals(req.body, 'hi')
self.assertEquals(req.content_length, 2)
req.remote_addr = 'something'
self.assertEquals(req.environ['REMOTE_ADDR'], 'something')
req.body = 'whatever'
self.assertEquals(req.content_length, 8)
self.assertEquals(req.body, 'whatever')
self.assertEquals(req.method, 'GET')
req.range = 'bytes=1-7'
self.assertEquals(req.range.ranges[0], (1, 7))
self.assert_('Range' in req.headers)
req.range = None
self.assert_('Range' not in req.headers)
def test_datetime_properties(self):
req = swift.common.swob.Request.blank('/hi/there', body='hi')
req.if_unmodified_since = 0
self.assert_(isinstance(req.if_unmodified_since, datetime.datetime))
if_unmodified_since = req.if_unmodified_since
req.if_unmodified_since = if_unmodified_since
self.assertEquals(if_unmodified_since, req.if_unmodified_since)
req.if_unmodified_since = 'something'
self.assertEquals(req.headers['If-Unmodified-Since'], 'something')
self.assertEquals(req.if_unmodified_since, None)
self.assert_('If-Unmodified-Since' in req.headers)
req.if_unmodified_since = None
self.assert_('If-Unmodified-Since' not in req.headers)
too_big_date_list = list(datetime.datetime.max.timetuple())
too_big_date_list[0] += 1 # bump up the year
too_big_date = time.strftime(
"%a, %d %b %Y %H:%M:%S UTC", time.struct_time(too_big_date_list))
req.if_unmodified_since = too_big_date
self.assertEqual(req.if_unmodified_since, None)
def test_bad_range(self):
req = swift.common.swob.Request.blank('/hi/there', body='hi')
req.range = 'bad range'
self.assertEquals(req.range, None)
def test_accept_header(self):
req = swift.common.swob.Request({'REQUEST_METHOD': 'GET',
'PATH_INFO': '/',
'HTTP_ACCEPT': 'application/json'})
self.assertEqual(
req.accept.best_match(['application/json', 'text/plain']),
'application/json')
self.assertEqual(
req.accept.best_match(['text/plain', 'application/json']),
'application/json')
def test_swift_entity_path(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.assertEqual(req.swift_entity_path, '/a/c/o')
req = swift.common.swob.Request.blank('/v1/a/c')
self.assertEqual(req.swift_entity_path, '/a/c')
req = swift.common.swob.Request.blank('/v1/a')
self.assertEqual(req.swift_entity_path, '/a')
req = swift.common.swob.Request.blank('/v1')
self.assertEqual(req.swift_entity_path, None)
def test_path_qs(self):
req = swift.common.swob.Request.blank('/hi/there?hello=equal&acl')
self.assertEqual(req.path_qs, '/hi/there?hello=equal&acl')
req = swift.common.swob.Request({'PATH_INFO': '/hi/there',
'QUERY_STRING': 'hello=equal&acl'})
self.assertEqual(req.path_qs, '/hi/there?hello=equal&acl')
def test_url(self):
req = swift.common.swob.Request.blank('/hi/there?hello=equal&acl')
self.assertEqual(req.url,
'http://localhost/hi/there?hello=equal&acl')
def test_wsgify(self):
used_req = []
@swift.common.swob.wsgify
def _wsgi_func(req):
used_req.append(req)
return swift.common.swob.Response('200 OK')
req = swift.common.swob.Request.blank('/hi/there')
resp = req.get_response(_wsgi_func)
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 200)
def test_wsgify_raise(self):
used_req = []
@swift.common.swob.wsgify
def _wsgi_func(req):
used_req.append(req)
raise swift.common.swob.HTTPServerError()
req = swift.common.swob.Request.blank('/hi/there')
resp = req.get_response(_wsgi_func)
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 500)
def test_split_path(self):
"""
Copied from swift.common.utils.split_path
"""
def _test_split_path(path, minsegs=1, maxsegs=None, rwl=False):
req = swift.common.swob.Request.blank(path)
return req.split_path(minsegs, maxsegs, rwl)
self.assertRaises(ValueError, _test_split_path, '')
self.assertRaises(ValueError, _test_split_path, '/')
self.assertRaises(ValueError, _test_split_path, '//')
self.assertEquals(_test_split_path('/a'), ['a'])
self.assertRaises(ValueError, _test_split_path, '//a')
self.assertEquals(_test_split_path('/a/'), ['a'])
self.assertRaises(ValueError, _test_split_path, '/a/c')
self.assertRaises(ValueError, _test_split_path, '//c')
self.assertRaises(ValueError, _test_split_path, '/a/c/')
self.assertRaises(ValueError, _test_split_path, '/a//')
self.assertRaises(ValueError, _test_split_path, '/a', 2)
self.assertRaises(ValueError, _test_split_path, '/a', 2, 3)
self.assertRaises(ValueError, _test_split_path, '/a', 2, 3, True)
self.assertEquals(_test_split_path('/a/c', 2), ['a', 'c'])
self.assertEquals(_test_split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, _test_split_path, '/a/c/o/r', 3, 3)
self.assertEquals(_test_split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEquals(_test_split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, _test_split_path, '/a', 5, 4)
self.assertEquals(_test_split_path('/a/c/', 2), ['a', 'c'])
self.assertEquals(_test_split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
_test_split_path('o\nn e', 2)
except ValueError as err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
try:
_test_split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
def test_unicode_path(self):
req = swift.common.swob.Request.blank(u'/\u2661')
self.assertEquals(req.path, quote(u'/\u2661'.encode('utf-8')))
def test_unicode_query(self):
req = swift.common.swob.Request.blank(u'/')
req.query_string = u'x=\u2661'
self.assertEquals(req.params['x'], u'\u2661'.encode('utf-8'))
def test_url2(self):
pi = '/hi/there'
path = pi
req = swift.common.swob.Request.blank(path)
sche = 'http'
exp_url = '%s://localhost%s' % (sche, pi)
self.assertEqual(req.url, exp_url)
qs = 'hello=equal&acl'
path = '%s?%s' % (pi, qs)
s, p = 'unit.test.example.com', '90'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'SERVER_NAME': s,
'SERVER_PORT': p})
exp_url = '%s://%s:%s%s?%s' % (sche, s, p, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host + ':80'})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com'
sche = 'https'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host + ':443',
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com:81'
req = swift.common.swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host,
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
def test_as_referer(self):
pi = '/hi/there'
qs = 'hello=equal&acl'
sche = 'https'
host = 'unit.test.example.com:81'
req = swift.common.swob.Request({'REQUEST_METHOD': 'POST',
'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host,
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.as_referer(), 'POST ' + exp_url)
def test_message_length_just_content_length(self):
req = swift.common.swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'})
self.assertEquals(req.message_length(), None)
req = swift.common.swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'},
body='x' * 42)
self.assertEquals(req.message_length(), 42)
req.headers['Content-Length'] = 'abc'
try:
req.message_length()
except ValueError as e:
self.assertEquals(str(e), "Invalid Content-Length header value")
else:
self.fail("Expected a ValueError raised for 'abc'")
def test_message_length_transfer_encoding(self):
req = swift.common.swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'},
headers={'transfer-encoding': 'chunked'},
body='x' * 42)
self.assertEquals(req.message_length(), None)
req.headers['Transfer-Encoding'] = 'gzip,chunked'
try:
req.message_length()
except AttributeError as e:
self.assertEquals(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip'")
req.headers['Transfer-Encoding'] = 'gzip'
try:
req.message_length()
except ValueError as e:
self.assertEquals(str(e), "Invalid Transfer-Encoding header value")
else:
self.fail("Expected a ValueError raised for 'gzip'")
req.headers['Transfer-Encoding'] = 'gzip,identity'
try:
req.message_length()
except AttributeError as e:
self.assertEquals(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip,identity'")
class TestStatusMap(unittest.TestCase):
def test_status_map(self):
response_args = []
def start_response(status, headers):
response_args.append(status)
response_args.append(headers)
resp_cls = swift.common.swob.status_map[404]
resp = resp_cls()
self.assertEquals(resp.status_int, 404)
self.assertEquals(resp.title, 'Not Found')
body = ''.join(resp({}, start_response))
self.assert_('The resource could not be found.' in body)
self.assertEquals(response_args[0], '404 Not Found')
headers = dict(response_args[1])
self.assertEquals(headers['Content-Type'], 'text/html; charset=UTF-8')
self.assert_(int(headers['Content-Length']) > 0)
class TestResponse(unittest.TestCase):
def _get_response(self):
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
return req.get_response(test_app)
def test_properties(self):
resp = self._get_response()
resp.location = 'something'
self.assertEquals(resp.location, 'something')
self.assert_('Location' in resp.headers)
resp.location = None
self.assert_('Location' not in resp.headers)
resp.content_type = 'text/plain'
self.assert_('Content-Type' in resp.headers)
resp.content_type = None
self.assert_('Content-Type' not in resp.headers)
def test_empty_body(self):
resp = self._get_response()
resp.body = ''
self.assertEquals(resp.body, '')
def test_unicode_body(self):
resp = self._get_response()
resp.body = u'\N{SNOWMAN}'
self.assertEquals(resp.body, u'\N{SNOWMAN}'.encode('utf-8'))
def test_call_reifies_request_if_necessary(self):
"""
The actual bug was a HEAD response coming out with a body because the
Request object wasn't passed into the Response object's constructor.
The Response object's __call__ method should be able to reify a
Request object from the env it gets passed.
"""
def test_app(environ, start_response):
start_response('200 OK', [])
return ['hi']
req = swift.common.swob.Request.blank('/')
req.method = 'HEAD'
status, headers, app_iter = req.call_application(test_app)
resp = swift.common.swob.Response(status=status, headers=dict(headers),
app_iter=app_iter)
output_iter = resp(req.environ, lambda *_: None)
self.assertEquals(list(output_iter), [''])
def test_call_preserves_closeability(self):
def test_app(environ, start_response):
start_response('200 OK', [])
yield "igloo"
yield "shindig"
yield "macadamia"
yield "hullabaloo"
req = swift.common.swob.Request.blank('/')
req.method = 'GET'
status, headers, app_iter = req.call_application(test_app)
iterator = iter(app_iter)
self.assertEqual('igloo', next(iterator))
self.assertEqual('shindig', next(iterator))
app_iter.close()
self.assertRaises(StopIteration, iterator.next)
def test_location_rewrite(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, 'http://somehost/something')
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:80'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, 'http://somehost/something')
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:443',
'wsgi.url_scheme': 'http'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, 'http://somehost:443/something')
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:443',
'wsgi.url_scheme': 'https'})
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, 'https://somehost/something')
def test_location_rewrite_no_host(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 80})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, 'http://local/something')
req = swift.common.swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 81})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, 'http://local:81/something')
def test_location_no_rewrite(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost'})
resp = self._get_response()
resp.location = 'http://www.google.com/'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, 'http://www.google.com/')
def test_location_no_rewrite_when_told_not_to(self):
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 81,
'swift.leave_relative_location': True})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
''.join(resp(req.environ, start_response))
self.assertEquals(resp.location, '/something')
def test_app_iter(self):
def start_response(env, headers):
pass
resp = self._get_response()
resp.app_iter = ['a', 'b', 'c']
body = ''.join(resp({}, start_response))
self.assertEquals(body, 'abc')
def test_multi_ranges_wo_iter_ranges(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return ['1234567890']
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=0-9,10-19,20-29'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 10
# read response
''.join(resp._response_iter(resp.app_iter, ''))
self.assertEquals(resp.status, '200 OK')
self.assertEqual(10, resp.content_length)
def test_single_range_wo_iter_range(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return ['1234567890']
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=0-9'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 10
# read response
''.join(resp._response_iter(resp.app_iter, ''))
self.assertEquals(resp.status, '200 OK')
self.assertEqual(10, resp.content_length)
def test_multi_range_body(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '4')])
return ['abcd']
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=0-9,10-19,20-29'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 100
resp.content_type = 'text/plain'
content = ''.join(resp._response_iter(None,
('0123456789112345678'
'92123456789')))
self.assert_(re.match(('--[a-f0-9]{32}\r\n'
'Content-Type: text/plain\r\n'
'Content-Range: bytes '
'0-9/100\r\n\r\n0123456789\r\n'
'--[a-f0-9]{32}\r\n'
'Content-Type: text/plain\r\n'
'Content-Range: bytes '
'10-19/100\r\n\r\n1123456789\r\n'
'--[a-f0-9]{32}\r\n'
'Content-Type: text/plain\r\n'
'Content-Range: bytes '
'20-29/100\r\n\r\n2123456789\r\n'
'--[a-f0-9]{32}--'), content))
def test_multi_response_iter(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10'),
('Content-Type', 'application/xml')])
return ['0123456789']
app_iter_ranges_args = []
class App_iter(object):
def app_iter_ranges(self, ranges, content_type, boundary, size):
app_iter_ranges_args.append((ranges, content_type, boundary,
size))
for i in range(3):
yield str(i) + 'fun'
yield boundary
def __iter__(self):
for i in range(3):
yield str(i) + 'fun'
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=1-5,8-11'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 12
content = ''.join(resp._response_iter(App_iter(), ''))
boundary = content[-32:]
self.assertEqual(content[:-32], '0fun1fun2fun')
self.assertEqual(app_iter_ranges_args,
[([(1, 6), (8, 12)], 'application/xml',
boundary, 12)])
def test_range_body(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return ['1234567890']
def start_response(env, headers):
pass
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=1-3'})
resp = swift.common.swob.Response(
body='1234567890', request=req,
conditional_response=True)
body = ''.join(resp([], start_response))
self.assertEquals(body, '234')
self.assertEquals(resp.content_range, 'bytes 1-3/10')
self.assertEquals(resp.status, '206 Partial Content')
        # Syntactically valid, but unsatisfiable (a zero-length suffix range),
        # so 416 is returned in the next couple of cases.
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=-0'})
resp = req.get_response(test_app)
resp.conditional_response = True
body = ''.join(resp([], start_response))
self.assertEquals(body, '')
self.assertEquals(resp.content_length, 0)
self.assertEquals(resp.status, '416 Requested Range Not Satisfiable')
resp = swift.common.swob.Response(
body='1234567890', request=req,
conditional_response=True)
body = ''.join(resp([], start_response))
self.assertEquals(body, '')
self.assertEquals(resp.content_length, 0)
self.assertEquals(resp.status, '416 Requested Range Not Satisfiable')
# Syntactically-invalid Range headers "MUST" be ignored
req = swift.common.swob.Request.blank(
'/', headers={'Range': 'bytes=3-2'})
resp = req.get_response(test_app)
resp.conditional_response = True
body = ''.join(resp([], start_response))
self.assertEquals(body, '1234567890')
self.assertEquals(resp.status, '200 OK')
resp = swift.common.swob.Response(
body='1234567890', request=req,
conditional_response=True)
body = ''.join(resp([], start_response))
self.assertEquals(body, '1234567890')
self.assertEquals(resp.status, '200 OK')
def test_content_type(self):
resp = self._get_response()
resp.content_type = 'text/plain; charset=utf8'
self.assertEquals(resp.content_type, 'text/plain')
def test_charset(self):
resp = self._get_response()
resp.content_type = 'text/plain; charset=utf8'
self.assertEquals(resp.charset, 'utf8')
resp.charset = 'utf16'
self.assertEquals(resp.charset, 'utf16')
def test_charset_content_type(self):
resp = swift.common.swob.Response(
content_type='text/plain', charset='utf-8')
self.assertEquals(resp.charset, 'utf-8')
resp = swift.common.swob.Response(
charset='utf-8', content_type='text/plain')
self.assertEquals(resp.charset, 'utf-8')
def test_etag(self):
resp = self._get_response()
resp.etag = 'hi'
self.assertEquals(resp.headers['Etag'], '"hi"')
self.assertEquals(resp.etag, 'hi')
self.assert_('etag' in resp.headers)
resp.etag = None
self.assert_('etag' not in resp.headers)
def test_host_url_default(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
del env['HTTP_HOST']
self.assertEquals(resp.host_url, 'http://bob:1234')
def test_host_url_default_port_squelched(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '80'
del env['HTTP_HOST']
self.assertEquals(resp.host_url, 'http://bob')
def test_host_url_https(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
del env['HTTP_HOST']
self.assertEquals(resp.host_url, 'https://bob:1234')
def test_host_url_https_port_squelched(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '443'
del env['HTTP_HOST']
self.assertEquals(resp.host_url, 'https://bob')
def test_host_url_host_override(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother'
self.assertEquals(resp.host_url, 'http://someother')
def test_host_url_host_port_override(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother:5678'
self.assertEquals(resp.host_url, 'http://someother:5678')
def test_host_url_host_https(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother:5678'
self.assertEquals(resp.host_url, 'https://someother:5678')
def test_507(self):
resp = swift.common.swob.HTTPInsufficientStorage()
content = ''.join(resp._response_iter(resp.app_iter, resp._body))
self.assertEquals(
content,
'<html><h1>Insufficient Storage</h1><p>There was not enough space '
'to save the resource. Drive: unknown</p></html>')
resp = swift.common.swob.HTTPInsufficientStorage(drive='sda1')
content = ''.join(resp._response_iter(resp.app_iter, resp._body))
self.assertEquals(
content,
'<html><h1>Insufficient Storage</h1><p>There was not enough space '
'to save the resource. Drive: sda1</p></html>')
def test_200_with_body_and_headers(self):
headers = {'Content-Length': '0'}
content = 'foo'
resp = swift.common.swob.HTTPOk(body=content, headers=headers)
self.assertEquals(resp.body, content)
self.assertEquals(resp.content_length, len(content))
def test_init_with_body_headers_app_iter(self):
# body exists but no headers and no app_iter
body = 'ok'
resp = swift.common.swob.Response(body=body)
self.assertEquals(resp.body, body)
self.assertEquals(resp.content_length, len(body))
# body and headers with 0 content_length exist but no app_iter
body = 'ok'
resp = swift.common.swob.Response(
body=body, headers={'Content-Length': '0'})
self.assertEquals(resp.body, body)
self.assertEquals(resp.content_length, len(body))
# body and headers with content_length exist but no app_iter
body = 'ok'
resp = swift.common.swob.Response(
body=body, headers={'Content-Length': '5'})
self.assertEquals(resp.body, body)
self.assertEquals(resp.content_length, len(body))
# body and headers with no content_length exist but no app_iter
body = 'ok'
resp = swift.common.swob.Response(body=body, headers={})
self.assertEquals(resp.body, body)
self.assertEquals(resp.content_length, len(body))
# body, headers with content_length and app_iter exist
resp = swift.common.swob.Response(
body='ok', headers={'Content-Length': '5'}, app_iter=iter([]))
self.assertEquals(resp.content_length, 5)
self.assertEquals(resp.body, '')
# headers with content_length and app_iter exist but no body
resp = swift.common.swob.Response(
headers={'Content-Length': '5'}, app_iter=iter([]))
self.assertEquals(resp.content_length, 5)
self.assertEquals(resp.body, '')
# app_iter exists but no body and headers
resp = swift.common.swob.Response(app_iter=iter([]))
self.assertEquals(resp.content_length, None)
self.assertEquals(resp.body, '')
class TestUTC(unittest.TestCase):
def test_tzname(self):
self.assertEquals(swift.common.swob.UTC.tzname(None), 'UTC')
class TestConditionalIfNoneMatch(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response('200 OK', [('Etag', 'the-etag')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_simple_match(self):
# etag matches --> 304
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 304)
self.assertEquals(body, '')
def test_quoted_simple_match(self):
# double quotes don't matter
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '"the-etag"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 304)
self.assertEquals(body, '')
def test_list_match(self):
# it works with lists of etags to match
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '"bert", "the-etag", "ernie"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 304)
self.assertEquals(body, '')
def test_list_no_match(self):
# no matches --> whatever the original status was
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '"bert", "ernie"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_match_star(self):
        # "*" means match anything; see RFC 2616 section 14.26 (If-None-Match)
req = swift.common.swob.Request.blank(
'/', headers={'If-None-Match': '*'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 304)
self.assertEquals(body, '')
class TestConditionalIfMatch(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response('200 OK', [('Etag', 'the-etag')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_simple_match(self):
# if etag matches, proceed as normal
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_simple_conditional_etag_match(self):
# if etag matches, proceed as normal
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'not-the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
resp._conditional_etag = 'not-the-etag'
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_quoted_simple_match(self):
# double quotes or not, doesn't matter
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': '"the-etag"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_no_match(self):
# no match --> 412
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'not-the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 412)
self.assertEquals(body, '')
def test_simple_conditional_etag_no_match(self):
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
resp._conditional_etag = 'not-the-etag'
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 412)
self.assertEquals(body, '')
def test_match_star(self):
# "*" means match anything; see RFC 2616 section 14.24
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': '*'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_match_star_on_404(self):
def fake_app_404(environ, start_response):
start_response('404 Not Found', [])
return ['hi']
req = swift.common.swob.Request.blank(
'/', headers={'If-Match': '*'})
resp = req.get_response(fake_app_404)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 412)
self.assertEquals(body, '')
class TestConditionalIfModifiedSince(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response(
'200 OK', [('Last-Modified', 'Thu, 27 Feb 2014 03:29:37 GMT')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_absent(self):
req = swift.common.swob.Request.blank('/')
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_before(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:36 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_same(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:37 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 304)
self.assertEquals(body, '')
def test_greater(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:38 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 304)
self.assertEquals(body, '')
def test_out_of_range_is_ignored(self):
# All that datetime gives us is a ValueError or OverflowError when
# something is out of range (i.e. less than datetime.datetime.min or
# greater than datetime.datetime.max). Unfortunately, we can't
# distinguish between a date being too old and a date being too new,
# so the best we can do is ignore such headers.
max_date_list = list(datetime.datetime.max.timetuple())
max_date_list[0] += 1 # bump up the year
too_big_date_header = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.struct_time(max_date_list))
req = swift.common.swob.Request.blank(
'/',
headers={'If-Modified-Since': too_big_date_header})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
class TestConditionalIfUnmodifiedSince(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response(
'200 OK', [('Last-Modified', 'Thu, 20 Feb 2014 03:29:37 GMT')])
return ['hi']
def fake_start_response(*a, **kw):
pass
def test_absent(self):
req = swift.common.swob.Request.blank('/')
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_before(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:36 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 412)
self.assertEquals(body, '')
def test_same(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:37 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_greater(self):
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:38 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
def test_out_of_range_is_ignored(self):
# All that datetime gives us is a ValueError or OverflowError when
# something is out of range (i.e. less than datetime.datetime.min or
# greater than datetime.datetime.max). Unfortunately, we can't
# distinguish between a date being too old and a date being too new,
# so the best we can do is ignore such headers.
max_date_list = list(datetime.datetime.max.timetuple())
max_date_list[0] += 1 # bump up the year
too_big_date_header = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.struct_time(max_date_list))
req = swift.common.swob.Request.blank(
'/',
headers={'If-Unmodified-Since': too_big_date_header})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = ''.join(resp(req.environ, self.fake_start_response))
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
if __name__ == '__main__':
unittest.main()
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_log import log
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.event.storage import models as event_models
from ceilometer.i18n import _
from ceilometer import storage
LOG = log.getLogger(__name__)
class TraitDescription(base.Base):
"""A description of a trait, with no associated value."""
type = wtypes.text
"the data type, defaults to string"
name = wtypes.text
"the name of the trait"
@classmethod
def sample(cls):
return cls(name='service',
type='string'
)
class EventQuery(base.Query):
"""Query arguments for Event Queries."""
_supported_types = ['integer', 'float', 'string', 'datetime']
type = wsme.wsattr(wtypes.text, default='string')
"the type of the trait filter, defaults to string"
def __repr__(self):
# for logging calls
return '<EventQuery %r %s %r %s>' % (self.field,
self.op,
self._get_value_as_type(),
self.type)
@classmethod
def sample(cls):
return cls(field="event_type",
type="string",
op="eq",
value="compute.instance.create.start")
class Trait(base.Base):
"""A Trait associated with an event."""
name = wtypes.text
"The name of the trait"
value = wtypes.text
"the value of the trait"
type = wtypes.text
"the type of the trait (string, integer, float or datetime)"
@staticmethod
def _convert_storage_trait(trait):
"""Helper method to convert a storage model into an API trait instance.
If an API trait instance is passed in, just return it.
"""
if isinstance(trait, Trait):
return trait
value = (six.text_type(trait.value)
if not trait.dtype == event_models.Trait.DATETIME_TYPE
else trait.value.isoformat())
trait_type = event_models.Trait.get_name_by_type(trait.dtype)
return Trait(name=trait.name, type=trait_type, value=value)
@classmethod
def sample(cls):
return cls(name='service',
type='string',
value='compute.hostname'
)
class Event(base.Base):
"""A System event."""
message_id = wtypes.text
"The message ID for the notification"
event_type = wtypes.text
"The type of the event"
_traits = None
def get_traits(self):
return self._traits
def set_traits(self, traits):
self._traits = map(Trait._convert_storage_trait, traits)
traits = wsme.wsproperty(wtypes.ArrayType(Trait),
get_traits,
set_traits)
"Event specific properties"
generated = datetime.datetime
"The time the event occurred"
raw = base.JsonType()
"The raw copy of notification"
@classmethod
def sample(cls):
return cls(
event_type='compute.instance.update',
generated=datetime.datetime(2015, 1, 1, 12, 30, 59, 123456),
message_id='94834db1-8f1b-404d-b2ec-c35901f1b7f0',
traits={
Trait(name='request_id',
value='req-4e2d67b8-31a4-48af-bb2f-9df72a353a72'),
Trait(name='service',
value='conductor.tem-devstack-01'),
Trait(name='tenant_id',
value='7f13f2b17917463b9ee21aa92c4b36d6')
},
raw={'status': {'nested': 'started'}}
)
def _event_query_to_event_filter(q):
evt_model_filter = {
'event_type': None,
'message_id': None,
'start_timestamp': None,
'end_timestamp': None
}
traits_filter = []
for i in q:
if not i.op:
i.op = 'eq'
elif i.op not in base.operation_kind:
error = _("operator {} is incorrect").format(i.op)
raise base.ClientSideError(error)
if i.field in evt_model_filter:
evt_model_filter[i.field] = i.value
else:
trait_type = i.type or 'string'
traits_filter.append({"key": i.field,
trait_type: i._get_value_as_type(),
"op": i.op})
return storage.EventFilter(traits_filter=traits_filter, **evt_model_filter)
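# Illustrative sketch (not part of the original module): how a query list is
# translated by _event_query_to_event_filter() above. The field and value
# strings are made-up examples.
#
#   q = [EventQuery(field='event_type', op='eq',
#                   value='compute.instance.create.start', type='string'),
#        EventQuery(field='service', op='eq', value='nova', type='string')]
#   event_filter = _event_query_to_event_filter(q)
#
# 'event_type' is a recognized model field, so it becomes a constructor
# argument of the EventFilter, while 'service' is treated as a trait and is
# appended to traits_filter as {'key': 'service', 'string': 'nova', 'op': 'eq'}.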
class TraitsController(rest.RestController):
"""Works on Event Traits."""
@v2_utils.requires_admin
@wsme_pecan.wsexpose([Trait], wtypes.text, wtypes.text)
def get_one(self, event_type, trait_name):
"""Return all instances of a trait for an event type.
:param event_type: Event type to filter traits by
:param trait_name: Trait to return values for
"""
LOG.debug(_("Getting traits for %s") % event_type)
return [Trait._convert_storage_trait(t)
for t in pecan.request.event_storage_conn
.get_traits(event_type, trait_name)]
@v2_utils.requires_admin
@wsme_pecan.wsexpose([TraitDescription], wtypes.text)
def get_all(self, event_type):
"""Return all trait names for an event type.
:param event_type: Event type to filter traits by
"""
get_trait_name = event_models.Trait.get_name_by_type
return [TraitDescription(name=t['name'],
type=get_trait_name(t['data_type']))
for t in pecan.request.event_storage_conn
.get_trait_types(event_type)]
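# Illustrative note (not part of the original module): given the nesting used
# below (EventTypesController exposes this controller as its `traits`
# attribute) and assuming EventTypesController is mounted under
# /v2/event_types by the API root, the two methods above would typically
# answer requests such as
#
#   GET /v2/event_types/compute.instance.create.start/traits
#   GET /v2/event_types/compute.instance.create.start/traits/service
#
# The exact prefix depends on how the v2 root controller mounts these
# classes, which is outside this file.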
class EventTypesController(rest.RestController):
"""Works on Event Types in the system."""
traits = TraitsController()
@v2_utils.requires_admin
@wsme_pecan.wsexpose(None, wtypes.text)
def get_one(self, event_type):
"""Unused API, will always return 404.
        :param event_type: An event type
"""
pecan.abort(404)
@v2_utils.requires_admin
@wsme_pecan.wsexpose([six.text_type])
def get_all(self):
"""Get all event types."""
return list(pecan.request.event_storage_conn.get_event_types())
class EventsController(rest.RestController):
"""Works on Events."""
@v2_utils.requires_admin
@wsme_pecan.wsexpose([Event], [EventQuery])
def get_all(self, q=None):
"""Return all events matching the query filters.
:param q: Filter arguments for which Events to return
"""
q = q or []
event_filter = _event_query_to_event_filter(q)
return [Event(message_id=event.message_id,
event_type=event.event_type,
generated=event.generated,
traits=event.traits,
raw=event.raw)
for event in
pecan.request.event_storage_conn.get_events(event_filter)]
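    # Illustrative note (not part of the original module): q is a list of
    # EventQuery objects. Assuming WSME's usual dotted query-string encoding
    # for lists of complex arguments (not defined in this file), a request to
    # this endpoint could look roughly like
    #
    #   GET /v2/events?q.field=event_type&q.op=eq
    #       &q.value=compute.instance.create.start&q.type=string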
@v2_utils.requires_admin
@wsme_pecan.wsexpose(Event, wtypes.text)
def get_one(self, message_id):
"""Return a single event with the given message id.
:param message_id: Message ID of the Event to be returned
"""
event_filter = storage.EventFilter(message_id=message_id)
events = [event for event
in pecan.request.event_storage_conn.get_events(event_filter)]
if not events:
raise base.EntityNotFound(_("Event"), message_id)
if len(events) > 1:
LOG.error(_("More than one event with "
"id %s returned from storage driver") % message_id)
event = events[0]
return Event(message_id=event.message_id,
event_type=event.event_type,
generated=event.generated,
traits=event.traits,
raw=event.raw)
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""This file contains code to handle InvalidPipeline exceptions that contain
upgrade requests."""
from __future__ import division
from vistrails.core import debug
import vistrails.core.db.action
from vistrails.core.modules.module_registry import get_module_registry, \
ModuleDescriptor, MissingModule, MissingPort, MissingPackage
from vistrails.core.modules.utils import parse_descriptor_string, \
create_descriptor_string, parse_port_spec_string, create_port_spec_string
from vistrails.core.packagemanager import get_package_manager
from vistrails.core.system import get_vistrails_basic_pkg_id
from vistrails.core.vistrail.annotation import Annotation
from vistrails.core.vistrail.module_control_param import ModuleControlParam
from vistrails.core.vistrail.connection import Connection
from vistrails.core.vistrail.port import Port
from vistrails.core.vistrail.port_spec import PortSpec
from vistrails.core.vistrail.port_spec_item import PortSpecItem
from vistrails.core.utils import versions_increasing
import copy
##############################################################################
class UpgradeWorkflowError(Exception):
def __init__(self, msg, module=None, port_name=None, port_type=None):
Exception.__init__(self, msg)
self._msg = msg
self._module = module
self._port_name = port_name
self._port_type = port_type.lower() if port_type else None
def __str__(self):
return "Upgrading workflow failed.\n" + self._msg
class UpgradeModuleRemap(object):
def __init__(self, start_version, end_version,
output_version, new_module=None,
dst_port_remap=None, src_port_remap=None,
function_remap=None, annotation_remap=None,
control_param_remap=None, module_name=None):
self.module_name = module_name
self.start_version = start_version
self.end_version = end_version
self.output_version = output_version
self.new_module = new_module
if dst_port_remap is None:
self._dst_port_remap = {}
else:
self._dst_port_remap = dst_port_remap
if src_port_remap is None:
self._src_port_remap = {}
else:
self._src_port_remap = src_port_remap
if function_remap is None:
self._function_remap = {}
else:
self._function_remap = function_remap
if annotation_remap is None:
self._annotation_remap = {}
else:
self._annotation_remap = annotation_remap
if control_param_remap is None:
self._control_param_remap = {}
else:
self._control_param_remap = control_param_remap
@classmethod
def __copy__(cls, obj):
newobj = cls()
for k, v in obj.__dict__.iteritems():
if k.startswith('_') and k.endswith('_remap'):
v = copy.copy(v)
newobj.__dict__[k] = v
return newobj
@classmethod
def from_tuple(cls, module_name, t):
if len(t) == 3:
obj = cls(t[0], t[1], None, t[2], module_name=module_name)
remap = None
elif len(t) == 4:
obj = cls(t[0], t[1], None, t[2], module_name=module_name)
remap = t[3]
elif len(t) == 5:
obj = cls(t[0], t[1], t[2], t[3], module_name=module_name)
remap = t[4]
else:
raise TypeError("UpgradeModuleRemap.from_tuple() got a tuple of "
"length %d" % len(t))
if remap is not None:
for remap_type, remap_dict in remap.iteritems():
for remap_name, remap_change in remap_dict.iteritems():
obj.add_remap(remap_type, remap_name, remap_change)
return obj
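    # Illustrative sketch (not part of the original source): the tuple shapes
    # accepted by from_tuple(). Version and module names are made-up examples.
    #
    #   ('0.8', '0.9', 'NewModule')                   # start, end, new_module
    #   ('0.8', '0.9', 'NewModule', remap)            # plus a remap dict
    #   ('0.8', '0.9', '1.0', 'NewModule', remap)     # plus explicit
    #                                                 # output_version
    #
    # where remap maps a remap type (e.g. 'src_port_remap') to a dict of
    # {old_name: new_name_or_callable}, for instance
    #   {'src_port_remap': {'result': 'value'}}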
def _get_dst_port_remap(self):
return self._dst_port_remap
dst_port_remap = property(_get_dst_port_remap)
def _get_src_port_remap(self):
return self._src_port_remap
src_port_remap = property(_get_src_port_remap)
def _get_function_remap(self):
        # !!! dst_port_remap serves as the base for function_remap, but the
        # developer is responsible for knowing that anything beyond simple
        # name remaps requires separate remap functions
d = copy.copy(self._dst_port_remap)
d.update(self._function_remap)
return d
function_remap = property(_get_function_remap)
def _get_annotation_remap(self):
return self._annotation_remap
annotation_remap = property(_get_annotation_remap)
def _get_control_param_remap(self):
return self._control_param_remap
control_param_remap = property(_get_control_param_remap)
def add_remap(self, remap_type, remap_name, remap_change):
if not hasattr(self, '_%s' % remap_type):
raise ValueError('remap_type "%s" not allowed' % remap_type)
d = getattr(self, '_%s' % remap_type)
d[remap_name] = remap_change
# if remap_type not in self.allowed_remaps:
# raise ValueError("remap_type must be one of %s" % allowed_remaps)
# self.remap[remap_type][remap_name] = remap_change
def get_remap(self, remap_type):
if not hasattr(self, '_%s' % remap_type):
raise ValueError('remap_type "%s" not allowed' % remap_type)
d = getattr(self, '_%s' % remap_type)
return d
# if remap_type not in self.allowed_remaps:
# raise ValueError("remap_type must be one of %s" % allowed_remaps)
# return self.remap[remap_type]
def get_output_version(self):
return self.output_version
class UpgradePackageRemap(object):
def __init__(self):
        self.remaps = {}  # module name (str): list of UpgradeModuleRemap
@classmethod
def __copy__(cls, obj):
newobj = cls()
newobj.remaps = dict((modname, copy.copy(modremap))
for modname, modremap in obj.remaps.iteritems())
return newobj
@classmethod
def from_dict(cls, d):
pkg_remap = cls()
for module_name, remap_list in d.iteritems():
for remap in remap_list:
pkg_remap.add_module_remap(remap, module_name)
return pkg_remap
def add_module_remap(self, module_remap, module_name=None):
if isinstance(module_remap, tuple):
if module_name is None:
raise ValueError("module_name must be specified if "
"module_remap is a tuple")
module_remap = UpgradeModuleRemap.from_tuple(module_name,
module_remap)
else:
if module_name is not None:
# assume user wants to override name
module_remap.module_name = module_name
if module_remap.module_name not in self.remaps:
self.remaps[module_remap.module_name] = []
self.remaps[module_remap.module_name].append(module_remap)
def get_module_remaps(self, module_name):
if module_name in self.remaps:
return self.remaps[module_name]
return []
def has_module_remaps(self, module_name):
return module_name in self.remaps
def get_module_upgrade(self, module_name, old_version):
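        # Matching sketch (assuming versions_increasing(a, b) means a < b): a
        # remap with start_version='0.8' and end_version='0.9' applies to an
        # old_version of '0.8' or '0.8.1' but not '0.9'; a None bound on either
        # side leaves that side open.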
for module_remap in self.get_module_remaps(module_name):
if ((module_remap.start_version is None or
not versions_increasing(old_version,
module_remap.start_version)) and
(module_remap.end_version is None or
versions_increasing(old_version,
module_remap.end_version))):
return module_remap
return None
class UpgradeWorkflowHandler(object):
@staticmethod
def dispatch_request(controller, module_id, current_pipeline):
pm = get_package_manager()
if module_id not in current_pipeline.modules:
# It is possible that some other upgrade request has
# already removed the invalid module of this request. In
# that case, disregard the request.
debug.log("module %s already handled. skipping" % module_id)
return []
invalid_module = current_pipeline.modules[module_id]
pkg = pm.get_package(invalid_module.package)
if hasattr(pkg.module, 'handle_module_upgrade_request'):
f = pkg.module.handle_module_upgrade_request
return f(controller, module_id, current_pipeline)
elif hasattr(pkg.module, '_upgrades'):
return UpgradeWorkflowHandler.remap_module(controller, module_id,
current_pipeline,
pkg.module._upgrades)
else:
debug.log('Package "%s" cannot handle upgrade request. '
'VisTrails will attempt automatic upgrade.' % \
pkg.identifier)
auto_upgrade = UpgradeWorkflowHandler.attempt_automatic_upgrade
return auto_upgrade(controller, current_pipeline, module_id)
@staticmethod
def check_port_spec(module, port_name, port_type, descriptor=None,
sigstring=None):
basic_pkg = get_vistrails_basic_pkg_id()
reg = get_module_registry()
found = False
try:
if descriptor is not None:
s = reg.get_port_spec_from_descriptor(descriptor, port_name,
port_type)
found = True
spec_tuples = parse_port_spec_string(sigstring, basic_pkg)
for i in xrange(len(spec_tuples)):
spec_tuple = spec_tuples[i]
port_pkg = reg.get_package_by_name(spec_tuple[0])
if port_pkg.identifier != spec_tuple[0]:
# we have an old identifier
spec_tuples[i] = (port_pkg.identifier,) + spec_tuple[1:]
sigstring = create_port_spec_string(spec_tuples)
# sigstring = expand_port_spec_string(sigstring, basic_pkg)
if s.sigstring != sigstring:
msg = ('%s port "%s" of module "%s" exists, but '
'signatures differ "%s" != "%s"') % \
(port_type.capitalize(), port_name, module.name,
s.sigstring, sigstring)
raise UpgradeWorkflowError(msg, module, port_name, port_type)
except MissingPort:
pass
if not found and \
not module.has_portSpec_with_name((port_name, port_type)):
msg = '%s port "%s" of module "%s" does not exist.' % \
(port_type.capitalize(), port_name, module.name)
raise UpgradeWorkflowError(msg, module, port_name, port_type)
@staticmethod
def find_descriptor(controller, pipeline, module_id, desired_version=''):
reg = get_module_registry()
get_descriptor = reg.get_descriptor_by_name
pm = get_package_manager()
invalid_module = pipeline.modules[module_id]
mpkg, mname, mnamespace, mid = (invalid_module.package,
invalid_module.name,
invalid_module.namespace,
invalid_module.id)
pkg = pm.get_package(mpkg)
desired_version = ''
d = None
# don't check for abstraction/subworkflow since the old module
# could be a subworkflow
if reg.has_abs_upgrade(*invalid_module.descriptor_info):
return reg.get_abs_upgrade(*invalid_module.descriptor_info)
try:
try:
d = get_descriptor(mpkg, mname, mnamespace, '', desired_version)
except MissingModule, e:
r = None
if pkg.can_handle_missing_modules():
r = pkg.handle_missing_module(controller, module_id,
pipeline)
d = get_descriptor(mpkg, mname, mnamespace, '',
desired_version)
if not r:
raise e
except MissingModule, e:
return None
assert isinstance(d, ModuleDescriptor)
return d
@staticmethod
def check_upgrade(pipeline, module_id, d, function_remap=None,
src_port_remap=None, dst_port_remap=None):
if function_remap is None:
function_remap = {}
if src_port_remap is None:
src_port_remap = {}
if dst_port_remap is None:
dst_port_remap = {}
invalid_module = pipeline.modules[module_id]
def check_connection_port(port):
port_type = PortSpec.port_type_map.inverse[port.type]
UpgradeWorkflowHandler.check_port_spec(invalid_module,
port.name, port_type,
d, port.sigstring)
# check if connections are still valid
for _, conn_id in pipeline.graph.edges_from(module_id):
port = pipeline.connections[conn_id].source
if port.name not in src_port_remap:
check_connection_port(port)
for _, conn_id in pipeline.graph.edges_to(module_id):
port = pipeline.connections[conn_id].destination
if port.name not in dst_port_remap:
check_connection_port(port)
# check if function values are still valid
for function in invalid_module.functions:
# function_spec = function.get_spec('input')
if function.name not in function_remap:
UpgradeWorkflowHandler.check_port_spec(invalid_module,
function.name,
'input', d,
function.sigstring)
@staticmethod
def attempt_automatic_upgrade(controller, pipeline, module_id,
function_remap=None, src_port_remap=None,
dst_port_remap=None, annotation_remap=None,
control_param_remap=None):
"""attempt_automatic_upgrade(module_id, pipeline): [Action]
Attempts to automatically upgrade module by simply adding a
new module with the current package version, and recreating
all connections and functions. If any of the ports used are
not available, raise an exception that will trigger the
failure of the entire upgrade.
attempt_automatic_upgrade returns a list of actions if
successful.
"""
invalid_module = pipeline.modules[module_id]
mpkg, mname, mnamespace, mid = (invalid_module.package,
invalid_module.name,
invalid_module.namespace,
invalid_module.id)
d = UpgradeWorkflowHandler.find_descriptor(controller, pipeline,
module_id)
if not d:
if mnamespace:
nss = mnamespace + '|' + mname
else:
nss = mname
msg = ("Could not upgrade module %s from package %s.\n" %
(nss, mpkg))
raise UpgradeWorkflowError(msg)
UpgradeWorkflowHandler.check_upgrade(pipeline, module_id, d,
function_remap,
src_port_remap, dst_port_remap)
        # If we passed all of these checks, we consider the module to be
        # automatically upgradeable. Now create actions that will delete the
        # old functions, module, and connections, and add a new module with
        # corresponding functions and connections.
return UpgradeWorkflowHandler.replace_module(controller, pipeline,
module_id, d,
function_remap,
src_port_remap,
dst_port_remap,
annotation_remap,
control_param_remap)
@staticmethod
def create_new_connection(controller, src_module, src_port,
dst_module, dst_port):
# spec -> name, type, signature
output_port_id = controller.id_scope.getNewId(Port.vtType)
if isinstance(src_port, basestring):
output_port_spec = src_module.get_port_spec(src_port, 'output')
output_port = Port(id=output_port_id,
spec=output_port_spec,
moduleId=src_module.id,
moduleName=src_module.name)
else:
output_port = Port(id=output_port_id,
name=src_port.name,
type=src_port.type,
signature=src_port.signature,
moduleId=src_module.id,
moduleName=src_module.name)
input_port_id = controller.id_scope.getNewId(Port.vtType)
if isinstance(dst_port, basestring):
input_port_spec = dst_module.get_port_spec(dst_port, 'input')
input_port = Port(id=input_port_id,
spec=input_port_spec,
moduleId=dst_module.id,
moduleName=dst_module.name)
else:
input_port = Port(id=input_port_id,
name=dst_port.name,
type=dst_port.type,
signature=dst_port.signature,
moduleId=dst_module.id,
moduleName=dst_module.name)
conn_id = controller.id_scope.getNewId(Connection.vtType)
connection = Connection(id=conn_id,
ports=[input_port, output_port])
return connection
@staticmethod
def replace_generic(controller, pipeline, old_module, new_module,
function_remap=None, src_port_remap=None,
dst_port_remap=None, annotation_remap=None,
control_param_remap=None, use_registry=True):
if function_remap is None:
function_remap = {}
if src_port_remap is None:
src_port_remap = {}
if dst_port_remap is None:
dst_port_remap = {}
if annotation_remap is None:
annotation_remap = {}
if control_param_remap is None:
control_param_remap = {}
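        # Remap value semantics used throughout below (a summary sketch): a value
        # of None drops the item, a string renames it, and anything else is
        # treated as a callable that returns additional ('add', <obj>) operations.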
basic_pkg = get_vistrails_basic_pkg_id()
ops = []
ops.extend(controller.delete_module_list_ops(pipeline, [old_module.id]))
for annotation in old_module.annotations:
if annotation.key not in annotation_remap:
annotation_key = annotation.key
else:
remap = annotation_remap[annotation.key]
if remap is None:
# don't add the annotation back in
continue
elif not isinstance(remap, basestring):
ops.extend(remap(annotation))
continue
else:
annotation_key = remap
new_annotation = \
Annotation(id=controller.id_scope.getNewId(Annotation.vtType),
key=annotation_key,
value=annotation.value)
new_module.add_annotation(new_annotation)
for control_param in old_module.control_parameters:
if control_param.name not in control_param_remap:
control_param_name = control_param.name
else:
remap = control_param_remap[control_param.name]
if remap is None:
# don't add the control param back in
continue
elif not isinstance(remap, basestring):
ops.extend(remap(control_param))
continue
else:
control_param_name = remap
new_control_param = \
ModuleControlParam(id=controller.id_scope.getNewId(
ModuleControlParam.vtType),
name=control_param_name,
value=control_param.value)
new_module.add_control_parameter(new_control_param)
if not old_module.is_group() and not old_module.is_abstraction():
for port_spec in old_module.port_spec_list:
if port_spec.type == 'input':
if port_spec.name not in dst_port_remap:
spec_name = port_spec.name
else:
remap = dst_port_remap[port_spec.name]
if remap is None:
continue
elif not isinstance(remap, basestring):
ops.extend(remap(port_spec))
continue
else:
spec_name = remap
elif port_spec.type == 'output':
if port_spec.name not in src_port_remap:
spec_name = port_spec.name
else:
remap = src_port_remap[port_spec.name]
if remap is None:
continue
elif not isinstance(remap, basestring):
ops.extend(remap(port_spec))
continue
else:
spec_name = remap
new_spec = port_spec.do_copy(True, controller.id_scope, {})
new_spec.name = spec_name
new_module.add_port_spec(new_spec)
function_ops = []
for function in old_module.functions:
if function.name not in function_remap:
function_name = function.name
else:
remap = function_remap[function.name]
if remap is None:
# don't add the function back in
continue
elif not isinstance(remap, basestring):
function_ops.extend(remap(function, new_module))
continue
else:
function_name = remap
if len(function.parameters) > 0:
new_param_vals, aliases = zip(*[(p.strValue, p.alias)
for p in function.parameters])
else:
new_param_vals = []
aliases = []
if use_registry:
function_port_spec = function_name
else:
def mk_psi(pos):
psi = PortSpecItem(module="Module", package=basic_pkg,
namespace="", pos=pos)
return psi
n_items = len(new_param_vals)
function_port_spec = PortSpec(name=function_name,
items=[mk_psi(i)
for i in xrange(n_items)])
new_function = controller.create_function(new_module,
function_port_spec,
new_param_vals,
aliases)
new_module.add_function(new_function)
if None in function_remap:
# used to add new functions
remap = function_remap[None]
function_ops.extend(remap(None, new_module))
# add the new module
ops.append(('add', new_module))
ops.extend(function_ops)
create_new_connection = UpgradeWorkflowHandler.create_new_connection
for _, conn_id in pipeline.graph.edges_from(old_module.id):
old_conn = pipeline.connections[conn_id]
if old_conn.source.name not in src_port_remap:
source_name = old_conn.source.name
else:
remap = src_port_remap[old_conn.source.name]
if remap is None:
# don't add this connection back in
continue
elif not isinstance(remap, basestring):
ops.extend(remap(old_conn, new_module))
continue
else:
source_name = remap
old_dst_module = pipeline.modules[old_conn.destination.moduleId]
if use_registry:
source_port = source_name
else:
source_port = Port(name=source_name,
type='source',
signature=create_port_spec_string(
[(basic_pkg, 'Variant', '')]))
new_conn = create_new_connection(controller,
new_module,
source_port,
old_dst_module,
old_conn.destination)
ops.append(('add', new_conn))
for _, conn_id in pipeline.graph.edges_to(old_module.id):
old_conn = pipeline.connections[conn_id]
if old_conn.destination.name not in dst_port_remap:
destination_name = old_conn.destination.name
else:
remap = dst_port_remap[old_conn.destination.name]
if remap is None:
# don't add this connection back in
continue
elif not isinstance(remap, basestring):
ops.extend(remap(old_conn, new_module))
continue
else:
destination_name = remap
old_src_module = pipeline.modules[old_conn.source.moduleId]
if use_registry:
destination_port = destination_name
else:
destination_port = Port(name=destination_name,
type='destination',
signature=create_port_spec_string(
[(basic_pkg, 'Variant', '')]))
new_conn = create_new_connection(controller,
old_src_module,
old_conn.source,
new_module,
destination_port)
ops.append(('add', new_conn))
return [vistrails.core.db.action.create_action(ops)]
@staticmethod
def replace_group(controller, pipeline, module_id, new_subpipeline):
basic_pkg = get_vistrails_basic_pkg_id()
old_group = pipeline.modules[module_id]
new_group = controller.create_module(basic_pkg, 'Group', '',
old_group.location.x,
old_group.location.y)
new_group.pipeline = new_subpipeline
return UpgradeWorkflowHandler.replace_generic(controller, pipeline,
old_group, new_group)
@staticmethod
def replace_module(controller, pipeline, module_id, new_descriptor,
function_remap=None, src_port_remap=None,
dst_port_remap=None, annotation_remap=None,
control_param_remap=None, use_registry=True):
old_module = pipeline.modules[module_id]
internal_version = -1
# try to determine whether new module is an abstraction
if (hasattr(new_descriptor, 'module') and
hasattr(new_descriptor.module, "vistrail") and
hasattr(new_descriptor.module, "internal_version")):
internal_version = new_descriptor.version
new_module = \
controller.create_module_from_descriptor(new_descriptor,
old_module.location.x,
old_module.location.y,
internal_version,
not use_registry)
return UpgradeWorkflowHandler.replace_generic(controller, pipeline,
old_module, new_module,
function_remap,
src_port_remap,
dst_port_remap,
annotation_remap,
control_param_remap,
use_registry)
@staticmethod
def remap_module(controller, module_id, pipeline, pkg_remap):
"""remap_module offers a method to shortcut the
specification of upgrades. It is useful when just changing
the names of ports or modules, but can also be used to add
intermediate modules or change the format of parameters. It
is usually called from handle_module_upgrade_request, and the
first three arguments are passed from the arguments to that
method.
pkg_remap specifies all of the changes and is of the format
{<old_module_name>: [(<start_version>, <end_version>,
<new_module_klass> | <new_module_id> | None,
<remap_dictionary>)]}
where new_module_klass is the class and new_module_id
is a string of the format
<package_name>:[<namespace> | ]<module_name>
passing None keeps the original name,
and remap_dictionary is {<remap_type>:
<name_changes>} and <name_changes> is a map from <old_name> to
<new_name> or <remap_function>
The remap functions are passed the old object and the new
module and should return a list of operations with elements of
the form ('add', <obj>).
For example:
def outputName_remap(old_conn, new_module):
ops = []
...
return ops
pkg_remap = {'FileSink': [(None, '1.5.1', FileSink,
{'dst_port_remap':
{'overrideFile': 'overwrite',
'outputName': outputName_remap},
'function_remap':
{'overrideFile': 'overwrite',
'outputName': 'outputPath'}}),
}
"""
reg = get_module_registry()
old_module = pipeline.modules[module_id]
old_version = old_module.version
old_desc_str = create_descriptor_string(old_module.package,
old_module.name,
old_module.namespace,
False)
# print 'running module_upgrade_request', old_module.name
if not isinstance(pkg_remap, UpgradePackageRemap):
pkg_remap = UpgradePackageRemap.from_dict(pkg_remap)
action_list = []
old_module_t = \
(old_module.package, old_module.name, old_module.namespace)
module_remap = pkg_remap.get_module_upgrade(old_desc_str, old_version)
tmp_pipeline = copy.copy(pipeline)
while module_remap is not None:
new_module_type = module_remap.new_module
if new_module_type is None:
new_module_t = old_module_t
elif isinstance(new_module_type, basestring):
new_module_t = parse_descriptor_string(new_module_type,
old_module_t[0])
elif isinstance(new_module_type, ModuleDescriptor):
new_module_t = new_module_type.spec_tuple
else:
new_module_desc = reg.get_descriptor(new_module_type)
new_module_t = new_module_desc.spec_tuple
new_pkg_version = module_remap.output_version
if (new_pkg_version is None or
reg.get_package_by_name(new_module_t[0]).version == new_pkg_version):
# upgrading to the current version
try:
new_module_desc = reg.get_descriptor_by_name(*new_module_t)
except MissingModule, e:
# if the replacement is an abstraction,
# and it has been upgraded, we use that
if reg.has_abs_upgrade(*new_module_t):
new_module_desc = reg.get_abs_upgrade(*new_module_t)
else:
raise e
use_registry = True
next_module_remap = None
else:
new_module_desc = ModuleDescriptor(package=new_module_t[0],
name=new_module_t[1],
namespace=new_module_t[2],
package_version=new_pkg_version)
use_registry = False
# need to try more upgrades since this one isn't current
old_desc_str = create_descriptor_string(new_module_t[0],
new_module_t[1],
new_module_t[2],
False)
old_version = new_pkg_version
next_module_remap = pkg_remap.get_module_upgrade(old_desc_str,
old_version)
old_module_t = new_module_t
replace_module = UpgradeWorkflowHandler.replace_module
actions = replace_module(controller,
tmp_pipeline,
module_id,
new_module_desc,
module_remap.function_remap,
module_remap.src_port_remap,
module_remap.dst_port_remap,
module_remap.annotation_remap,
module_remap.control_param_remap,
use_registry)
for a in actions:
for op in a.operations:
# Update the id of the module being updated
# FIXME: This is brittle
# This assumes first added module is the correct one
if op.vtType == 'add' and op.what == 'module':
module_id = op.objectId
break
tmp_pipeline.perform_action(a)
action_list.extend(actions)
module_remap = next_module_remap
if len(action_list) > 0:
return action_list
        # otherwise, just attempt an automatic upgrade
return UpgradeWorkflowHandler.attempt_automatic_upgrade(controller,
pipeline,
module_id)
import unittest
class TestUpgradePackageRemap(unittest.TestCase):
def test_from_dict(self):
def outputName_remap(old_conn, new_module):
ops = []
return ops
pkg_remap_d = {'FileSink': [(None, '1.5.1', None,
{'dst_port_remap':
{'overrideFile': 'overwrite',
'outputName': outputName_remap},
'function_remap':
{'overrideFile': 'overwrite',
'outputName': 'outputPath'}})]}
pkg_remap = UpgradePackageRemap.from_dict(pkg_remap_d)
def create_workflow(self, c):
upgrade_test_pkg = 'org.vistrails.vistrails.tests.upgrade'
d1 = ModuleDescriptor(package=upgrade_test_pkg,
name='TestUpgradeA',
namespace='',
package_version='0.8')
m1 = c.create_module_from_descriptor(d1, use_desc_pkg_version=True)
m1.is_valid = False
c.add_module_action(m1)
d2 = ModuleDescriptor(package=upgrade_test_pkg,
name='TestUpgradeB',
namespace='',
package_version = '0.8')
m2 = c.create_module_from_descriptor(d2, use_desc_pkg_version=True)
m2.is_valid = False
c.add_module_action(m2)
basic_pkg = get_vistrails_basic_pkg_id()
psi = PortSpecItem(module="Float", package=basic_pkg,
namespace="", pos=0)
function_port_spec = PortSpec(name="a", type="input", items=[psi])
f = c.create_function(m1, function_port_spec, [12])
c.add_function_action(m1, f)
conn_out_psi = PortSpecItem(module="Integer", package=basic_pkg,
namespace="", pos=0)
conn_out_spec = PortSpec(name="z", type="output",
items=[conn_out_psi])
conn_in_psi = PortSpecItem(module="Integer", package=basic_pkg,
namespace="", pos=0)
conn_in_spec = PortSpec(name="b", type="input",
items=[conn_in_psi])
conn = c.create_connection(m1, conn_out_spec, m2, conn_in_spec)
c.add_connection_action(conn)
return c.current_version
def run_multi_upgrade_test(self, pkg_remap):
from vistrails.core.application import get_vistrails_application
app = get_vistrails_application()
created_vistrail = False
pm = get_package_manager()
try:
pm.late_enable_package('upgrades',
{'upgrades':
'vistrails.tests.resources.'})
app.new_vistrail()
created_vistrail = True
c = app.get_controller()
self.create_workflow(c)
p = c.current_pipeline
actions = UpgradeWorkflowHandler.remap_module(c, 0, p, pkg_remap)
finally:
if created_vistrail:
app.close_vistrail()
try:
pm.late_disable_package('upgrades')
except MissingPackage:
pass
def test_multi_upgrade_obj(self):
module_remap_1 = UpgradeModuleRemap('0.8', '0.9', '0.9', None,
module_name="TestUpgradeA")
module_remap_1.add_remap('function_remap', 'a', 'aa')
module_remap_1.add_remap('src_port_remap', 'z', 'zz')
module_remap_2 = UpgradeModuleRemap('0.9', '1.0', '1.0', None,
module_name="TestUpgradeA")
module_remap_2.add_remap('function_remap', 'aa', 'aaa')
module_remap_2.add_remap('src_port_remap', 'zz', 'zzz')
pkg_remap = UpgradePackageRemap()
pkg_remap.add_module_remap(module_remap_1)
pkg_remap.add_module_remap(module_remap_2)
self.run_multi_upgrade_test(pkg_remap)
def test_multi_upgrade_dict(self):
pkg_remap = {"TestUpgradeA":
[UpgradeModuleRemap('0.8', '0.9', '0.9', None,
function_remap={'a': 'aa'},
src_port_remap={'z': 'zz'}),
UpgradeModuleRemap('0.9', '1.0', '1.0', None,
function_remap={'aa': 'aaa'},
src_port_remap={'zz': 'zzz'})]}
self.run_multi_upgrade_test(pkg_remap)
def test_multi_upgrade_legacy(self):
        # note that the remap specifies the 0.8 -> 1.0 upgrade directly, as
        # must be the case for legacy upgrades
pkg_remap = {"TestUpgradeA": [('0.8', '1.0', None,
{"function_remap": {'a': 'aaa'},
"src_port_remap": {'z': 'zzz'}}),
('0.9', '1.0', None,
{"function_remap": {'aa': 'aaa'},
"src_port_remap": {'zz': 'zzz'}})]}
self.run_multi_upgrade_test(pkg_remap)
def test_multi_upgrade_rename(self):
pkg_remap = {"TestUpgradeA":
[UpgradeModuleRemap('0.8', '0.9', '0.9', "TestUpgradeB",
dst_port_remap={'a': 'b'},
src_port_remap={'z': 'zz'})],
"TestUpgradeB":
[UpgradeModuleRemap('0.9', '1.0', '1.0', None,
src_port_remap={'zz': None})]}
self.run_multi_upgrade_test(pkg_remap)
def test_external_upgrade(self):
from vistrails.core.application import get_vistrails_application
app = get_vistrails_application()
default_upgrades = app.temp_configuration.upgrades
default_upgrade_delay = app.temp_configuration.upgradeDelay
app.temp_configuration.upgrades = True
app.temp_configuration.upgradeDelay = False
created_vistrail = False
pm = get_package_manager()
try:
pm.late_enable_package('upgrades',
{'upgrades':
'vistrails.tests.resources.'})
app.new_vistrail()
created_vistrail = True
c = app.get_controller()
current_version = self.create_workflow(c)
for m in c.current_pipeline.modules.itervalues():
self.assertEqual(m.version, '0.8')
c.change_selected_version(current_version, from_root=True)
self.assertEqual(len(c.current_pipeline.modules), 2)
for m in c.current_pipeline.modules.itervalues():
self.assertEqual(m.version, '1.0')
if m.name == "TestUpgradeA":
self.assertEqual(m.functions[0].name, 'aaa')
self.assertEqual(len(c.current_pipeline.connections), 1)
conn = c.current_pipeline.connections.values()[0]
self.assertEqual(conn.source.name, 'zzz')
self.assertEqual(conn.destination.name, 'b')
finally:
if created_vistrail:
app.close_vistrail()
try:
pm.late_disable_package('upgrades')
except MissingPackage:
pass
app.temp_configuration.upgrades = default_upgrades
app.temp_configuration.upgradeDelay = default_upgrade_delay
if __name__ == '__main__':
import vistrails.core.application
vistrails.core.application.init()
unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod, ABCMeta
from pyspark.ml.wrapper import JavaWrapper
from pyspark.ml.param import Param, Params
from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol, HasRawPredictionCol
from pyspark.ml.util import keyword_only
from pyspark.mllib.common import inherit_doc
__all__ = ['Evaluator', 'BinaryClassificationEvaluator', 'RegressionEvaluator']
@inherit_doc
class Evaluator(Params):
"""
Base class for evaluators that compute metrics from predictions.
"""
__metaclass__ = ABCMeta
@abstractmethod
def _evaluate(self, dataset):
"""
Evaluates the output.
:param dataset: a dataset that contains labels/observations and
predictions
:return: metric
"""
raise NotImplementedError()
def evaluate(self, dataset, params=None):
"""
Evaluates the output with optional parameters.
:param dataset: a dataset that contains labels/observations and
predictions
:param params: an optional param map that overrides embedded
params
:return: metric
"""
if params is None:
params = dict()
if isinstance(params, dict):
if params:
return self.copy(params)._evaluate(dataset)
else:
return self._evaluate(dataset)
else:
raise ValueError("Params must be a param map but got %s." % type(params))
@inherit_doc
class JavaEvaluator(Evaluator, JavaWrapper):
"""
Base class for :py:class:`Evaluator`s that wrap Java/Scala
implementations.
"""
__metaclass__ = ABCMeta
def _evaluate(self, dataset):
"""
Evaluates the output.
:param dataset: a dataset that contains labels/observations and predictions.
:return: evaluation metric
"""
self._transfer_params_to_java()
return self._java_obj.evaluate(dataset._jdf)
@inherit_doc
class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol):
"""
Evaluator for binary classification, which expects two input
columns: rawPrediction and label.
>>> from pyspark.mllib.linalg import Vectors
>>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
... [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
>>> dataset = sqlContext.createDataFrame(scoreAndLabels, ["raw", "label"])
...
>>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
>>> evaluator.evaluate(dataset)
0.70...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
0.83...
"""
# a placeholder to make it appear in the generated doc
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation (areaUnderROC|areaUnderPR)")
@keyword_only
def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
metricName="areaUnderROC"):
"""
__init__(self, rawPredictionCol="rawPrediction", labelCol="label", \
metricName="areaUnderROC")
"""
super(BinaryClassificationEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid)
#: param for metric name in evaluation (areaUnderROC|areaUnderPR)
self.metricName = Param(self, "metricName",
"metric name in evaluation (areaUnderROC|areaUnderPR)")
self._setDefault(rawPredictionCol="rawPrediction", labelCol="label",
metricName="areaUnderROC")
kwargs = self.__init__._input_kwargs
self._set(**kwargs)
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
self._paramMap[self.metricName] = value
return self
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
metricName="areaUnderROC"):
"""
setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \
metricName="areaUnderROC")
Sets params for binary classification evaluator.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@inherit_doc
class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol):
"""
Evaluator for Regression, which expects two input
columns: prediction and label.
>>> scoreAndLabels = [(-28.98343821, -27.0), (20.21491975, 21.5),
... (-25.98418959, -22.0), (30.69731842, 33.0), (74.69283752, 71.0)]
>>> dataset = sqlContext.createDataFrame(scoreAndLabels, ["raw", "label"])
...
>>> evaluator = RegressionEvaluator(predictionCol="raw")
>>> evaluator.evaluate(dataset)
-2.842...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "r2"})
0.993...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "mae"})
-2.649...
"""
    # Because the evaluation value is maximized (ref: `CrossValidator`), metrics
    # that should be minimized (e.g. `"rmse"`, `"mse"`, `"mae"`) are negated before
    # being returned, so an RMSE of 2.842 is reported as -2.842.
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation (mse|rmse|r2|mae)")
@keyword_only
def __init__(self, predictionCol="prediction", labelCol="label",
metricName="rmse"):
"""
__init__(self, predictionCol="prediction", labelCol="label", \
metricName="rmse")
"""
super(RegressionEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid)
#: param for metric name in evaluation (mse|rmse|r2|mae)
self.metricName = Param(self, "metricName",
"metric name in evaluation (mse|rmse|r2|mae)")
self._setDefault(predictionCol="prediction", labelCol="label",
metricName="rmse")
kwargs = self.__init__._input_kwargs
self._set(**kwargs)
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
self._paramMap[self.metricName] = value
return self
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
def setParams(self, predictionCol="prediction", labelCol="label",
metricName="rmse"):
"""
setParams(self, predictionCol="prediction", labelCol="label", \
metricName="rmse")
Sets params for regression evaluator.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
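# Usage sketch (hypothetical `predictions` DataFrame): because rmse/mse/mae are
# negated (see the comment on metricName above), a larger value is always better,
# which lets CrossValidator maximize any of these metrics uniformly:
#
#     RegressionEvaluator(metricName="rmse").evaluate(predictions)  # e.g. -2.842...
#     RegressionEvaluator(metricName="r2").evaluate(predictions)    # e.g.  0.993...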
if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SQLContext
globs = globals().copy()
    # Set up a local SparkContext and SQLContext for the doctest globals:
sc = SparkContext("local[2]", "ml.evaluation tests")
sqlContext = SQLContext(sc)
globs['sc'] = sc
globs['sqlContext'] = sqlContext
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
sc.stop()
if failure_count:
exit(-1)
|
|
from nose.tools import eq_, ok_, raises
from wtforms import fields
from flask.ext.admin import form
from flask.ext.admin._compat import as_unicode
from flask.ext.admin._compat import iteritems
from flask.ext.admin.contrib.sqla import ModelView, filters
from flask.ext.babelex import Babel
from . import setup
from datetime import datetime, time, date
class CustomModelView(ModelView):
def __init__(self, model, session,
name=None, category=None, endpoint=None, url=None,
**kwargs):
for k, v in iteritems(kwargs):
setattr(self, k, v)
super(CustomModelView, self).__init__(model, session, name, category,
endpoint, url)
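# A minimal sketch (assuming `Model1` and `db.session` from the fixtures below):
# any ModelView option can be passed as a keyword and is set as an attribute
# before ModelView.__init__ runs, which is how the tests below configure views:
#
#     view = CustomModelView(Model1, db.session,
#                            column_list=['test1'], page_size=2)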
def create_models(db):
class Model1(db.Model):
def __init__(self, test1=None, test2=None, test3=None, test4=None,
bool_field=False, date_field=None, time_field=None,
datetime_field=None, enum_field=None):
self.test1 = test1
self.test2 = test2
self.test3 = test3
self.test4 = test4
self.bool_field = bool_field
self.date_field = date_field
self.time_field = time_field
self.datetime_field = datetime_field
self.enum_field = enum_field
id = db.Column(db.Integer, primary_key=True)
test1 = db.Column(db.String(20))
test2 = db.Column(db.Unicode(20))
test3 = db.Column(db.Text)
test4 = db.Column(db.UnicodeText)
bool_field = db.Column(db.Boolean)
enum_field = db.Column(db.Enum('model1_v1', 'model1_v2'), nullable=True)
date_field = db.Column(db.Date)
time_field = db.Column(db.Time)
datetime_field = db.Column(db.DateTime)
def __unicode__(self):
return self.test1
def __str__(self):
return self.test1
class Model2(db.Model):
def __init__(self, string_field=None, int_field=None, bool_field=None,
model1=None, float_field=None):
self.string_field = string_field
self.int_field = int_field
self.bool_field = bool_field
self.model1 = model1
self.float_field = float_field
id = db.Column(db.Integer, primary_key=True)
string_field = db.Column(db.String)
int_field = db.Column(db.Integer)
bool_field = db.Column(db.Boolean)
enum_field = db.Column(db.Enum('model2_v1', 'model2_v2'), nullable=True)
float_field = db.Column(db.Float)
# Relation
model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id))
model1 = db.relationship(Model1, backref='model2')
db.create_all()
return Model1, Model2
def fill_db(db, Model1, Model2):
model1_obj1 = Model1('test1_val_1', 'test2_val_1', bool_field=True)
model1_obj2 = Model1('test1_val_2', 'test2_val_2')
model1_obj3 = Model1('test1_val_3', 'test2_val_3')
model1_obj4 = Model1('test1_val_4', 'test2_val_4')
model2_obj1 = Model2('test2_val_1', model1=model1_obj1, float_field=None)
model2_obj2 = Model2('test2_val_2', model1=model1_obj2, float_field=None)
model2_obj3 = Model2('test2_val_3', int_field=5000, float_field=25.9)
model2_obj4 = Model2('test2_val_4', int_field=9000, float_field=75.5)
date_obj1 = Model1('date_obj1', date_field=date(2014,11,17))
date_obj2 = Model1('date_obj2', date_field=date(2013,10,16))
timeonly_obj1 = Model1('timeonly_obj1', time_field=time(11,10,9))
timeonly_obj2 = Model1('timeonly_obj2', time_field=time(10,9,8))
datetime_obj1 = Model1('datetime_obj1', datetime_field=datetime(2014,4,3,1,9,0))
datetime_obj2 = Model1('datetime_obj2', datetime_field=datetime(2013,3,2,0,8,0))
enum_obj1 = Model1('enum_obj1', enum_field="model1_v1")
enum_obj2 = Model1('enum_obj2', enum_field="model1_v2")
empty_obj = Model1(test2="empty_obj")
db.session.add_all([
model1_obj1, model1_obj2, model1_obj3, model1_obj4,
model2_obj1, model2_obj2, model2_obj3, model2_obj4,
date_obj1, timeonly_obj1, datetime_obj1,
date_obj2, timeonly_obj2, datetime_obj2,
enum_obj1, enum_obj2, empty_obj
])
db.session.commit()
def test_model():
app, db, admin = setup()
Model1, Model2 = create_models(db)
db.create_all()
view = CustomModelView(Model1, db.session)
admin.add_view(view)
eq_(view.model, Model1)
eq_(view.name, 'Model1')
eq_(view.endpoint, 'model1')
eq_(view._primary_key, 'id')
ok_('test1' in view._sortable_columns)
ok_('test2' in view._sortable_columns)
ok_('test3' in view._sortable_columns)
ok_('test4' in view._sortable_columns)
ok_(view._create_form_class is not None)
ok_(view._edit_form_class is not None)
eq_(view._search_supported, False)
eq_(view._filters, None)
# Verify form
eq_(view._create_form_class.test1.field_class, fields.StringField)
eq_(view._create_form_class.test2.field_class, fields.StringField)
eq_(view._create_form_class.test3.field_class, fields.TextAreaField)
eq_(view._create_form_class.test4.field_class, fields.TextAreaField)
    # Make a test client
client = app.test_client()
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
rv = client.post('/admin/model1/new/',
data=dict(test1='test1large',
test2='test2',
time_field=time(0,0,0)))
eq_(rv.status_code, 302)
model = db.session.query(Model1).first()
eq_(model.test1, u'test1large')
eq_(model.test2, u'test2')
eq_(model.test3, u'')
eq_(model.test4, u'')
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
ok_(u'test1large' in rv.data.decode('utf-8'))
url = '/admin/model1/edit/?id=%s' % model.id
rv = client.get(url)
eq_(rv.status_code, 200)
# verify that midnight does not show as blank
ok_(u'00:00:00' in rv.data.decode('utf-8'))
rv = client.post(url,
data=dict(test1='test1small', test2='test2large'))
eq_(rv.status_code, 302)
model = db.session.query(Model1).first()
eq_(model.test1, 'test1small')
eq_(model.test2, 'test2large')
eq_(model.test3, '')
eq_(model.test4, '')
url = '/admin/model1/delete/?id=%s' % model.id
rv = client.post(url)
eq_(rv.status_code, 302)
eq_(db.session.query(Model1).count(), 0)
@raises(Exception)
def test_no_pk():
app, db, admin = setup()
class Model(db.Model):
test = db.Column(db.Integer)
view = CustomModelView(Model)
admin.add_view(view)
def test_list_columns():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
column_list=['test1', 'test3'],
column_labels=dict(test1='Column1'))
admin.add_view(view)
eq_(len(view._list_columns), 2)
eq_(view._list_columns, [('test1', 'Column1'), ('test3', 'Test3')])
client = app.test_client()
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('Column1' in data)
ok_('Test2' not in data)
def test_exclude_columns():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(
Model1, db.session,
column_exclude_list=['test2', 'test4', 'enum_field', 'date_field', 'time_field', 'datetime_field']
)
admin.add_view(view)
eq_(
view._list_columns,
[('test1', 'Test1'), ('test3', 'Test3'), ('bool_field', 'Bool Field')]
)
client = app.test_client()
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('Test1' in data)
ok_('Test2' not in data)
def test_column_searchable_list():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model2, db.session,
column_searchable_list=['string_field', 'int_field'])
admin.add_view(view)
eq_(view._search_supported, True)
eq_(len(view._search_fields), 2)
ok_(isinstance(view._search_fields[0], db.Column))
ok_(isinstance(view._search_fields[1], db.Column))
eq_(view._search_fields[0].name, 'string_field')
eq_(view._search_fields[1].name, 'int_field')
db.session.add(Model2('model1-test', 5000))
db.session.add(Model2('model2-test', 9000))
db.session.commit()
client = app.test_client()
rv = client.get('/admin/model2/?search=model1')
data = rv.data.decode('utf-8')
ok_('model1-test' in data)
ok_('model2-test' not in data)
rv = client.get('/admin/model2/?search=9000')
data = rv.data.decode('utf-8')
ok_('model1-test' not in data)
ok_('model2-test' in data)
def test_complex_searchable_list():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model2, db.session,
column_searchable_list=['model1.test1'])
admin.add_view(view)
m1 = Model1('model1-test1-val')
m2 = Model1('model1-test2-val')
db.session.add(m1)
db.session.add(m2)
db.session.add(Model2('model2-test1-val', model1=m1))
db.session.add(Model2('model2-test2-val', model1=m2))
db.session.commit()
client = app.test_client()
# test relation string - 'model1.test1'
rv = client.get('/admin/model2/?search=model1-test1')
data = rv.data.decode('utf-8')
ok_('model2-test1-val' in data)
ok_('model2-test2-val' not in data)
view2 = CustomModelView(Model1, db.session,
column_searchable_list=[Model2.string_field])
admin.add_view(view2)
# test relation object - Model2.string_field
rv = client.get('/admin/model1/?search=model2-test1')
data = rv.data.decode('utf-8')
ok_('model1-test1-val' in data)
ok_('model1-test2-val' not in data)
def test_complex_searchable_list_missing_children():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
column_searchable_list=[
'test1', 'model2.string_field'])
admin.add_view(view)
db.session.add(Model1('magic string'))
db.session.commit()
client = app.test_client()
rv = client.get('/admin/model1/?search=magic')
data = rv.data.decode('utf-8')
ok_('magic string' in data)
def test_column_editable_list():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
column_editable_list=[
'test1', 'enum_field'])
admin.add_view(view)
fill_db(db, Model1, Model2)
client = app.test_client()
# Test in-line edit field rendering
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('data-role="x-editable"' in data)
# Form - Test basic in-line edit functionality
rv = client.post('/admin/model1/ajax/update/', data={
'test1-1': 'change-success-1',
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# ensure the value has changed
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('change-success-1' in data)
# Test validation error
rv = client.post('/admin/model1/ajax/update/', data={
'enum_field-1': 'problematic-input',
})
eq_(rv.status_code, 500)
# Test invalid primary key
rv = client.post('/admin/model1/ajax/update/', data={
'test1-1000': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test editing column not in column_editable_list
rv = client.post('/admin/model1/ajax/update/', data={
'test2-1': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test in-line editing for relations
view = CustomModelView(Model2, db.session,
column_editable_list=[
'model1'])
admin.add_view(view)
rv = client.post('/admin/model2/ajax/update/', data={
'model1-1': '3',
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# confirm the value has changed
rv = client.get('/admin/model2/')
data = rv.data.decode('utf-8')
ok_('test1_val_3' in data)
def test_column_filters():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(
Model1, db.session,
column_filters=['test1']
)
admin.add_view(view)
eq_(len(view._filters), 7)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
[
(0, u'equals'),
(1, u'not equal'),
(2, u'contains'),
(3, u'not contains'),
(4, u'empty'),
(5, u'in list'),
(6, u'not in list'),
])
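    # The (index, operation) pairs above map onto the list view's query string as
    # flt<pos>_<index>=<value>; e.g. flt0_0=... applies "equals" on Test1 and
    # flt0_6=... applies "not in list" (a sketch matching the requests made below).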
# Test filter that references property
view = CustomModelView(Model2, db.session,
column_filters=['model1'])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test1']],
[
(0, u'equals'),
(1, u'not equal'),
(2, u'contains'),
(3, u'not contains'),
(4, u'empty'),
(5, u'in list'),
(6, u'not in list'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test2']],
[
(7, u'equals'),
(8, u'not equal'),
(9, u'contains'),
(10, u'not contains'),
(11, u'empty'),
(12, u'in list'),
(13, u'not in list'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test3']],
[
(14, u'equals'),
(15, u'not equal'),
(16, u'contains'),
(17, u'not contains'),
(18, u'empty'),
(19, u'in list'),
(20, u'not in list'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test4']],
[
(21, u'equals'),
(22, u'not equal'),
(23, u'contains'),
(24, u'not contains'),
(25, u'empty'),
(26, u'in list'),
(27, u'not in list'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Bool Field']],
[
(28, u'equals'),
(29, u'not equal'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Enum Field']],
[
(30, u'equals'),
(31, u'not equal'),
(32, u'empty'),
(33, u'in list'),
(34, u'not in list'),
])
# Test filter with a dot
view = CustomModelView(Model2, db.session,
column_filters=['model1.bool_field'])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Bool Field']],
[
(0, 'equals'),
(1, 'not equal'),
])
# Test column_labels on filters
view = CustomModelView(Model2, db.session,
column_filters=['model1.bool_field', 'string_field'],
column_labels={
'model1.bool_field': 'Test Filter #1',
'string_field': 'Test Filter #2',
})
eq_(list(view._filter_groups.keys()), [u'Test Filter #1', u'Test Filter #2'])
fill_db(db, Model1, Model2)
client = app.test_client()
rv = client.get('/admin/model1/?flt0_0=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
# the filter value is always in "data"
# need to check a different column than test1 for the expected row
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
rv = client.get('/admin/model1/?flt0_6=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# Test string filter
view = CustomModelView(Model1, db.session,
column_filters=['test1'], endpoint='_strings')
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'contains'),
(3, 'not contains'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# string - equals
rv = client.get('/admin/_strings/?flt0_0=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not equal
rv = client.get('/admin/_strings/?flt0_1=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - contains
rv = client.get('/admin/_strings/?flt0_2=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not contains
rv = client.get('/admin/_strings/?flt0_3=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - empty
rv = client.get('/admin/_strings/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' in data)
ok_('test1_val_1' not in data)
ok_('test1_val_2' not in data)
# string - not empty
rv = client.get('/admin/_strings/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' not in data)
ok_('test1_val_1' in data)
ok_('test1_val_2' in data)
# string - in list
rv = client.get('/admin/_strings/?flt0_5=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test1_val_3' not in data)
ok_('test1_val_4' not in data)
# string - not in list
rv = client.get('/admin/_strings/?flt0_6=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test1_val_3' in data)
ok_('test1_val_4' in data)
# Test integer filter
view = CustomModelView(Model2, db.session,
column_filters=['int_field'])
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# integer - equals
rv = client.get('/admin/model2/?flt0_0=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' in data)
ok_('test2_val_4' not in data)
# integer - equals - test validation
rv = client.get('/admin/model2/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not equal
rv = client.get('/admin/model2/?flt0_1=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' not in data)
ok_('test2_val_4' in data)
# integer - greater
rv = client.get('/admin/model2/?flt0_2=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' not in data)
ok_('test2_val_4' in data)
# integer - smaller
rv = client.get('/admin/model2/?flt0_3=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' in data)
ok_('test2_val_4' not in data)
# integer - empty
rv = client.get('/admin/model2/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test2_val_3' not in data)
ok_('test2_val_4' not in data)
# integer - not empty
rv = client.get('/admin/model2/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test2_val_3' in data)
ok_('test2_val_4' in data)
# integer - in list
rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test2_val_3' in data)
ok_('test2_val_4' in data)
# integer - in list - test validation
rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not in list
rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test2_val_3' not in data)
ok_('test2_val_4' not in data)
# Test float filter
view = CustomModelView(Model2, db.session, column_filters=['float_field'],
endpoint="_float")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# float - equals
rv = client.get('/admin/_float/?flt0_0=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' in data)
ok_('test2_val_4' not in data)
# float - equals - test validation
rv = client.get('/admin/_float/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not equal
rv = client.get('/admin/_float/?flt0_1=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' not in data)
ok_('test2_val_4' in data)
# float - greater
rv = client.get('/admin/_float/?flt0_2=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' not in data)
ok_('test2_val_4' in data)
# float - smaller
rv = client.get('/admin/_float/?flt0_3=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_3' in data)
ok_('test2_val_4' not in data)
# float - empty
rv = client.get('/admin/_float/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test2_val_3' not in data)
ok_('test2_val_4' not in data)
# float - not empty
rv = client.get('/admin/_float/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test2_val_3' in data)
ok_('test2_val_4' in data)
# float - in list
rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test2_val_3' in data)
ok_('test2_val_4' in data)
# float - in list - test validation
rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not in list
rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test2_val_3' not in data)
ok_('test2_val_4' not in data)
# Test filters to joined table field
view = CustomModelView(
Model2, db.session,
endpoint='_model2',
column_filters=['model1.bool_field'],
column_list=[
'string_field',
'model1.id',
'model1.bool_field',
]
)
admin.add_view(view)
rv = client.get('/admin/_model2/?flt1_0=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' not in data)
ok_('test2_val_3' not in data)
ok_('test2_val_4' not in data)
# Test human readable URLs
view = CustomModelView(
Model1, db.session,
column_filters=['test1'],
endpoint='_model3',
named_filter_urls=True
)
admin.add_view(view)
rv = client.get('/admin/_model3/?flt1_test1_equals=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('test1_val_2' not in data)
# Test date, time, and datetime filters
view = CustomModelView(Model1, db.session,
column_filters=['date_field', 'datetime_field', 'time_field'],
endpoint="_datetime")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Date Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'between'),
(5, 'not between'),
(6, 'empty'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']],
[
(7, 'equals'),
(8, 'not equal'),
(9, 'greater than'),
(10, 'smaller than'),
(11, 'between'),
(12, 'not between'),
(13, 'empty'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Time Field']],
[
(14, 'equals'),
(15, 'not equal'),
(16, 'greater than'),
(17, 'smaller than'),
(18, 'between'),
(19, 'not between'),
(20, 'empty'),
])
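    # Date/time filter values are URL-encoded; ranges use "<start> to <end>"
    # (encoded as +to+) and colons become %3A, as in the requests below (a
    # formatting note based on those requests, not an exhaustive spec).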
# date - equals
rv = client.get('/admin/_datetime/?flt0_0=2014-11-17')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - not equal
rv = client.get('/admin/_datetime/?flt0_1=2014-11-17')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - greater
rv = client.get('/admin/_datetime/?flt0_2=2014-11-16')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - smaller
rv = client.get('/admin/_datetime/?flt0_3=2014-11-16')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - between
rv = client.get('/admin/_datetime/?flt0_4=2014-11-13+to+2014-11-20')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - not between
rv = client.get('/admin/_datetime/?flt0_5=2014-11-13+to+2014-11-20')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - empty
rv = client.get('/admin/_datetime/?flt0_6=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('date_obj1' not in data)
ok_('date_obj2' not in data)
    # date - not empty
rv = client.get('/admin/_datetime/?flt0_6=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('date_obj1' in data)
ok_('date_obj2' in data)
# datetime - equals
rv = client.get('/admin/_datetime/?flt0_7=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not equal
rv = client.get('/admin/_datetime/?flt0_8=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - greater
rv = client.get('/admin/_datetime/?flt0_9=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - smaller
rv = client.get('/admin/_datetime/?flt0_10=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - between
rv = client.get('/admin/_datetime/?flt0_11=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not between
rv = client.get('/admin/_datetime/?flt0_12=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - empty
rv = client.get('/admin/_datetime/?flt0_13=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' not in data)
# datetime - not empty
rv = client.get('/admin/_datetime/?flt0_13=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('datetime_obj1' in data)
ok_('datetime_obj2' in data)
# time - equals
rv = client.get('/admin/_datetime/?flt0_14=11%3A10%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - not equal
rv = client.get('/admin/_datetime/?flt0_15=11%3A10%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - greater
rv = client.get('/admin/_datetime/?flt0_16=11%3A09%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - smaller
rv = client.get('/admin/_datetime/?flt0_17=11%3A09%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - between
rv = client.get('/admin/_datetime/?flt0_18=10%3A40%3A00+to+11%3A50%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - not between
rv = client.get('/admin/_datetime/?flt0_19=10%3A40%3A00+to+11%3A50%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - empty
rv = client.get('/admin/_datetime/?flt0_20=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' not in data)
# time - not empty
rv = client.get('/admin/_datetime/?flt0_20=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' in data)
# Test enum filter
view = CustomModelView(Model1, db.session,
column_filters=['enum_field'],
endpoint="_enumfield")
admin.add_view(view)
# enum - equals
rv = client.get('/admin/_enumfield/?flt0_0=model1_v1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('enum_obj1' in data)
ok_('enum_obj2' not in data)
# enum - not equal
rv = client.get('/admin/_enumfield/?flt0_1=model1_v1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('enum_obj1' not in data)
ok_('enum_obj2' in data)
# enum - empty
rv = client.get('/admin/_enumfield/?flt0_2=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('enum_obj1' not in data)
ok_('enum_obj2' not in data)
# enum - not empty
rv = client.get('/admin/_enumfield/?flt0_2=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('enum_obj1' in data)
ok_('enum_obj2' in data)
# enum - in list
rv = client.get('/admin/_enumfield/?flt0_3=model1_v1%2Cmodel1_v2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('enum_obj1' in data)
ok_('enum_obj2' in data)
# enum - not in list
rv = client.get('/admin/_enumfield/?flt0_4=model1_v1%2Cmodel1_v2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('enum_obj1' not in data)
ok_('enum_obj2' not in data)
# Test single custom filter on relation
view = CustomModelView(Model2, db.session,
column_filters = [
filters.FilterEqual(Model1.test1, "Test1")
], endpoint='_relation_test')
admin.add_view(view)
rv = client.get('/admin/_relation_test/?flt1_0=test1_val_1')
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('test1_val_2' not in data)
def test_url_args():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
page_size=2,
column_searchable_list=['test1'],
column_filters=['test1'])
admin.add_view(view)
db.session.add(Model1('data1'))
db.session.add(Model1('data2'))
db.session.add(Model1('data3'))
db.session.add(Model1('data4'))
db.session.commit()
client = app.test_client()
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('data1' in data)
ok_('data3' not in data)
# page
rv = client.get('/admin/model1/?page=1')
data = rv.data.decode('utf-8')
ok_('data1' not in data)
ok_('data3' in data)
# sort
rv = client.get('/admin/model1/?sort=0&desc=1')
data = rv.data.decode('utf-8')
ok_('data1' not in data)
ok_('data3' in data)
ok_('data4' in data)
# search
rv = client.get('/admin/model1/?search=data1')
data = rv.data.decode('utf-8')
ok_('data1' in data)
ok_('data2' not in data)
rv = client.get('/admin/model1/?search=^data1')
data = rv.data.decode('utf-8')
ok_('data2' not in data)
# like
rv = client.get('/admin/model1/?flt0=0&flt0v=data1')
data = rv.data.decode('utf-8')
ok_('data1' in data)
# not like
rv = client.get('/admin/model1/?flt0=1&flt0v=data1')
data = rv.data.decode('utf-8')
ok_('data2' in data)
def test_non_int_pk():
app, db, admin = setup()
class Model(db.Model):
id = db.Column(db.String, primary_key=True)
test = db.Column(db.String)
db.create_all()
view = CustomModelView(Model, db.session, form_columns=['id', 'test'])
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model/')
eq_(rv.status_code, 200)
rv = client.post('/admin/model/new/',
data=dict(id='test1', test='test2'))
eq_(rv.status_code, 302)
rv = client.get('/admin/model/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1' in data)
rv = client.get('/admin/model/edit/?id=test1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2' in data)
def test_form_columns():
app, db, admin = setup()
class Model(db.Model):
id = db.Column(db.String, primary_key=True)
int_field = db.Column(db.Integer)
datetime_field = db.Column(db.DateTime)
text_field = db.Column(db.UnicodeText)
excluded_column = db.Column(db.String)
class ChildModel(db.Model):
id = db.Column(db.String, primary_key=True)
model_id = db.Column(db.Integer, db.ForeignKey(Model.id))
model = db.relationship(Model, backref='backref')
db.create_all()
view1 = CustomModelView(Model, db.session, endpoint='view1',
form_columns=('int_field', 'text_field'))
view2 = CustomModelView(Model, db.session, endpoint='view2',
form_excluded_columns=('excluded_column',))
view3 = CustomModelView(ChildModel, db.session, endpoint='view3')
form1 = view1.create_form()
form2 = view2.create_form()
form3 = view3.create_form()
ok_('int_field' in form1._fields)
ok_('text_field' in form1._fields)
ok_('datetime_field' not in form1._fields)
ok_('excluded_column' not in form2._fields)
ok_(type(form3.model).__name__ == 'QuerySelectField')
# TODO: form_args
def test_form_override():
app, db, admin = setup()
class Model(db.Model):
id = db.Column(db.String, primary_key=True)
test = db.Column(db.String)
db.create_all()
view1 = CustomModelView(Model, db.session, endpoint='view1')
view2 = CustomModelView(Model, db.session, endpoint='view2', form_overrides=dict(test=fields.FileField))
admin.add_view(view1)
admin.add_view(view2)
eq_(view1._create_form_class.test.field_class, fields.StringField)
eq_(view2._create_form_class.test.field_class, fields.FileField)
def test_form_onetoone():
app, db, admin = setup()
class Model1(db.Model):
id = db.Column(db.Integer, primary_key=True)
test = db.Column(db.String)
class Model2(db.Model):
id = db.Column(db.Integer, primary_key=True)
model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id))
model1 = db.relationship(Model1, backref=db.backref('model2', uselist=False))
db.create_all()
view1 = CustomModelView(Model1, db.session, endpoint='view1')
view2 = CustomModelView(Model2, db.session, endpoint='view2')
admin.add_view(view1)
admin.add_view(view2)
model1 = Model1(test='test')
model2 = Model2(model1=model1)
db.session.add(model1)
db.session.add(model2)
db.session.commit()
eq_(model1.model2, model2)
eq_(model2.model1, model1)
eq_(view1._create_form_class.model2.kwargs['widget'].multiple, False)
eq_(view2._create_form_class.model1.kwargs['widget'].multiple, False)
def test_relations():
# TODO: test relations
pass
def test_on_model_change_delete():
app, db, admin = setup()
Model1, _ = create_models(db)
db.create_all()
class ModelView(CustomModelView):
def on_model_change(self, form, model, is_created):
model.test1 = model.test1.upper()
def on_model_delete(self, model):
self.deleted = True
view = ModelView(Model1, db.session)
admin.add_view(view)
client = app.test_client()
client.post('/admin/model1/new/',
data=dict(test1='test1large', test2='test2'))
model = db.session.query(Model1).first()
eq_(model.test1, 'TEST1LARGE')
url = '/admin/model1/edit/?id=%s' % model.id
client.post(url, data=dict(test1='test1small', test2='test2large'))
model = db.session.query(Model1).first()
eq_(model.test1, 'TEST1SMALL')
url = '/admin/model1/delete/?id=%s' % model.id
client.post(url)
ok_(view.deleted)
def test_multiple_delete():
app, db, admin = setup()
M1, _ = create_models(db)
db.session.add_all([M1('a'), M1('b'), M1('c')])
db.session.commit()
eq_(M1.query.count(), 3)
view = ModelView(M1, db.session)
admin.add_view(view)
client = app.test_client()
rv = client.post('/admin/model1/action/', data=dict(action='delete', rowid=[1, 2, 3]))
eq_(rv.status_code, 302)
eq_(M1.query.count(), 0)
def test_default_sort():
app, db, admin = setup()
M1, _ = create_models(db)
db.session.add_all([M1('c'), M1('b'), M1('a')])
db.session.commit()
eq_(M1.query.count(), 3)
view = CustomModelView(M1, db.session, column_default_sort='test1')
admin.add_view(view)
_, data = view.get_list(0, None, None, None, None)
eq_(len(data), 3)
eq_(data[0].test1, 'a')
eq_(data[1].test1, 'b')
eq_(data[2].test1, 'c')
def test_complex_sort():
app, db, admin = setup()
M1, M2 = create_models(db)
m1 = M1('b')
db.session.add(m1)
db.session.add(M2('c', model1=m1))
m2 = M1('a')
db.session.add(m2)
db.session.add(M2('c', model1=m2))
db.session.commit()
# test sorting on relation string - 'model1.test1'
view = CustomModelView(M2, db.session,
column_list = ['string_field', 'model1.test1'],
column_sortable_list = ['model1.test1'])
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model2/?sort=1')
eq_(rv.status_code, 200)
# test sorting on relation object - M2.string_field
view2 = CustomModelView(M1, db.session,
column_list = ['model2.string_field'],
column_sortable_list = [M2.string_field])
admin.add_view(view2)
client = app.test_client()
rv = client.get('/admin/model1/?sort=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Sort by' in data)
def test_default_complex_sort():
app, db, admin = setup()
M1, M2 = create_models(db)
m1 = M1('b')
db.session.add(m1)
db.session.add(M2('c', model1=m1))
m2 = M1('a')
db.session.add(m2)
db.session.add(M2('c', model1=m2))
db.session.commit()
view = CustomModelView(M2, db.session, column_default_sort='model1.test1')
admin.add_view(view)
_, data = view.get_list(0, None, None, None, None)
eq_(len(data), 2)
eq_(data[0].model1.test1, 'a')
eq_(data[1].model1.test1, 'b')
def test_extra_fields():
app, db, admin = setup()
Model1, _ = create_models(db)
view = CustomModelView(
Model1, db.session,
form_extra_fields={
'extra_field': fields.StringField('Extra Field')
}
)
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
# Check presence and order
data = rv.data.decode('utf-8')
ok_('Extra Field' in data)
pos1 = data.find('Extra Field')
pos2 = data.find('Test1')
ok_(pos2 < pos1)
def test_extra_field_order():
app, db, admin = setup()
Model1, _ = create_models(db)
view = CustomModelView(
Model1, db.session,
form_columns=('extra_field', 'test1'),
form_extra_fields={
'extra_field': fields.StringField('Extra Field')
}
)
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
# Check presence and order
data = rv.data.decode('utf-8')
pos1 = data.find('Extra Field')
pos2 = data.find('Test1')
ok_(pos2 > pos1)
def test_modelview_localization():
def test_locale(locale):
try:
app, db, admin = setup()
app.config['BABEL_DEFAULT_LOCALE'] = locale
babel = Babel(app)
Model1, _ = create_models(db)
view = CustomModelView(
Model1, db.session,
column_filters=['test1', 'bool_field', 'date_field', 'datetime_field', 'time_field']
)
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
except:
print("Error on the following locale:", locale)
raise
locales = ['en', 'cs', 'de', 'es', 'fa', 'fr', 'pt', 'ru', 'zh_CN', 'zh_TW']
for locale in locales:
test_locale(locale)
def test_custom_form_base():
app, db, admin = setup()
class TestForm(form.BaseForm):
pass
Model1, _ = create_models(db)
view = CustomModelView(
Model1, db.session,
form_base_class=TestForm
)
admin.add_view(view)
ok_(hasattr(view._create_form_class, 'test1'))
create_form = view.create_form()
ok_(isinstance(create_form, TestForm))
def test_ajax_fk():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(
Model2, db.session,
url='view',
form_ajax_refs={
'model1': {
'fields': ('test1', 'test2')
}
}
)
admin.add_view(view)
ok_(u'model1' in view._form_ajax_refs)
model = Model1(u'first')
model2 = Model1(u'foo', u'bar')
db.session.add_all([model, model2])
db.session.commit()
# Check loader
loader = view._form_ajax_refs[u'model1']
mdl = loader.get_one(model.id)
eq_(mdl.test1, model.test1)
items = loader.get_list(u'fir')
eq_(len(items), 1)
eq_(items[0].id, model.id)
items = loader.get_list(u'bar')
eq_(len(items), 1)
eq_(items[0].test1, u'foo')
# Check form generation
form = view.create_form()
eq_(form.model1.__class__.__name__, u'AjaxSelectField')
with app.test_request_context('/admin/view/'):
ok_(u'value=""' not in form.model1())
form.model1.data = model
ok_(u'data-json="[%s, "first"]"' % model.id in form.model1())
ok_(u'value="1"' in form.model1())
# Check querying
client = app.test_client()
req = client.get(u'/admin/view/ajax/lookup/?name=model1&query=foo')
eq_(req.data.decode('utf-8'), u'[[%s, "foo"]]' % model2.id)
# Check submitting
req = client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
mdl = db.session.query(Model2).first()
ok_(mdl is not None)
ok_(mdl.model1 is not None)
eq_(mdl.model1.id, model.id)
eq_(mdl.model1.test1, u'first')
def test_ajax_fk_multi():
app, db, admin = setup()
class Model1(db.Model):
__tablename__ = 'model1'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
def __str__(self):
return self.name
table = db.Table('m2m', db.Model.metadata,
db.Column('model1_id', db.Integer, db.ForeignKey('model1.id')),
db.Column('model2_id', db.Integer, db.ForeignKey('model2.id'))
)
class Model2(db.Model):
__tablename__ = 'model2'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
model1_id = db.Column(db.Integer(), db.ForeignKey(Model1.id))
model1 = db.relationship(Model1, backref='models2', secondary=table)
db.create_all()
view = CustomModelView(
Model2, db.session,
url='view',
form_ajax_refs={
'model1': {
'fields': ['name']
}
}
)
admin.add_view(view)
ok_(u'model1' in view._form_ajax_refs)
model = Model1(name=u'first')
db.session.add_all([model, Model1(name=u'foo')])
db.session.commit()
# Check form generation
form = view.create_form()
eq_(form.model1.__class__.__name__, u'AjaxSelectMultipleField')
with app.test_request_context('/admin/view/'):
ok_(u'data-json="[]"' in form.model1())
form.model1.data = [model]
ok_(u'data-json="[[1, "first"]]"' in form.model1())
# Check submitting
client = app.test_client()
client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
mdl = db.session.query(Model2).first()
ok_(mdl is not None)
ok_(mdl.model1 is not None)
eq_(len(mdl.model1), 1)
def test_safe_redirect():
app, db, admin = setup()
Model1, _ = create_models(db)
db.create_all()
view = CustomModelView(Model1, db.session)
admin.add_view(view)
client = app.test_client()
rv = client.post('/admin/model1/new/?url=http://localhost/admin/model2view/',
data=dict(test1='test1large', test2='test2'))
eq_(rv.status_code, 302)
eq_(rv.location, 'http://localhost/admin/model2view/')
rv = client.post('/admin/model1/new/?url=http://google.com/evil/',
data=dict(test1='test1large', test2='test2'))
eq_(rv.status_code, 302)
eq_(rv.location, 'http://localhost/admin/model1/')
|
|
# -*- coding: utf-8 -*-
import re
class Lexer:
def __init__(self,source):
"source's newline must be '\n'. '\r' is regarded as normal charactor"
self.source = source
self.len_ = len(self.source)
self.pos_ = 0
self.nextl = -1
def __getitem__(self,i):
return self.source[i]
def __len__(self):
return self.len_
def get_remainder(self):
return self[self.pos():]
def pos(self):
return self.pos_
def get_line(self,advance=False):
if self.eof():
return None
if self.nextl < 0:
t = self.source.find('\n',self.pos_)
if t<0:
self.nextl = self.len_
else:
self.nextl = t
f = self.pos_
t = self.nextl
if advance:
self.set_pos(t+1)
return self.source[f:t]
def set_pos(self,pos):
self.pos_ = pos
self.nextl = -1
def startswith(self,prefix):
return self.source.startswith(prefix,self.pos_)
def skipspace(self):
while not self.eof() and self.source[self.pos_] in ' \t\r':
self.skip(1)
def skipline(self):
self.get_line(True)
def skip(self,num):
self.set_pos(self.pos_+num)
def search(self,regex,advance=False):
m = regex.search(self.source,self.pos_)
if advance:
if m:
self.set_pos(m.end())
else:
self.set_pos(self.len_)
return m
def eof(self):
return self.len_ <= self.pos_
class ReLexer:
def __init__(self,lexer):
self.stack = None
self.lexer = lexer
def __getattr__(self,key):
return getattr(self.lexer,key)
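    # lex(regex) returns either (text, None) -- the plain text up to the next
    # match of `regex` -- or (None, match) for the match itself. When a match
    # is found after pending text, the match is stashed in `stack` and handed
    # out by the following call.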
def lex(self,regex):
if self.stack:
r = self.stack
self.stack = None
return r
else:
pos = self.pos_
m = self.search(regex,True)
if m:
text = self.lexer[pos:m.start()]
if text:
self.stack = None,m
else:
return None,m
else:
text = self.lexer[pos:self.pos_]
return text,None
def eof(self):
return self.lexer.eof() and not self.stack
class WikiParserBase(object):
def parse(self, source):
lexer = Lexer(source)
self.begin_document()
parse_body(lexer, self, 0)
return self.end_document()
def begin_document(self):
pass
def end_document(self):
pass
########### Section ###########
def begin_section(self,sectionn,title_parser,aname,position):
'''
        sectionn: section depth
title_parser(doci):
aname:
position:
'''
pass
def end_section(self):
pass
########### Blockquote ###########
def begin_blockquote(self):
pass
def end_blockquote(self):
pass
########### Para ###########
def begin_para(self):
pass
def end_para(self):
pass
########### Pre ###########
def begin_pre(self,option):
pass
def end_pre(self):
pass
########### Inline Elements ###########
def br(self):
pass
def horizontal_line(self):
pass
def text_pre(self,text):
pass
def text(self,text):
pass
def command(self,cmdname,params):
pass
def empathis(self,text):
pass
def deleteline(self,text):
pass
def underline(self,text):
pass
def identity(self,ids):
pass
########### Link ###########
    # label may be None
def link_uri(self,label,uri):
pass
def link_wiki(self,label,wikiname,wikianame):
pass
# <a href=uri><img/></a>
def link_img_uri(self,img_params,uri):
pass
def link_img_wiki(self,img_params,wikiname):
pass
    ########### Footnote ###########
def begin_footnote(self):
pass
def end_footnote(self):
pass
########### List ###########
def begin_list(self):
pass
def end_list(self):
pass
def begin_list_element(self,level,ol):
pass
def end_list_element(self):
pass
########### Table ###########
def begin_table(self):
pass
def end_table(self):
pass
def begin_table_row(self):
pass
def end_table_row(self):
pass
def begin_table_cell(self,ishead,style_class):
pass
def end_table_cell(self):
pass
class WikiParserDump(WikiParserBase):
def __init__(self,writer):
self.w = writer
def begin_document(self):
self.w.write('<doc>\n')
def end_document(self):
self.w.write('</doc>\n')
########### Section ###########
def begin_section(self,sectionn,title_parser,aname,position):
'''
        sectionn: section depth
title_parser(doci):
aname:
position:
'''
self.w.write('<section num="%s">\n'%sectionn)
self.w.write('<title>')
title_parser(DI_dump(self.w))
self.w.write('</title>')
def end_section(self):
self.w.write('</section>\n')
########### Blockquote ###########
def begin_blockquote(self):
self.w.write('<blockquote>\n')
def end_blockquote(self):
self.w.write('</blockquote>\n')
########### Para ###########
def begin_para(self):
self.w.write('<p>\n')
def end_para(self):
self.w.write('</p>\n')
########### Pre ###########
def begin_pre(self,option):
self.w.write('<pre class="%s">\n'%option)
def end_pre(self):
self.w.write('</pre>\n')
########### Inline Elements ###########
def br(self):
self.w.write('<br/>\n')
def horizontal_line(self):
self.w.write('<hr/>\n')
def text_pre(self,text):
self.w.write(text)
def text(self,text):
self.w.write(text)
def command(self,cmdname,params):
        self.w.write('<command name="%s" params="%s"/>\n'%(cmdname,params))
def empathis(self,text):
self.w.write('<empathis>\n')
self.w.write(text)
self.w.write('</empathis>\n')
def deleteline(self,text):
self.w.write('<deleteline>\n')
self.w.write(text)
self.w.write('</deleteline>\n')
def underline(self,text):
self.w.write('<underline>\n')
self.w.write(text)
self.w.write('</underline>\n')
def identity(self,ids):
for i in ids:
self.w.write('<empathis>\n')
self.w.write(i)
self.w.write('</empathis>\n')
########### Link ###########
    # label may be None
def link_uri(self,label,uri):
self.w.write('<a href="%s">%s</a>\n'%(uri,label))
def link_wiki(self,label,wikiname,wikianame):
self.w.write('<a href="%s">%s</a>\n'%(wikiname+"#"+wikianame,label))
# <a href=uri><img/></a>
def link_img_uri(self,img_params,uri):
        self.w.write('<a href="%s"><img src="%s"/></a>\n'%(uri,img_params))
def link_img_wiki(self,img_params,wikiname):
self.w.write('<a href="%s"><img src="%s"/></a>\n'%(wikiname,img_params))
    ########### Footnote ###########
def begin_footnote(self):
        self.w.write('<footnote>\n')
pass
def end_footnote(self):
        self.w.write('</footnote>\n')
pass
########### List ###########
def begin_list(self):
self.w.write('<list>\n')
def end_list(self):
self.w.write('</list>\n')
pass
def begin_list_element(self,level,ol):
        self.w.write('<li level="%s">\n'%level)
pass
def end_list_element(self):
self.w.write('</li>\n')
########### Table ###########
def begin_table(self):
self.w.write('<table>\n')
def end_table(self):
self.w.write('</table>\n')
def begin_table_row(self):
self.w.write('<tr>\n')
def end_table_row(self):
self.w.write('</tr>\n')
def begin_table_cell(self,ishead,style_class):
self.w.write('<td class="%s">\n'%style_class)
def end_table_cell(self):
self.w.write('</td>\n')
re_section = re.compile(r'(.+)\[#([_a-zA-Z0-9]+)\]\s*',re.U)
re_cmd0 = re.compile(r'#(\w+):(.*)$',re.U)
re_cmd1 = re.compile(r'#(\w+)\(([^)]*)\)$',re.U)
re_cmd2 = re.compile(r'#(\w+)$',re.U)
def parse_body(lexer,doci,section):
'''
    Parse data from `lexer`, interpreting it and writing the result to `doci`.
    A negative `section` means the context is inside a footnote rather than a section body.
'''
class qstack:
def __init__(self,doci):
self.d = doci
self.n = 0
def close(self):
if self.n>0:
self.d.end_blockquote()
self.n -= 1
def closeall(self):
while self.n>0:
self.close()
def push(self):
self.closeall()
self.n+=1
self.d.begin_blockquote()
class pstack:
def __init__(self,doci):
self.d = doci
self.n = False
def close(self):
if self.n:
self.d.end_para()
self.n = False
def push(self):
self.close()
assert not self.n
self.n = True
self.d.begin_para()
def in_p(self):
return self.n
bq = qstack(doci)
p = pstack(doci)
def close():
p.close()
bq.closeall()
while not lexer.eof():
if section>=0 and lexer.startswith('*'):
close()
l = lexer.get_line()
inline = l.lstrip('*')
n = len(l)-len(inline)
if n>section:
aname = None
m = re_section.match(inline)
if m:
inline = m.group(1)
aname = m.group(2)
def title_parser(docit):
parse_line(Lexer(inline),docit,False)
doci.begin_section(n,title_parser,aname,lexer.pos())
lexer.skipline()
parse_body(lexer,doci,n)
doci.end_section()
continue
else:
break
if lexer.startswith('|'):
p.close()
parse_table(lexer,doci)
continue
if lexer.startswith('==='):
l = lexer.get_line()
if l.strip().replace('=','')=='':
lexer.skipline()
p.close()
doci.horizontal_line()
continue
if lexer.startswith('+') or lexer.startswith('-'):
p.close()
parse_list(lexer,doci)
continue
lexer.skipspace()
if lexer.startswith('#'):
l = lexer.get_line()
m = re_cmd0.match(l.strip())
if m:
lexer.skipline()
p.close()
doci.command(m.group(1),m.group(2))
continue
m = re_cmd1.match(l.strip())
if m:
lexer.skipline()
p.close()
doci.command(m.group(1),m.group(2))
continue
m = re_cmd2.match(l.strip())
if m:
lexer.skipline()
p.close()
doci.command(m.group(1),'')
continue
# ['<<<','>>>','{{{','}}}','/+','))']:
if lexer.startswith('<<<'):
lexer.skip(3)
close()
bq.push()
continue
if lexer.startswith('>>>'):
lexer.skip(3)
close()
continue
if lexer.startswith('/+'):
parse_comment(lexer)
continue
if lexer.startswith('{{{'):
p.close()
parse_pre(lexer,doci)
continue
if lexer.startswith('}}}'):
lexer.skip(3)
doci.end_pre()
continue
# escape footnote
if lexer.startswith('))'):
if section < 0:
close()
lexer.skip(2)
break
l = lexer.get_line()
if not l or l.isspace():
p.close()
lexer.skipline()
continue
if p.in_p():
doci.br()
else:
p.push()
parse_line(lexer,doci,True,section<0)
p.close()
bq.closeall()
def parse_list(lexer,doci):
doci.begin_list()
while not lexer.eof():
l = lexer.get_line()
if l.startswith('+'):
inline = l.lstrip('+')
ol = True
elif l.startswith('-'):
inline = l.lstrip('-')
ol = False
else:
break
level = len(l)-len(inline)
doci.begin_list_element(level,ol)
lexer.skip(level)
parse_line(lexer,doci,True)
doci.end_list_element()
doci.end_list()
############## Line Parsing #######################
re_cmd_img = re.compile(r'#img\(([^)]*)\)$',re.U)
'''
urllabel: [[(Label:)Link]]
where Link = URL | WikiName(#aname)
nakedurl: URL
' ' ' emphasis ' ' '
___underline___
%%%delete%%%
//comment
/+ /+ nest comment +/ +/
((footnote))
{block}
'''
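# For example, the inline markup
#   '''strong''' text, ___underlined___, [[Example:http://example.com]]
# is tokenized by re_line below, and parse_line dispatches each token to the
# matching document-interface callback (empathis, underline, link_uri).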
re_line = re.compile(r"""
\n | ~
| <<< | >>>
| {{{ | }}}
| /\+
| '''(?P<empathis>[^\n]+?)'''
| %%%(?P<deleteline>[^\n]+?)%%%
| ___(?P<underline>[^\n]+?)___
| s?https?:\/\/[-_.!~*\'\(\)a-zA-Z0-9;\/?:\@&=+\$,%#]+
| \(\(
| \)\)
| //
| @(?P<identity>[\w\s\-_,]+);
| \[\[
(?:(?P<label>(?:(?<!\]\]).)+):)??
(?:
(?P<url>s?https?:\/\/[-_.!~*\'\(\)a-zA-Z0-9;\/?:\@&=+\$,%#]+?)
| (?P<wikiname>[\.\w\-+_][\.\w\-+_/\s]*)?(?P<aname>\#[_a-zA-Z0-9]+)?
)
\]\]
""",(re.VERBOSE|re.U))
def parse_line(lexer, doci, enable_footnote, within_footnote=False):
def gg(m,i):
return m.group(i)
l = ReLexer(lexer)
while not l.eof():
text,m = l.lex(re_line)
if text:
doci.text(text)
continue
elif m:
pt = m.group(0)
if pt=='\n':
break
elif pt in ['<<<','>>>','{{{','}}}','/+']:
lexer.skip(-len(pt))
break
elif pt=='))':
if within_footnote:
lexer.skip(-len(pt))
break
else:
doci.text('))')
elif pt=='~':
doci.br()
elif pt.startswith("''"):
doci.empathis(gg(m,'empathis'))
elif pt.startswith("%%"):
doci.deleteline(gg(m,'deleteline'))
elif pt.startswith("__"):
doci.underline(gg(m,'underline'))
elif pt=='//':
lexer.skipline()
break
elif pt=='((':
if enable_footnote:
doci.begin_footnote()
parse_body(lexer,doci,-1)
doci.end_footnote()
continue
else:
doci.text('((')
elif pt.startswith('shttp') or pt.startswith('http'):
doci.link_uri(pt,pt)
elif pt.startswith('@'):
i = [s.strip() for s in gg(m,'identity').split(',')]
doci.identity(i)
elif pt.startswith('[['):
label = gg(m,'label')
url = gg(m,'url')
wikiname = gg(m,'wikiname')
if not wikiname:
wikiname = '.'
wikianame = gg(m,'aname')
if not wikianame:
wikianame = ''
if label:
m = re_cmd_img.match(label)
if m:
params = m.group(1).split(',')
if url:
doci.link_img_uri(params,url)
else:
doci.link_img_wiki(params,wikiname)
elif url:
doci.link_uri(label,url)
else:
doci.link_wiki(label,wikiname,wikianame)
else:
if url:
doci.link_uri(None,url)
else:
doci.link_wiki(None,wikiname,wikianame)
else:
assert not 'unreachable'
re_comment = re.compile(r'//|\+/|/\+',re.U)
def parse_comment(lexer):
assert lexer.startswith('/+')
commentn = 0
while not lexer.eof():
m = lexer.search(re_comment,True)
if not m:
break
pt = m.group(0)
if pt=='//':
lexer.skipline()
elif pt=='/+':
commentn += 1
elif pt=='+/':
commentn -= 1
if commentn<=0:
return
re_pre = re.compile(r'}}}|\n',re.U)
def parse_pre(lexer,doci):
assert lexer.startswith('{{{')
lexer.skip(3)
option = None
if lexer.startswith('#'):
lexer.skip(1)
option = lexer.get_line(True).strip() or None
doci.begin_pre(option)
l = ReLexer(lexer)
while not l.eof():
text,m = l.lex(re_pre)
if text:
doci.text_pre(text)
continue
else:
pt = m.group(0)
if pt=='\n':
doci.text_pre('\n')
continue
if pt=='}}}':
break
doci.end_pre()
############## Table Parsing #######################
def lstrip1(s,m):
assert s[:len(m)]==m
return s[len(m):]
re_table_optwidth = re.compile(r'width\s*=\s*(\d+(?:%|px|em)?)$',re.U)
class Cell:
def __init__(self):
self.calign = ''
self.cwidth = ''
self.cth = False
self.colspan = 1
self.rowspan = 1
self.data = ''
self.skip = False
def get_class(self):
cattr = {'class':cls_align[self.calign]}
if self.colspan>1:
cattr.update({'colspan':str(self.colspan)})
if self.rowspan>1:
cattr.update({'rowspan':str(self.rowspan)})
if self.cwidth:
cattr.update({'style':'width:%s'%self.cwidth})
return cattr
cls_align = {
'c':'t_center',
'r':'t_right',
'l':'t_left',
'':''
}
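# Table syntax handled by parse_table below: a table row is a line starting
# with '|', with '|' as the cell separator. If the row does not end with '|',
# the text after the last '|' is a row modifier: 'c' marks a column-format row
# (per-column alignment r/c/l, 'th', or width=...), while 'h' marks a header
# row. Within a data row, a cell containing only '>', '<' or '^' is merged
# into its right, left or upper neighbour (colspan/rowspan), and a 'CENTER:',
# 'RIGHT:' or 'LEFT:' prefix overrides that cell's alignment.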
def parse_table(lexer,doci):
table = []
modrow = None
num = -1
activerow = None
# phase 1. scanning
while not lexer.eof():
if not lexer.startswith('|'):
break
l = lexer.get_line().rstrip()
mod = ''
if l.endswith('|'):
cols = l[1:-1].split('|')
else:
v = l[1:].split('|')
cols = v[:-1]
mod = (v[-1]).strip()
if num>=0 and num!=len(cols):
break
num = len(cols)
# ok pop from stream
lexer.skipline()
# empty table
if num==0:
break
# initialization at the first row
if not modrow:
modrow = [Cell() for i in range(num)]
if not activerow:
activerow = [-1 for i in range(num)]
if mod=='c':
for i in range(num):
for c in cols[i].lower().split(','):
c = c.strip()
if c in 'rcl':
modrow[i].calign=c
elif c=='th':
modrow[i].cth=True
elif c.startswith('w'):
m=re_table_optwidth.match(c)
if m:
modrow[i].cwidth = m.group(1)
else:
table.append([Cell() for i in range(num)])
row = table[-1]
rowi = len(table)-1
colspanc = 0
activecol = -1
for i,c in enumerate(cols):
row[i].calign=modrow[i].calign
row[i].cwidth=modrow[i].cwidth
row[i].cth = modrow[i].cth or (mod=='h')
row[i].data = ''
cs = c.strip()
if cs=='>':
activecol = -1
if i+1==num:
activecol = i
row[i].colspan += colspanc
colspanc = 0
activerow[i] = rowi
else:
row[i].skip = True
colspanc += 1
elif cs=='<':
if activecol < 0:
activecol = i
row[i].colspan += colspanc
colspanc = 0
activerow[i] = rowi
else:
row[i].skip = True
row[activecol].colspan += 1
elif cs=='^':
if activerow[i] < 0:
activerow[i] = rowi
else:
target = table[activerow[i]][i]
if target.colspan > 1 or target.skip:
activerow[i] = rowi
else:
target.rowspan += 1
row[i].skip = True
else:
activecol = i
row[i].colspan += colspanc
colspanc = 0
activerow[i] = rowi
if c.startswith('CENTER:'):
c = lstrip1(c,'CENTER:')
row[i].calign='c'
elif c.startswith('RIGHT:'):
c = lstrip1(c,'RIGHT:')
row[i].calign='r'
elif c.startswith('LEFT:'):
c = lstrip1(c,'LEFT:')
row[i].calign='l'
row[i].data = c
# phase 2, dump
doci.begin_table()
for rowi,r in enumerate(table):
doci.begin_table_row()
for c in r:
if c.skip:
continue
doci.begin_table_cell(ishead=c.cth,style_class=c.get_class())
l = Lexer(c.data)
parse_line(l,doci,True)
doci.end_table_cell()
doci.end_table_row()
doci.end_table()
############## Test #######################
if __name__=='__main__':
import sys
print('test parser.py')
sin = sys.stdin.read()
sout = sys.stdout
    WikiParserDump(sout).parse(sin)
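    # Minimal in-memory example as well; it avoids sections because
    # WikiParserDump.begin_section relies on a DI_dump helper that is not
    # defined in this module.
    WikiParserDump(sout).parse(u"'''bold''' text and [[Label:http://example.com]]\n")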
|
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from collections import OrderedDict
from functools import partial
import yaml
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader
from yaml import SafeDumper
"""
Wrapper around PyYAML to provide sane defaults ensuring that dump/load does
not damage content, keeps ordering, use always block-style and use four
spaces indents to get readable YAML and quotes and folds texts in a sane way.
Use the `load` function to get a primitive type from a YAML string and the
`dump` function to get a YAML string from a primitive type.
Load and dump rely on subclasses of SafeLoader and SafeDumper respectively
doing all the dirty bidding to get PyYAML straight.
"""
# Check:
# https://github.com/ralienpp/reyaml/blob/master/reyaml/__init__.py
# https://pypi.python.org/pypi/PyYAML.Yandex/3.11.1
# https://pypi.python.org/pypi/ruamel.yaml/0.9.1
# https://pypi.python.org/pypi/yaml2rst/0.2
def load(s):
"""
Return an object safely loaded from YAML string `s`. `s` must be unicode
or be a string that converts to unicode without errors.
"""
return yaml.load(s, Loader=SaneLoader)
def dump(obj):
"""
Return a safe YAML unicode string representation from `obj`.
"""
kwargs = dict(
Dumper=SaneDumper,
default_flow_style=False,
default_style=None,
canonical=False,
allow_unicode=True,
# do not encode Unicode
encoding=None,
indent=4,
width=80,
line_break='\n',
explicit_start=False,
explicit_end=False,
)
return yaml.dump(obj, **kwargs)
class SaneLoader(SafeLoader):
pass
def string_loader(loader, node):
"""
Ensure that a scalar type (a value) is returned as a plain unicode string.
"""
return loader.construct_scalar(node)
SaneLoader.add_constructor(u'tag:yaml.org,2002:str', string_loader)
# Load most scalar types as strings: nulls, ints (such as a version "01"),
# floats (such as a version "2.20") and timestamps are all loaded as unicode
# strings. This avoids unwanted type conversions for unquoted strings and the
# resulting content damage. It overrides the implicit resolvers. Callers must
# handle type conversion explicitly from unicode to other types in the loaded
# objects.
SaneLoader.add_constructor(u'tag:yaml.org,2002:null', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:timestamp', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:float', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:int', string_loader)
# keep boolean conversion
# SaneLoader.add_constructor(u'tag:yaml.org,2002:boolean', string_loader)
def ordered_loader(loader, node):
"""
    Ensure that the order of YAML maps is preserved by loading them into an OrderedDict.
"""
assert isinstance(node, yaml.MappingNode)
omap = OrderedDict()
yield omap
for key, value in node.value:
key = loader.construct_object(key)
value = loader.construct_object(value)
omap[key] = value
SaneLoader.add_constructor(u'tag:yaml.org,2002:map', ordered_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:omap', ordered_loader)
class SaneDumper(SafeDumper):
"""
    Ensure that list items are always indented.
"""
def increase_indent(self, flow=False, indentless=False): # @UnusedVariable
return super(SaneDumper, self).increase_indent(flow, indentless=False)
def ordered_dumper(dumper, data):
"""
Ensure that maps are always dumped in the items order.
"""
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items())
SaneDumper.add_representer(OrderedDict, ordered_dumper)
def null_dumper(dumper, value): # @UnusedVariable
"""
Always dump nulls as empty string.
"""
return dumper.represent_scalar(u'tag:yaml.org,2002:null', u'')
SafeDumper.add_representer(type(None), null_dumper)
def string_dumper(dumper, value, _tag=u'tag:yaml.org,2002:str'):
"""
Ensure that all scalars are dumped as UTF-8 unicode, folded and quoted in
the sanest and most readable way.
"""
style = None
if not isinstance(value, basestring):
value = repr(value)
if isinstance(value, str):
value = value.decode('utf-8')
folded_style = '>'
verbatim_style = '|'
# single_style = "'"
# double_style = '"'
long_lines = any(len(line) > 40 for line in value.splitlines(False)) and ' ' in value
multilines = '\n' in value
# single_quote = "'" in value
# double_quote = '"' in value
# colon_space = ': ' in value
# hash_space = '# ' in value
if multilines:# or colon_space or hash_space or (single_quote and double_quote) or double_quote:
style = verbatim_style
elif long_lines:
style = folded_style
# elif single_quote and double_quote:
# style = folded_style
# elif single_quote:
# style = double_style
# elif double_quote:
# style = single_style
return dumper.represent_scalar(_tag, value, style=style)
SaneDumper.add_representer(str, string_dumper)
SaneDumper.add_representer(unicode, string_dumper)
SaneDumper.add_representer(int, partial(string_dumper, _tag=u'tag:yaml.org,2002:int'))
SaneDumper.add_representer(float, partial(string_dumper, _tag=u'tag:yaml.org,2002:float'))
def boolean_dumper(dumper, value):
"""
Dump booleans as yes or no.
"""
value = u'yes' if value else u'no'
style = None
return dumper.represent_scalar(u'tag:yaml.org,2002:bool', value, style=style)
SaneDumper.add_representer(bool, boolean_dumper)
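# Minimal usage sketch of the wrappers above, kept under a __main__ guard so
# that importing this module stays side-effect free; the sample document is an
# arbitrary illustration.
if __name__ == '__main__':
    text = u'version: 2.20\nname: sample\nitems:\n    - one\n    - two\n'
    data = load(text)
    # Scalars such as 2.20 stay unicode strings and mapping order is preserved.
    assert data['version'] == u'2.20'
    assert list(data.keys()) == [u'version', u'name', u'items']
    print(dump(data))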
|
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.ConductingEquipment import ConductingEquipment
class TransformerWinding(ConductingEquipment):
"""A winding is associated with each defined terminal of a transformer (or phase shifter).
"""
def __init__(self, connectionType="Yn", windingType="primary", x=0.0, grounded=False, g=0.0, r=0.0, x0=0.0, ratedU=0.0, ratedS=0.0, emergencyS=0.0, rground=0.0, shortTermS=0.0, r0=0.0, g0=0.0, insulationU=0.0, b=0.0, b0=0.0, xground=0.0, From_WindingTest=None, To_WindingTest=None, RatioTapChanger=None, PowerTransformer=None, PhaseTapChanger=None, *args, **kw_args):
"""Initialises a new 'TransformerWinding' instance.
@param connectionType: The type of connection of the winding. Values are: "Yn", "Y", "D", "I", "Z", "A", "Zn"
@param windingType: The type of winding. Values are: "primary", "quaternary", "secondary", "tertiary"
@param x: Positive sequence series reactance of the winding. For a two winding transformer, the full reactance of the transformer should be entered on the primary (high voltage) winding.
@param grounded: Set if the winding is grounded.
@param g: Magnetizing branch conductance (G mag).
@param r: Positive sequence series resistance of the winding. For a two winding transformer, the full resistance of the transformer should be entered on the primary (high voltage) winding.
@param x0: Zero sequence series reactance of the winding.
@param ratedU: The rated voltage (phase-to-phase) of the winding, usually the same as the neutral voltage.
@param ratedS: The normal apparent power rating for the winding
@param emergencyS: The apparent power that the winding can carry under emergency conditions.
@param rground: Ground resistance path through connected grounding transformer.
@param shortTermS: Apparent power that the winding can carry for a short period of time.
@param r0: Zero sequence series resistance of the winding.
@param g0: Zero sequence magnetizing branch conductance.
@param insulationU: Basic insulation level voltage rating
@param b: Magnetizing branch susceptance (B mag). The value can be positive or negative.
@param b0: Zero sequence magnetizing branch susceptance.
@param xground: Ground reactance path through connected grounding transformer.
@param From_WindingTest: The transformer winding tests for which the transformer winding (terminal) participates as the 'from' part of the test.
        @param To_WindingTest: The transformer winding tests for which the transformer winding (terminal) participates as the 'to' end of the test.
@param RatioTapChanger: The ratio tap changer associated with the transformer winding.
@param PowerTransformer: A transformer has windings
@param PhaseTapChanger: The phase tap changer associated with the transformer winding.
"""
#: The type of connection of the winding. Values are: "Yn", "Y", "D", "I", "Z", "A", "Zn"
self.connectionType = connectionType
#: The type of winding. Values are: "primary", "quaternary", "secondary", "tertiary"
self.windingType = windingType
#: Positive sequence series reactance of the winding. For a two winding transformer, the full reactance of the transformer should be entered on the primary (high voltage) winding.
self.x = x
#: Set if the winding is grounded.
self.grounded = grounded
#: Magnetizing branch conductance (G mag).
self.g = g
#: Positive sequence series resistance of the winding. For a two winding transformer, the full resistance of the transformer should be entered on the primary (high voltage) winding.
self.r = r
#: Zero sequence series reactance of the winding.
self.x0 = x0
#: The rated voltage (phase-to-phase) of the winding, usually the same as the neutral voltage.
self.ratedU = ratedU
#: The normal apparent power rating for the winding
self.ratedS = ratedS
#: The apparent power that the winding can carry under emergency conditions.
self.emergencyS = emergencyS
#: Ground resistance path through connected grounding transformer.
self.rground = rground
#: Apparent power that the winding can carry for a short period of time.
self.shortTermS = shortTermS
#: Zero sequence series resistance of the winding.
self.r0 = r0
#: Zero sequence magnetizing branch conductance.
self.g0 = g0
#: Basic insulation level voltage rating
self.insulationU = insulationU
#: Magnetizing branch susceptance (B mag). The value can be positive or negative.
self.b = b
#: Zero sequence magnetizing branch susceptance.
self.b0 = b0
#: Ground reactance path through connected grounding transformer.
self.xground = xground
self._From_WindingTest = []
self.From_WindingTest = [] if From_WindingTest is None else From_WindingTest
self._To_WindingTest = []
self.To_WindingTest = [] if To_WindingTest is None else To_WindingTest
self._RatioTapChanger = None
self.RatioTapChanger = RatioTapChanger
self._PowerTransformer = None
self.PowerTransformer = PowerTransformer
self._PhaseTapChanger = None
self.PhaseTapChanger = PhaseTapChanger
super(TransformerWinding, self).__init__(*args, **kw_args)
_attrs = ["connectionType", "windingType", "x", "grounded", "g", "r", "x0", "ratedU", "ratedS", "emergencyS", "rground", "shortTermS", "r0", "g0", "insulationU", "b", "b0", "xground"]
_attr_types = {"connectionType": str, "windingType": str, "x": float, "grounded": bool, "g": float, "r": float, "x0": float, "ratedU": float, "ratedS": float, "emergencyS": float, "rground": float, "shortTermS": float, "r0": float, "g0": float, "insulationU": float, "b": float, "b0": float, "xground": float}
_defaults = {"connectionType": "Yn", "windingType": "primary", "x": 0.0, "grounded": False, "g": 0.0, "r": 0.0, "x0": 0.0, "ratedU": 0.0, "ratedS": 0.0, "emergencyS": 0.0, "rground": 0.0, "shortTermS": 0.0, "r0": 0.0, "g0": 0.0, "insulationU": 0.0, "b": 0.0, "b0": 0.0, "xground": 0.0}
_enums = {"connectionType": "WindingConnection", "windingType": "WindingType"}
_refs = ["From_WindingTest", "To_WindingTest", "RatioTapChanger", "PowerTransformer", "PhaseTapChanger"]
_many_refs = ["From_WindingTest", "To_WindingTest"]
def getFrom_WindingTest(self):
"""The transformer winding tests for which the transformer winding (terminal) participates as the 'from' part of the test.
"""
return self._From_WindingTest
def setFrom_WindingTest(self, value):
for x in self._From_WindingTest:
x.From_TransformerWinding = None
for y in value:
y._From_TransformerWinding = self
self._From_WindingTest = value
From_WindingTest = property(getFrom_WindingTest, setFrom_WindingTest)
def addFrom_WindingTest(self, *From_WindingTest):
for obj in From_WindingTest:
obj.From_TransformerWinding = self
def removeFrom_WindingTest(self, *From_WindingTest):
for obj in From_WindingTest:
obj.From_TransformerWinding = None
def getTo_WindingTest(self):
"""The winding winding tests for which the transformer winding (terminal) participates as the 'to' end of the test.
"""
return self._To_WindingTest
def setTo_WindingTest(self, value):
for x in self._To_WindingTest:
x.To_TransformerWinding = None
for y in value:
y._To_TransformerWinding = self
self._To_WindingTest = value
To_WindingTest = property(getTo_WindingTest, setTo_WindingTest)
def addTo_WindingTest(self, *To_WindingTest):
for obj in To_WindingTest:
obj.To_TransformerWinding = self
def removeTo_WindingTest(self, *To_WindingTest):
for obj in To_WindingTest:
obj.To_TransformerWinding = None
def getRatioTapChanger(self):
"""The ratio tap changer associated with the transformer winding.
"""
return self._RatioTapChanger
def setRatioTapChanger(self, value):
if self._RatioTapChanger is not None:
self._RatioTapChanger._TransformerWinding = None
self._RatioTapChanger = value
if self._RatioTapChanger is not None:
self._RatioTapChanger.TransformerWinding = None
self._RatioTapChanger._TransformerWinding = self
RatioTapChanger = property(getRatioTapChanger, setRatioTapChanger)
def getPowerTransformer(self):
"""A transformer has windings
"""
return self._PowerTransformer
def setPowerTransformer(self, value):
if self._PowerTransformer is not None:
filtered = [x for x in self.PowerTransformer.TransformerWindings if x != self]
self._PowerTransformer._TransformerWindings = filtered
self._PowerTransformer = value
if self._PowerTransformer is not None:
if self not in self._PowerTransformer._TransformerWindings:
self._PowerTransformer._TransformerWindings.append(self)
PowerTransformer = property(getPowerTransformer, setPowerTransformer)
def getPhaseTapChanger(self):
"""The phase tap changer associated with the transformer winding.
"""
return self._PhaseTapChanger
def setPhaseTapChanger(self, value):
if self._PhaseTapChanger is not None:
self._PhaseTapChanger._TransformerWinding = None
self._PhaseTapChanger = value
if self._PhaseTapChanger is not None:
self._PhaseTapChanger.TransformerWinding = None
self._PhaseTapChanger._TransformerWinding = self
PhaseTapChanger = property(getPhaseTapChanger, setPhaseTapChanger)
|
|
#
# (C) Copyright 2003-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# pylint: disable-msg=W0201
"""Core XMPP stream functionality.
Normative reference:
- `RFC 6120 <http://xmpp.org/rfcs/rfc6120.html>`__
"""
__docformat__ = "restructuredtext en"
import inspect
import logging
import uuid
import re
import threading
from .etree import ElementTree, element_to_unicode
from .xmppparser import XMLStreamHandler
from .error import StreamErrorElement
from .jid import JID
from .exceptions import StreamError
from .exceptions import FatalStreamError, StreamParseError
from .constants import STREAM_QNP, XML_LANG_QNAME, STREAM_ROOT_TAG
from .settings import XMPPSettings
from .xmppserializer import serialize
from .streamevents import StreamConnectedEvent, GotFeaturesEvent
from .streamevents import AuthenticatedEvent, StreamRestartedEvent
from .stanzaprocessor import stanza_factory
from .interfaces import StreamFeatureHandler
from .interfaces import StreamFeatureHandled, StreamFeatureNotHandled
logger = logging.getLogger("pyxmpp2.streambase")
LANG_SPLIT_RE = re.compile(r"(.*)(?:-[a-zA-Z0-9])?-[a-zA-Z0-9]+$")
ERROR_TAG = STREAM_QNP + "error"
FEATURES_TAG = STREAM_QNP + "features"
# just to distinguish those from a domain name
IP_RE = re.compile(r"^((\d+\.){3}\d+)|([0-9a-f]*:[0-9a-f:]*:[0-9a-f]*)$")
class StreamBase(XMLStreamHandler):
"""Base class for a generic XMPP stream.
    Responsible for establishing the connection, parsing the stream, handling
    stream elements and passing received stanzas to another object.
This doesn't provide any authentication or encryption (both required by
the XMPP specification) and is not usable on its own.
Whenever we say "stream" here we actually mean two streams
    (incoming and outgoing) of one connection, as defined by the XMPP
specification.
:Ivariables:
- `authenticated`: `True` if local entity has authenticated to peer
        - `features`: stream features as announced by the receiver.
- `handlers`: handlers for stream elements
- `initiator`: `True` if local stream endpoint is the initiating entity.
- `lock`: RLock object used to synchronize access to Stream object.
- `me`: local stream endpoint JID.
- `peer_authenticated`: `True` if the peer has authenticated to us
- `peer_language`: language of human-readable stream content selected
by the peer
- `peer`: remote stream endpoint JID.
- `settings`: stream settings
- `stanza_namespace`: default namespace of the stream
- `tls_established`: `True` when the stream is protected by TLS
- `transport`: transport used by this stream
- `version`: Negotiated version of the XMPP protocol. (0,9) for the
legacy (pre-XMPP) Jabber protocol.
- `_element_handlers`: mapping from stream element names to lists of
methods handling them
- `_input_state`: `None`, "open" (<stream:stream> has been received)
"restart" or "closed" (</stream:stream> or EOF has been received)
        - `_output_state`: `None`, "open" (<stream:stream> has been sent),
          "restart" or "closed" (</stream:stream> has been sent or EOF occurred)
- `_stanza_namespace_p`: qname prefix of the stanza namespace
- `_stream_feature_handlers`: stream features handlers
:Types:
- `authenticated`: `bool`
- `features`: :etree:`ElementTree.Element`
- `handlers`: `list`
- `initiator`: `bool`
- `lock`: :std:`threading.RLock`
- `me`: `JID`
- `peer_authenticated`: `bool`
- `peer_language`: `str`
- `peer`: `JID`
- `settings`: XMPPSettings
- `stanza_namespace`: `str`
- `tls_established`: `bool`
- `transport`: `transport.XMPPTransport`
- `version`: (`int`, `int`) tuple
- `_element_handlers`: `dict`
- `_input_state`: `str`
- `_output_state`: `str`
- `_stanza_namespace_p`: `str`
- `_stream_feature_handlers`: `list` of `StreamFeatureHandler`
"""
# pylint: disable-msg=R0902,R0904
def __init__(self, stanza_namespace, stanza_route, handlers,
settings = None):
"""Initialize StreamBase object
:Parameters:
- `stanza_namespace`: stream's default namespace URI ("jabber:client"
for client, "jabber:server" for server, etc.)
- `stanza_route`: object to handle received stanzas
- `handlers`: objects to handle the stream events and elements
- `settings`: extra settings
:Types:
- `stanza_namespace`: `str`
- `stanza_route`: `StanzaRoute`
- `settings`: XMPPSettings
- `handlers`: `list` of objects
"""
XMLStreamHandler.__init__(self)
self.lock = threading.RLock()
if settings is None:
settings = XMPPSettings()
self.settings = settings
self.stanza_namespace = stanza_namespace
self._stanza_namespace_p = "{{{0}}}".format(stanza_namespace)
self.stanza_route = stanza_route
self.handlers = handlers
self._stream_feature_handlers = []
for handler in handlers:
if isinstance(handler, StreamFeatureHandler):
self._stream_feature_handlers.append(handler)
self.me = None
self.peer = None
self.stream_id = None
self.initiator = None
self.features = None
self.authenticated = False
self.peer_authenticated = False
self.tls_established = False
self.auth_method_used = None
self.version = None
self.language = None
self.peer_language = None
self.transport = None
self._input_state = None
self._output_state = None
self._element_handlers = {}
def initiate(self, transport, to = None):
"""Initiate an XMPP connection over the `transport`.
:Parameters:
- `transport`: an XMPP transport instance
- `to`: peer name
"""
with self.lock:
self.initiator = True
self.transport = transport
transport.set_target(self)
if to:
self.peer = JID(to)
else:
self.peer = None
if transport.is_connected():
self._initiate()
def _initiate(self):
"""Initiate an XMPP connection over a connected `transport`.
[ called with `lock` acquired ]
"""
self._setup_stream_element_handlers()
self._send_stream_start()
def receive(self, transport, myname):
"""Receive an XMPP connection over the `transport`.
:Parameters:
- `transport`: an XMPP transport instance
- `myname`: local stream endpoint name.
"""
with self.lock:
self.transport = transport
transport.set_target(self)
self.me = JID(myname)
self.initiator = False
self._setup_stream_element_handlers()
def _setup_stream_element_handlers(self):
"""Set up stream element handlers.
Scans the `handlers` list for `StreamFeatureHandler`
instances and updates `_element_handlers` mapping with their
methods decorated with @`stream_element_handler`
"""
# pylint: disable-msg=W0212
if self.initiator:
mode = "initiator"
else:
mode = "receiver"
self._element_handlers = {}
for handler in self.handlers:
if not isinstance(handler, StreamFeatureHandler):
continue
for _unused, meth in inspect.getmembers(handler, callable):
if not hasattr(meth, "_pyxmpp_stream_element_handled"):
continue
element_handled = meth._pyxmpp_stream_element_handled
if element_handled in self._element_handlers:
# use only the first matching handler
continue
if meth._pyxmpp_usage_restriction in (None, mode):
self._element_handlers[element_handled] = meth
def disconnect(self):
"""Gracefully close the connection."""
with self.lock:
self.transport.disconnect()
self._output_state = "closed"
def event(self, event): # pylint: disable-msg=R0201
"""Handle a stream event.
Called when connection state is changed.
Should not be called with self.lock acquired!
"""
event.stream = self
logger.debug("Stream event: {0}".format(event))
self.settings["event_queue"].put(event)
return False
def transport_connected(self):
"""Called when transport has been connected.
Send the stream head if initiator.
"""
with self.lock:
if self.initiator:
if self._output_state is None:
self._initiate()
def close(self):
"""Forcibly close the connection and clear the stream state."""
self.transport.close()
def stream_start(self, element):
"""Process <stream:stream> (stream start) tag received from peer.
`lock` is acquired when this method is called.
:Parameters:
- `element`: root element (empty) created by the parser"""
with self.lock:
logger.debug("input document: " + element_to_unicode(element))
if not element.tag.startswith(STREAM_QNP):
self._send_stream_error("invalid-namespace")
raise FatalStreamError("Bad stream namespace")
if element.tag != STREAM_ROOT_TAG:
self._send_stream_error("bad-format")
raise FatalStreamError("Bad root element")
if self._input_state == "restart":
event = StreamRestartedEvent(self.peer)
else:
event = StreamConnectedEvent(self.peer)
self._input_state = "open"
version = element.get("version")
if version:
try:
major, minor = version.split(".", 1)
major, minor = int(major), int(minor)
except ValueError:
self._send_stream_error("unsupported-version")
raise FatalStreamError("Unsupported protocol version.")
self.version = (major, minor)
else:
self.version = (0, 9)
if self.version[0] != 1 and self.version != (0, 9):
self._send_stream_error("unsupported-version")
raise FatalStreamError("Unsupported protocol version.")
peer_lang = element.get(XML_LANG_QNAME)
self.peer_language = peer_lang
if not self.initiator:
lang = None
languages = self.settings["languages"]
while peer_lang:
if peer_lang in languages:
lang = peer_lang
break
match = LANG_SPLIT_RE.match(peer_lang)
if not match:
break
                    peer_lang = match.group(1)
if lang:
self.language = lang
if self.initiator:
self.stream_id = element.get("id")
peer = element.get("from")
if peer:
peer = JID(peer)
if self.peer:
if peer and peer != self.peer:
logger.debug("peer hostname mismatch: {0!r} != {1!r}"
.format(peer, self.peer))
self.peer = peer
else:
to = element.get("to")
if to:
to = self.check_to(to)
if not to:
self._send_stream_error("host-unknown")
raise FatalStreamError('Bad "to"')
self.me = JID(to)
peer = element.get("from")
if peer:
peer = JID(peer)
self._send_stream_start(self.generate_id(), stream_to = peer)
self._send_stream_features()
self.event(event)
def stream_end(self):
"""Process </stream:stream> (stream end) tag received from peer.
"""
logger.debug("Stream ended")
with self.lock:
self._input_state = "closed"
self.transport.disconnect()
self._output_state = "closed"
def stream_eof(self):
"""Process stream EOF.
"""
self.stream_end()
def stream_element(self, element):
"""Process first level child element of the stream).
:Parameters:
- `element`: XML element received
:Types:
- `element`: :etree:`ElementTree.Element`
"""
with self.lock:
self._process_element(element)
def stream_parse_error(self, descr):
"""Called when an error is encountered in the stream.
:Parameters:
- `descr`: description of the error
:Types:
- `descr`: `str`"""
self.send_stream_error("not-well-formed")
raise StreamParseError(descr)
def _send_stream_start(self, stream_id = None, stream_to = None):
"""Send stream start tag."""
if self._output_state in ("open", "closed"):
raise StreamError("Stream start already sent")
if not self.language:
self.language = self.settings["language"]
if stream_to:
stream_to = str(stream_to)
elif self.peer and self.initiator:
stream_to = str(self.peer)
stream_from = None
if self.me and (self.tls_established or not self.initiator):
stream_from = str(self.me)
if stream_id:
self.stream_id = stream_id
else:
self.stream_id = None
self.transport.send_stream_head(self.stanza_namespace,
stream_from, stream_to,
self.stream_id, language = self.language)
self._output_state = "open"
def send_stream_error(self, condition):
"""Send stream error element.
:Parameters:
- `condition`: stream error condition name, as defined in the
XMPP specification.
"""
with self.lock:
self._send_stream_error(condition)
def _send_stream_error(self, condition):
"""Same as `send_stream_error`, but expects `lock` acquired.
"""
        if self._output_state == "closed":
return
if self._output_state in (None, "restart"):
self._send_stream_start()
element = StreamErrorElement(condition).as_xml()
self.transport.send_element(element)
self.transport.disconnect()
self._output_state = "closed"
def _restart_stream(self):
"""Restart the stream as needed after SASL and StartTLS negotiation."""
self._input_state = "restart"
self._output_state = "restart"
self.features = None
self.transport.restart()
if self.initiator:
self._send_stream_start(self.stream_id)
def _make_stream_features(self):
"""Create the <features/> element for the stream.
        [receiving entity only]
:returns: new <features/> element
:returntype: :etree:`ElementTree.Element`"""
features = ElementTree.Element(FEATURES_TAG)
for handler in self._stream_feature_handlers:
handler.make_stream_features(self, features)
return features
def _send_stream_features(self):
"""Send stream <features/>.
[receiving entity only]"""
self.features = self._make_stream_features()
self._write_element(self.features)
def write_element(self, element):
"""Write XML `element` to the stream.
:Parameters:
- `element`: Element node to send.
:Types:
- `element`: :etree:`ElementTree.Element`
"""
with self.lock:
self._write_element(element)
def _write_element(self, element):
"""Same as `write_element` but with `lock` already acquired.
"""
self.transport.send_element(element)
def send(self, stanza):
"""Write stanza to the stream.
:Parameters:
- `stanza`: XMPP stanza to send.
:Types:
- `stanza`: `pyxmpp2.stanza.Stanza`
"""
with self.lock:
return self._send(stanza)
def _send(self, stanza):
"""Same as `send` but assume `lock` is acquired."""
self.fix_out_stanza(stanza)
element = stanza.as_xml()
self._write_element(element)
def _process_element(self, element):
"""Process first level element of the stream.
The element may be stream error or features, StartTLS
request/response, SASL request/response or a stanza.
:Parameters:
- `element`: XML element
:Types:
- `element`: :etree:`ElementTree.Element`
"""
tag = element.tag
if tag in self._element_handlers:
handler = self._element_handlers[tag]
logger.debug("Passing element {0!r} to method {1!r}"
.format(element, handler))
handled = handler(self, element)
if handled:
return
if tag.startswith(self._stanza_namespace_p):
stanza = stanza_factory(element, self, self.language)
self.uplink_receive(stanza)
elif tag == ERROR_TAG:
error = StreamErrorElement(element)
self.process_stream_error(error)
elif tag == FEATURES_TAG:
logger.debug("Got features element: {0}".format(serialize(element)))
self._got_features(element)
else:
logger.debug("Unhandled element: {0}".format(serialize(element)))
logger.debug(" known handlers: {0!r}".format(
self._element_handlers))
def uplink_receive(self, stanza):
"""Handle stanza received from the stream."""
with self.lock:
if self.stanza_route:
self.stanza_route.uplink_receive(stanza)
else:
logger.debug("Stanza dropped (no route): {0!r}".format(stanza))
def process_stream_error(self, error):
"""Process stream error element received.
:Parameters:
- `error`: error received
:Types:
- `error`: `StreamErrorElement`
"""
# pylint: disable-msg=R0201
logger.debug("Unhandled stream error: condition: {0} {1!r}"
.format(error.condition_name, error.serialize()))
def check_to(self, to):
"""Check "to" attribute of received stream header.
:return: `to` if it is equal to `me`, None otherwise.
        Should be overridden in derived classes which require other logic
for handling that attribute."""
if to != self.me:
return None
return to
def generate_id(self):
"""Generate a random and unique stream ID.
:return: the id string generated."""
# pylint: disable-msg=R0201
return str(uuid.uuid4())
def _got_features(self, features):
"""Process incoming <stream:features/> element.
[initiating entity only]
The received features node is available in `features`."""
self.features = features
logger.debug("got features, passing to event handlers...")
handled = self.event(GotFeaturesEvent(self.features))
logger.debug(" handled: {0}".format(handled))
if not handled:
mandatory_handled = []
mandatory_not_handled = []
logger.debug(" passing to stream features handlers: {0}"
.format(self._stream_feature_handlers))
for handler in self._stream_feature_handlers:
ret = handler.handle_stream_features(self, self.features)
if ret is None:
continue
elif isinstance(ret, StreamFeatureHandled):
if ret.mandatory:
mandatory_handled.append(str(ret))
break
break
elif isinstance(ret, StreamFeatureNotHandled):
if ret.mandatory:
mandatory_not_handled.append(str(ret))
break
else:
raise ValueError("Wrong value returned from a stream"
" feature handler: {0!r}".format(ret))
if mandatory_not_handled and not mandatory_handled:
self.send_stream_error("unsupported-feature")
raise FatalStreamError(
"Unsupported mandatory-to-implement features: "
+ " ".join(mandatory_not_handled))
def is_connected(self):
"""Check if stream is is_connected and stanzas may be sent.
:return: True if stream connection is active."""
return self.transport.is_connected() and self._output_state == "open"
def set_peer_authenticated(self, peer, restart_stream = False):
"""Mark the other side of the stream authenticated as `peer`
:Parameters:
            - `peer`: remote JID just authenticated
- `restart_stream`: `True` when stream should be restarted (needed
after SASL authentication)
:Types:
- `peer`: `JID`
- `restart_stream`: `bool`
"""
with self.lock:
self.peer_authenticated = True
self.peer = peer
if restart_stream:
self._restart_stream()
self.event(AuthenticatedEvent(self.peer))
def set_authenticated(self, me, restart_stream = False):
"""Mark stream authenticated as `me`.
:Parameters:
- `me`: local JID just authenticated
- `restart_stream`: `True` when stream should be restarted (needed
after SASL authentication)
:Types:
- `me`: `JID`
- `restart_stream`: `bool`
"""
with self.lock:
self.authenticated = True
self.me = me
if restart_stream:
self._restart_stream()
self.event(AuthenticatedEvent(self.me))
def fix_in_stanza(self, stanza):
"""Fix incoming stanza, setting the implicit fields.
        Used on the server side of a client stream to set the proper stanza 'from'.
"""
# pylint: disable-msg=R0201
return stanza
def fix_out_stanza(self, stanza):
"""Fix outgoing, setting or clearing the implicit fields.
Used for for client side of client stream to clear the 'from'
attribute.
"""
# pylint: disable-msg=R0201
return stanza
def _languages_factory(settings):
"""Make the default value of the :r:`languages setting`."""
return [settings["language"]]
XMPPSettings.add_setting("language", type = str, default = "en",
cmdline_help = "Preferred language of the XMPP stream",
doc = """The preferred language of the XMPP stream."""
)
XMPPSettings.add_setting("languages", type = "list of ``unicode``",
validator = XMPPSettings.validate_string_list,
factory = _languages_factory,
cmdline_help = "Accepted languages of the XMPP stream",
doc = """When the remote entity selects one of these languages
on their stream, the same language will be sent in our stream declaration."""
)
XMPPSettings.add_setting("extra_ns_prefixes", type = "prefix -> uri mapping",
default = {},
doc = """Extra namespace prefix declarations to use at the stream root
element."""
)
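# A minimal usage sketch (assuming the dict-style XMPPSettings constructor used
# elsewhere in pyxmpp2): the defaults declared above can be overridden per
# application before any stream is created.
if __name__ == "__main__":
    _example_settings = XMPPSettings({"language": "de", "languages": ["de", "en"]})
    print(_example_settings["languages"])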
# vi: sts=4 et sw=4
|
|
from functools import partial
from itertools import chain
import pytest
import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import calinski_harabasz_score
from sklearn.metrics.cluster import davies_bouldin_score
from sklearn.utils._testing import assert_allclose
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
# - SUPERVISED_METRICS: all supervised cluster metrics - (when given a
# ground truth value)
# - UNSUPERVISED_METRICS: all unsupervised cluster metrics
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layouts.
#
SUPERVISED_METRICS = {
"adjusted_mutual_info_score": adjusted_mutual_info_score,
"adjusted_rand_score": adjusted_rand_score,
"rand_score": rand_score,
"completeness_score": completeness_score,
"homogeneity_score": homogeneity_score,
"mutual_info_score": mutual_info_score,
"normalized_mutual_info_score": normalized_mutual_info_score,
"v_measure_score": v_measure_score,
"fowlkes_mallows_score": fowlkes_mallows_score,
}
UNSUPERVISED_METRICS = {
"silhouette_score": silhouette_score,
"silhouette_manhattan": partial(silhouette_score, metric="manhattan"),
"calinski_harabasz_score": calinski_harabasz_score,
"davies_bouldin_score": davies_bouldin_score,
}
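# The dictionaries above are meant to be used as name -> callable lookups; the
# helper below is an illustrative sketch (not part of the upstream test suite)
# of that access pattern.
def _call_metric_by_name(name, *args, **kwargs):
    """Look up a clustering metric by name and call it (illustration only)."""
    all_metrics = dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS)
    return all_metrics[name](*args, **kwargs)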
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics
# that are symmetric with respect to their input argument y_true and y_pred.
#
# --------------------------------------------------------------------
# Symmetric with respect to their input arguments y_true and y_pred.
# Symmetric metrics only apply to supervised clusters.
SYMMETRIC_METRICS = [
"adjusted_rand_score",
"rand_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score",
]
NON_SYMMETRIC_METRICS = ["homogeneity_score", "completeness_score"]
# Metrics whose upper bound is 1
NORMALIZED_METRICS = [
"adjusted_rand_score",
"rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"adjusted_mutual_info_score",
"fowlkes_mallows_score",
"normalized_mutual_info_score",
]
rng = np.random.RandomState(0)
y1 = rng.randint(3, size=30)
y2 = rng.randint(3, size=30)
def test_symmetric_non_symmetric_union():
assert sorted(SYMMETRIC_METRICS + NON_SYMMETRIC_METRICS) == sorted(
SUPERVISED_METRICS
)
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
"metric_name, y1, y2", [(name, y1, y2) for name in SYMMETRIC_METRICS]
)
def test_symmetry(metric_name, y1, y2):
metric = SUPERVISED_METRICS[metric_name]
assert metric(y1, y2) == pytest.approx(metric(y2, y1))
@pytest.mark.parametrize(
"metric_name, y1, y2", [(name, y1, y2) for name in NON_SYMMETRIC_METRICS]
)
def test_non_symmetry(metric_name, y1, y2):
metric = SUPERVISED_METRICS[metric_name]
assert metric(y1, y2) != pytest.approx(metric(y2, y1))
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric_name", NORMALIZED_METRICS)
def test_normalized_output(metric_name):
upper_bound_1 = [0, 0, 0, 1, 1, 1]
upper_bound_2 = [0, 0, 0, 1, 1, 1]
metric = SUPERVISED_METRICS[metric_name]
assert metric([0, 0, 0, 1, 1], [0, 0, 0, 1, 2]) > 0.0
assert metric([0, 0, 1, 1, 2], [0, 0, 1, 1, 1]) > 0.0
assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0
    assert metric([0, 0, 1, 1, 2], [0, 1, 1, 1, 1]) < 1.0
assert metric(upper_bound_1, upper_bound_2) == pytest.approx(1.0)
lower_bound_1 = [0, 0, 0, 0, 0, 0]
lower_bound_2 = [0, 1, 2, 3, 4, 5]
score = np.array(
[metric(lower_bound_1, lower_bound_2), metric(lower_bound_2, lower_bound_1)]
)
assert not (score < 0).any()
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS))
def test_permute_labels(metric_name):
    # All clustering metrics should be invariant to permutations of the labels,
    # i.e. when 0 and 1 are exchanged.
y_label = np.array([0, 0, 0, 1, 1, 0, 1])
y_pred = np.array([1, 0, 1, 0, 1, 1, 0])
if metric_name in SUPERVISED_METRICS:
metric = SUPERVISED_METRICS[metric_name]
score_1 = metric(y_pred, y_label)
assert_allclose(score_1, metric(1 - y_pred, y_label))
assert_allclose(score_1, metric(1 - y_pred, 1 - y_label))
assert_allclose(score_1, metric(y_pred, 1 - y_label))
else:
metric = UNSUPERVISED_METRICS[metric_name]
X = np.random.randint(10, size=(7, 10))
score_1 = metric(X, y_pred)
assert_allclose(score_1, metric(X, 1 - y_pred))
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS))
# For all clustering metrics, the input can be given either as arrays or as
# lists, with positive, negative or string labels.
def test_format_invariance(metric_name):
y_true = [0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 1, 2, 3, 4, 5, 6, 7]
def generate_formats(y):
y = np.array(y)
yield y, "array of ints"
yield y.tolist(), "list of ints"
yield [str(x) + "-a" for x in y.tolist()], "list of strs"
yield (
np.array([str(x) + "-a" for x in y.tolist()], dtype=object),
"array of strs",
)
yield y - 1, "including negative ints"
yield y + 1, "strictly positive ints"
if metric_name in SUPERVISED_METRICS:
metric = SUPERVISED_METRICS[metric_name]
score_1 = metric(y_true, y_pred)
y_true_gen = generate_formats(y_true)
y_pred_gen = generate_formats(y_pred)
for (y_true_fmt, fmt_name), (y_pred_fmt, _) in zip(y_true_gen, y_pred_gen):
assert score_1 == metric(y_true_fmt, y_pred_fmt)
else:
metric = UNSUPERVISED_METRICS[metric_name]
X = np.random.randint(10, size=(8, 10))
score_1 = metric(X, y_true)
assert score_1 == metric(X.astype(float), y_true)
y_true_gen = generate_formats(y_true)
for (y_true_fmt, fmt_name) in y_true_gen:
assert score_1 == metric(X, y_true_fmt)
@pytest.mark.parametrize("metric", SUPERVISED_METRICS.values())
def test_single_sample(metric):
# only the supervised metrics support single sample
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
metric([i], [j])
@pytest.mark.parametrize(
"metric_name, metric_func", dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS).items()
)
def test_inf_nan_input(metric_name, metric_func):
if metric_name in SUPERVISED_METRICS:
invalids = [
([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf]),
]
else:
X = np.random.randint(10, size=(2, 10))
invalids = [(X, [np.inf, np.inf]), (X, [np.nan, np.nan]), (X, [np.nan, np.inf])]
    # Each invalid input must raise on its own.
    for args in invalids:
        with pytest.raises(ValueError, match=r"contains (NaN|infinity)"):
            metric_func(*args)
|
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing fio installation, cleanup, parsing functions."""
import ConfigParser
import io
import time
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
FIO_DIR = '%s/fio' % vm_util.VM_TMP_DIR
GIT_REPO = 'http://git.kernel.dk/fio.git'
GIT_TAG = 'fio-2.2.10'
FIO_PATH = FIO_DIR + '/fio'
FIO_CMD_PREFIX = '%s --output-format=json' % FIO_PATH
SECTION_REGEX = r'\[(\w+)\]\n([\w\d\n=*$/]+)'
PARAMETER_REGEX = r'(\w+)=([/\w\d$*]+)\n'
GLOBAL = 'global'
CMD_SECTION_REGEX = r'--name=(\w+)\s+'
JOB_SECTION_REPL_REGEX = r'[\1]\n'
CMD_PARAMETER_REGEX = r'--(\w+=[/\w\d]+)\n'
CMD_PARAMETER_REPL_REGEX = r'\1\n'
CMD_STONEWALL_PARAMETER = '--stonewall'
JOB_STONEWALL_PARAMETER = 'stonewall'
def _Install(vm):
"""Installs the fio package on the VM."""
vm.Install('build_tools')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, FIO_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(FIO_DIR, GIT_TAG))
vm.RemoteCommand('cd {0} && ./configure && make'.format(FIO_DIR))
def YumInstall(vm):
"""Installs the fio package on the VM."""
vm.InstallPackages('libaio-devel libaio bc')
_Install(vm)
def AptInstall(vm):
"""Installs the fio package on the VM."""
vm.InstallPackages('libaio-dev libaio1 bc')
_Install(vm)
def ParseJobFile(job_file):
"""Parse fio job file as dictionaries of sample metadata.
Args:
job_file: The contents of fio job file.
Returns:
A dictionary of dictionaries of sample metadata, using test name as keys,
dictionaries of sample metadata as value.
"""
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(job_file))
global_metadata = {}
if GLOBAL in config.sections():
global_metadata = dict(config.items(GLOBAL))
section_metadata = {}
for section in config.sections():
if section != GLOBAL:
metadata = {}
metadata.update(dict(config.items(section)))
metadata.update(global_metadata)
if JOB_STONEWALL_PARAMETER in metadata:
del metadata[JOB_STONEWALL_PARAMETER]
section_metadata[section] = metadata
return section_metadata
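# A hedged illustration of the parser above; the helper is not part of the
# original module and is never invoked by the benchmarks.
def _ExampleParseJobFile():
  """Hedged illustration of ParseJobFile on a tiny job file.

  Parsing a job file with a [global] section and one job section merges the
  global parameters into that job's metadata, so this returns
  {'sequential_write': {'rw': 'write', 'filesize': '10g'}}.
  """
  return ParseJobFile('[global]\nfilesize=10g\n\n[sequential_write]\nrw=write\n')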
def FioParametersToJob(fio_parameters):
"""Translate fio parameters into a job config file.
Sample fio parameters:
--filesize=10g --directory=/scratch0
--name=sequential_write --overwrite=0 --rw=write
Output:
[global]
filesize=10g
directory=/scratch0
[sequential_write]
overwrite=0
rw=write
Args:
    fio_parameters: string. Fio parameters in string format.
Returns:
A string representing a fio job config file.
"""
fio_parameters = fio_parameters.replace(' ', '\n')
fio_parameters = regex_util.Substitute(
CMD_SECTION_REGEX, JOB_SECTION_REPL_REGEX, fio_parameters)
fio_parameters = '[%s]\n%s' % (GLOBAL, fio_parameters)
fio_parameters = regex_util.Substitute(
CMD_PARAMETER_REGEX, CMD_PARAMETER_REPL_REGEX, fio_parameters)
return fio_parameters.replace(CMD_STONEWALL_PARAMETER,
JOB_STONEWALL_PARAMETER)
def ParseResults(job_file, fio_json_result, base_metadata=None):
"""Parse fio json output into samples.
Args:
job_file: The contents of the fio job file.
fio_json_result: Fio results in json format.
base_metadata: Extra metadata to annotate the samples with.
Returns:
A list of sample.Sample objects.
"""
samples = []
# The samples should all have the same timestamp because they
# come from the same fio run.
timestamp = time.time()
parameter_metadata = ParseJobFile(job_file)
io_modes = ['read', 'write', 'trim']
for job in fio_json_result['jobs']:
job_name = job['jobname']
for mode in io_modes:
if job[mode]['io_bytes']:
metric_name = '%s:%s' % (job_name, mode)
parameters = parameter_metadata[job_name]
if base_metadata:
parameters.update(base_metadata)
parameters['fio_job'] = job_name
bw_metadata = {
'bw_min': job[mode]['bw_min'],
'bw_max': job[mode]['bw_max'],
'bw_dev': job[mode]['bw_dev'],
'bw_agg': job[mode]['bw_agg'],
'bw_mean': job[mode]['bw_mean']}
bw_metadata.update(parameters)
samples.append(
sample.Sample('%s:bandwidth' % metric_name,
job[mode]['bw'],
'KB/s', bw_metadata))
# There is one sample whose metric is '<metric_name>:latency'
# with all of the latency statistics in its metadata, and then
# a bunch of samples whose metrics are
# '<metric_name>:latency:min' through
# '<metric_name>:latency:p99.99' that hold the individual
# latency numbers as values. This is for historical reasons.
clat_section = job[mode]['clat']
percentiles = clat_section['percentile']
lat_statistics = [
('min', clat_section['min']),
('max', clat_section['max']),
('mean', clat_section['mean']),
('stddev', clat_section['stddev']),
('p1', percentiles['1.000000']),
('p5', percentiles['5.000000']),
('p10', percentiles['10.000000']),
('p20', percentiles['20.000000']),
('p30', percentiles['30.000000']),
('p40', percentiles['40.000000']),
('p50', percentiles['50.000000']),
('p60', percentiles['60.000000']),
('p70', percentiles['70.000000']),
('p80', percentiles['80.000000']),
('p90', percentiles['90.000000']),
('p95', percentiles['95.000000']),
('p99', percentiles['99.000000']),
('p99.5', percentiles['99.500000']),
('p99.9', percentiles['99.900000']),
('p99.95', percentiles['99.950000']),
('p99.99', percentiles['99.990000'])]
lat_metadata = parameters.copy()
for name, val in lat_statistics:
lat_metadata[name] = val
samples.append(
sample.Sample('%s:latency' % metric_name,
job[mode]['clat']['mean'],
'usec', lat_metadata, timestamp))
for stat_name, stat_val in lat_statistics:
samples.append(
sample.Sample('%s:latency:%s' % (metric_name, stat_name),
stat_val, 'usec', parameters, timestamp))
samples.append(
sample.Sample('%s:iops' % metric_name,
job[mode]['iops'], '', parameters, timestamp))
return samples
def DeleteParameterFromJobFile(job_file, parameter):
"""Delete all occurance of parameter from job_file.
Args:
job_file: The contents of the fio job file.
parameter: The parameter to be deleted in job file.
Returns:
A string representing a fio job file after removing parameter.
"""
try:
return regex_util.Substitute(r'%s=[\w\d_/]+\n' % parameter, '', job_file)
except regex_util.NoMatchError:
return job_file
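# Hedged illustration of DeleteParameterFromJobFile; the helper below is not
# part of the original module and uses a made-up two-parameter job file.
def _ExampleDeleteParameter():
  """Removes 'filesize=10g' from a small sample job file, leaving 'rw=write'."""
  return DeleteParameterFromJobFile('[global]\nfilesize=10g\nrw=write\n',
                                    'filesize')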
|
|
#-------------------------------------------------------------------------------
# Name: Notional SQLite module
# Purpose: Provides an object class for decoding and interpreting a SQLite
#              DB data from a forensic perspective. Note: this module does not
# provide querying capabilities - it is for low-level analysis only.
#
# Author: Notional-Labs.com
#
# Created: 30/08/2013
# Licence: Apache V.2
#-------------------------------------------------------------------------------
import logging
import struct
import os
import re
import unicodedata
class NotionalSQLite:
"""
NotionalSQLite is used to store file structure information and provide
convenience functions for parsing the contents.
"""
_dbheaderfmt = ">16sHbbbbbbiiiiiiiiiii24sii"
_dictkeys = ["sig","pagesize","writever","readver","resspace","maxpayload",
"minpayload","leafpayload","changecount","dbsize","freepagelist",
"totalfreepage","schemacookie","schemanum","defpagecache",
"bigroottree","textencode","userver","incvac","expansion",
"validfor","sqlver"]
_btreetblleafheaderfmt = ">bsssbi"
all_chars = (unichr(i) for i in xrange(0x110000))
control_chars = ''.join(map(unichr, range(0,32) + range(127,160)))
statuscode = 1
headerdict = dict()
headertransdict = dict()
isDirty = bool()
dbfile = None
isWAL = False
debug = False
def __init__(self, filepath, debug):
self.debug = debug
for key in self._dictkeys:
self.headertransdict[key] = "ERROR - call translateHeader() first."
self.headerdict[key] = "ERROR - Could not read value."
try:
self.dbfile = open(filepath,"rb")
except:
logging.error("ERROR: Could not open database file")
return
        self._parseDBHeader()
if self.debug:
pass
self.statuscode = 0
def _strip_nonprintable(self,s):
control_char_re = re.compile('[%s]' % re.escape(self.control_chars))
return control_char_re.sub('', s)
def _parseTableLeafPageHeader(self,offset,pagesize):
"""
        Parse a B-Tree Table Leaf page header given its starting (physical) offset.
Pass physical offset to start of page (should be 0x0D) and page size from
DB header. cell-pointers, freeblock lists, and the offset to unused area
are relative offsets.
Returns a dict of header field metadata, a list of (active) cell-pointers,
a list of freeblocks, and the starting offset of the content area.
"""
pageheader = dict()
celllist = list()
freeblklist = list()
# Parse Page Header
self.dbfile.seek(offset)
pageheader['pagetype'] = ord(self.dbfile.read(1))
pageheader['freeblockofs'] = struct.unpack(">h",self.dbfile.read(2))[0]
pageheader['pagecellcount'] = struct.unpack(">h",self.dbfile.read(2))[0]
pageheader['contentareaofs'] = struct.unpack(">h",self.dbfile.read(2))[0]
pageheader['freebytefrags'] = ord(self.dbfile.read(1))
# Parse Cell Pointer Array and note the start of cell content area
for ptr in range(0,pageheader['pagecellcount']):
celllist.append(struct.unpack(">h",self.dbfile.read(2))[0])
cellptrendofs = self.dbfile.tell() - offset
# Get Freeblock offsets
self.dbfile.seek(offset+pageheader['freeblockofs'])
freeblkptr = pageheader['freeblockofs']
while freeblkptr != 0:
freeblklist.append(freeblkptr)
freeblkptr = struct.unpack(">h",self.dbfile.read(2))[0]
self.dbfile.seek(offset+freeblkptr)
return pageheader, celllist, freeblklist, cellptrendofs
def _parseDBHeader(self):
"""
Parse the SQLite 3 database header metadata and control information.
Sets headerdict.
"""
rawheader = self.dbfile.read(100)
unpackedheader = struct.unpack(self._dbheaderfmt,rawheader)
self.headerdict = dict(zip(self._dictkeys,list(unpackedheader)))
if (self.headerdict["readver"] == 2) or (self.headerdict["writever"] == 2):
self.isWAL = True
def _parseCell(self,offset):
"""
        Parse a B-Tree Leaf Page Cell, given its starting absolute byte offset.
        Pass absolute starting byte offset for the cell header.
        Returns the parsed cell as a list of decoded field values.
"""
celldatalist = list()
cellheader,dataoffset,payloadlen,recordnum = self._parseCellHeader(offset)
for field in cellheader:
if field[0] == "NULL":
celldatalist.append(recordnum)
elif field[0] == "ST_INT8":
self.dbfile.seek(dataoffset)
celldatalist.append(ord(struct.unpack(">c",self.dbfile.read(1))[0]))
dataoffset+=field[1]
elif field[0] == "ST_INT16":
self.dbfile.seek(dataoffset)
celldatalist.append(struct.unpack(">h",self.dbfile.read(2))[0])
dataoffset+=field[1]
elif field[0] == "ST_INT24":
self.dbfile.seek(dataoffset)
celldatalist.append("ST_INT24 - NOT IMPLEMENTED!") # NOT IMPLEMENTED YET!
dataoffset+=field[1]
elif field[0] == "ST_INT32":
self.dbfile.seek(dataoffset)
celldatalist.append(struct.unpack(">i",self.dbfile.read(4))[0])
dataoffset+=field[1]
elif field[0] == "ST_INT48":
self.dbfile.seek(dataoffset)
celldatalist.append("ST_INT48 - NOT IMPLEMENTED!") # NOT IMPLEMENTED YET!
dataoffset+=field[1]
elif field[0] == "ST_INT64":
self.dbfile.seek(dataoffset)
celldatalist.append(struct.unpack(">q",self.dbfile.read(8))[0])
dataoffset+=8
elif field[0] == "ST_FLOAT":
self.dbfile.seek(dataoffset)
celldatalist.append(struct.unpack(">d",self.dbfile.read(8))[0])
dataoffset+=8
elif field[0] == "ST_C0":
celldatalist.append("ST_C0 - NOT IMPLEMENTED!") # NOT IMPLEMENTED YET!
elif field[0] == "ST_C1":
celldatalist.append("ST_C0 - NOT IMPLEMENTED!") # NOT IMPLEMENTED YET!
elif field[0] == "ST_BLOB":
self.dbfile.seek(dataoffset)
celldatalist.append(self.dbfile.read(field[1]))
dataoffset+=field[1]
elif field[0] == "ST_TEXT":
self.dbfile.seek(dataoffset)
celldatalist.append(struct.unpack("%ss" % str(field[1]),self.dbfile.read(field[1]))[0])
dataoffset+=field[1]
else:
print field[0]
return celldatalist
def _parseCellHeader(self,offset):
"""
        Parse a B-Tree Leaf Page Cell Header, given its starting absolute byte
offset.
Pass absolute starting byte offset for the cell header to be decoded.
Returns tuple containing a list of tuples in the form
[(String type,int length),...], and the starting offset of the payload
fields.
"""
headerlist = list()
# Payload length
payloadlen,length = self._getVarIntOfs(offset)
offset+=length
# Record Number
recordnum,length = self._getVarIntOfs(offset)
offset+=length
# Payload Header Length
payloadheaderlen,length = self._getVarIntOfs(offset)
payloadheaderlenofs = offset + payloadheaderlen
offset+=length
# Payload Fields
while offset < (payloadheaderlenofs):
fieldtype,length = self._getVarIntOfs(offset)
# Determine Serial Type
if fieldtype == 0:
headerlist.append(("NULL",0))
elif fieldtype == 1:
headerlist.append(("ST_INT8",1))
elif fieldtype == 2:
headerlist.append(("ST_INT16",2))
elif fieldtype == 3:
headerlist.append(("ST_INT24",3))
elif fieldtype == 4:
headerlist.append(("ST_INT32",4))
elif fieldtype == 5:
headerlist.append(("ST_INT48",6))
elif fieldtype == 6:
headerlist.append(("ST_INT64",8))
elif fieldtype == 7:
headerlist.append(("ST_FLOAT",8))
elif fieldtype == 8:
headerlist.append(("ST_C0",0))
elif fieldtype == 9:
headerlist.append(("ST_C1",0))
elif fieldtype > 11:
if (fieldtype%2) == 0:
headerlist.append(("ST_BLOB",(fieldtype-12)/2))
else:
headerlist.append(("ST_TEXT",(fieldtype-13)/2))
else:
headerlist.append(("Reserved: %s" % str(fieldtype),0))
offset+=length
return headerlist, offset, payloadlen, recordnum
def _getVarIntOfs(self,offset):
"""
        Decode Huffman-coded two's complement integers used for storing 64-bit
variable-length integers. Implements Mike Harrington's example technique
for decoding SQLite VarInts (https://mobileforensics.wordpress.com/2011/
09/17/huffman-coding-in-sqlite-a-primer-for-mobile-forensics/). SQLite
spec allows for between 1-9 byte runs per VarInt - this method should
scale to that size, despite such huge values being rare in practice.
Pass starting byte offset to decode.
Returns tuple(VarInt value and the VarInt length).
"""
self.dbfile.seek(offset)
varintlen = varintval = 0
while True:
if((ord(self.dbfile.read(1))&(1<<7))!=0):
varintlen+=1
else:
varintlen+=1
break
self.dbfile.seek(offset)
for i in reversed(range(0,varintlen)):
if (i == 0):
byteval = ord(self.dbfile.read(1))
varintval+=byteval
else:
byteval = ord(self.dbfile.read(1))
varintval+=(byteval - 128)*(2**(i*7))
return varintval,varintlen
def _getVarInt(self,bytestring):
"""
As with _getVarIntOfs, but with an already-known length byte string.
Example: result = _getVarInt(file.read(3))
        Warning: This method will attempt to decode the bytestring regardless
        of whether it is a valid VarInt.
        Pass byte string to decode.
        Returns tuple(VarInt value, VarInt length).
"""
varintlen = len(bytestring)
varintval = bytestringpos = 0
for i in reversed(range(0,varintlen)):
if (i == 0):
byteval = ord(bytestring[bytestringpos])
varintval+=byteval
else:
byteval = ord(bytestring[bytestringpos])
varintval+=(byteval - 128)*(2**(i*7))
bytestringpos+=1
return varintval,varintlen
def getPageTypeDict(self,pagesize):
"""
        Return a dict containing separate lists of all Page type absolute
starting offsets.
"""
pagedict = dict()
pagedict['intindex'] = list()
pagedict['inttable'] = list()
pagedict['leafindex'] = list()
pagedict['leaftable'] = list()
pagedict['overflow'] = list()
offset = 0
filesize = os.path.getsize(self.dbfile.name)
while (offset < filesize):
self.dbfile.seek(offset)
flag = ord(self.dbfile.read(1))
if (flag == 2):
pagedict['intindex'].append(offset)
elif (flag == 5):
pagedict['inttable'].append(offset)
elif (flag == 10):
pagedict['leafindex'].append(offset)
elif (flag == 13):
pagedict['leaftable'].append(offset)
elif (flag == 83):
pass
elif (flag == 0):
pagedict['overflow'].append(offset)
else:
print "Invalid Page Type: %s (%s)" % (str(flag), str(offset))
offset+=pagesize
return pagedict
def getActiveRowContent(self, offset, pagesize):
"""
Return a list of lists containing the content of all active cells in the
page.
"""
cellcontentlist = list()
a,celllist,c,d = self._parseTableLeafPageHeader(offset,pagesize)
for cell in celllist:
cellcontentlist.append(self._parseCell(offset+cell))
return cellcontentlist
def getUnallocContent(self, offset, pagesize):
"""
Return a list of lists containing the content of all unallocated areas
in the page. All non-printable chars are stripped.
"""
unalloclist = list()
pageheader, celllist, freeblklist, cellptrendofs = self._parseTableLeafPageHeader(offset,pagesize)
self.dbfile.seek(offset+cellptrendofs)
length = pageheader['contentareaofs']-cellptrendofs
unalloclist.append([offset+cellptrendofs,"Unallocated",length,self._strip_nonprintable(self.dbfile.read(length))])
for freeblk in freeblklist:
self.dbfile.seek(offset+freeblk+2) # skip past the 2-byte next freeblock ptr
freeblklen = struct.unpack(">H",self.dbfile.read(2))[0]
unalloclist.append([offset+freeblk,"Free Block",freeblklen,self._strip_nonprintable(self.dbfile.read(freeblklen-4))])
return unalloclist
def mapPages(self,pagesize):
"""
Debugging method to give a visual representation of the distribution of
page types.
Pass the pagesize value from the DB header.
Returns a string.
key:
h = header page
i = interior index b-tree page
t = interior table b-tree page
I = leaf index b-tree page
        T = leaf table b-tree page
        O = overflow or unrecognized page type
"""
offset = intindex = inttbl = leafindex = leaftbl = headercnt = overflow = 0
pagemap = ""
filesize = os.path.getsize(self.dbfile.name)
while (offset < filesize):
self.dbfile.seek(offset)
flag = ord(self.dbfile.read(1))
if (flag == 2):
pagemap+="i"
intindex+=1
elif (flag == 5):
pagemap+="t"
inttbl+=1
elif (flag == 10):
pagemap+="I"
leafindex+=1
elif (flag == 13):
pagemap+="T"
leaftbl+=1
elif (flag == 83):
pagemap+="h"
headercnt+=1
else:
pagemap+="O"
overflow+=1
offset+=(pagesize)
total = intindex + inttbl + leafindex + leaftbl + headercnt + overflow
return (pagemap,intindex,inttbl,leafindex,leaftbl,headercnt,overflow,total)
def checkSignature(self):
"""
Convenience function to perform signature check.
Returns bool.
"""
if self.headerdict["sig"] == "SQLite format 3\x00":
return True
else:
return False
def translateHeader(self):
"""
Parse the unpacked header into human-readable values according to
the format spec at: http://www.sqlite.org/fileformat.html
Returns Dict.
"""
# Magic Header String
if self.headerdict["sig"] == 'SQLite format 3\x00':
self.headertransdict["sig"] = self.headerdict["sig"]
else:
self.headertransdict["sig"] = ("Invalid Signature")
# Page Size
if self.headerdict["pagesize"] == -32768:
self.headertransdict["pagesize"] = "65536 - SQLite v.3.7.1 or greater"
else:
self.headertransdict["pagesize"] = str(self.headerdict["pagesize"])
# File format version numbers
if (self.headerdict["writever"] > 2) and (self.headerdict["readver"] in (1,2)):
self.headertransdict["writever"] = "READ-ONLY"
if self.headerdict["readver"] == 1:
self.headertransdict["readver"] = "Legacy - Roll Back Journalling"
else:
self.headertransdict["readver"] = "WAL - Write Ahead Log Journalling"
elif (self.headerdict["readver"] > 2):
self.headertransdict["readver"] = "Read and Write Disabled."
if self.headerdict["writever"] == 1:
self.headertransdict["writever"] = "Legacy - Roll Back Journalling"
else:
self.headertransdict["writever"] = "WAL - Write Ahead Log Journalling"
elif (self.headerdict["writever"] in (1,2)) and (self.headerdict["readver"] in (1,2)):
if self.headerdict["readver"] == 1:
self.headertransdict["readver"] = "Legacy - Roll Back Journalling"
else:
self.headertransdict["readver"] = "WAL - Write Ahead Log Journalling"
if self.headerdict["writever"] == 1:
self.headertransdict["writever"] = "Legacy - Roll Back Journalling"
else:
self.headertransdict["writever"] = "WAL - Write Ahead Log Journalling"
else:
self.headertransdict["readver"] = "Invalid Value: %s" % self.headerdict["readver"]
self.headertransdict["writever"] = "Invalid Value: %s" % self.headerdict["writever"]
# Reserved bytes per page
self.headertransdict["resspace"] = str(self.headerdict["resspace"])
# Payload fractions
if (self.headerdict["maxpayload"] == 64):
self.headertransdict["maxpayload"] = "64"
else:
self.headertransdict["maxpayload"] = "Invalid value: %s" % str(headerdict["maxpayload"])
if (self.headerdict["minpayload"] == 32):
self.headertransdict["minpayload"] = "32"
else:
self.headertransdict["minpayload"] = "Invalid value: %s" % str(headerdict["minpayload"])
if (self.headerdict["leafpayload"] == 32):
self.headertransdict["leafpayload"] = "32"
else:
self.headertransdict["leafpayload"] = "Invalid value: %s" % str(headerdict["leafpayload"])
# File change counter
self.headertransdict["changecount"] = str(self.headerdict["changecount"])
if self.isWAL:
self.headertransdict["changecount"] += " (WAL enabled - value may be inaccurate.)"
# In-header Database Size
if (self.headerdict["changecount"] == self.headerdict["validfor"]) and (self.headerdict["dbsize"] > 0):
self.headertransdict["dbsize"] = str(self.headerdict["dbsize"]) + " page(s)"
else:
self.headertransdict["dbsize"] = "Invalid value: %s" % str(self.headerdict["dbsize"])
# Free Page List page number
self.headertransdict["freepagelist"] = str(self.headerdict["freepagelist"])
# Total Free Pages
self.headertransdict["totalfreepage"] = str(self.headerdict["totalfreepage"])
# Schema cookie
self.headertransdict["schemacookie"] = str(self.headerdict["schemacookie"])
# Schema Format number
if self.headerdict["schemanum"] == 1:
self.headertransdict["schemanum"] = "1 - SQLite 3.0.0+ Compatible"
elif self.headerdict["schemanum"] == 2:
self.headertransdict["schemanum"] = "2 - SQLite 3.1.3+ Compatible"
elif self.headerdict["schemanum"] == 3:
self.headertransdict["schemanum"] = "3 - SQLite 3.1.4+ Compatible"
elif self.headerdict["schemanum"] == 4:
self.headertransdict["schemanum"] = "4 - SQLite 3.3.0+ Compatible"
else:
self.headertransdict["schemanum"] = "Invalid value: %s" % str(self.headerdict["schemanum"])
# Suggested cache size
self.headertransdict["defpagecache"] = str(self.headerdict["defpagecache"])
# Largest Root Tree Page and Incremental Vacuum Settings
if self.headerdict["bigroottree"] == 0:
self.headertransdict["bigroottree"] = "0 - ptrmap pages disabled"
if self.headerdict["incvac"] == 0:
self.headertransdict["incvac"] = "0 - auto_vacuum mode"
else:
self.headertransdict["incvac"] = "Invalid mode: %s" % str(self.headerdict["incvac"])
else:
self.headertransdict["bigroottree"] = str(self.headerdict["bigroottree"])
self.headertransdict["incvac"] = "%s - incremental_vacuum mode" % str(self.headerdict["incvac"])
# Text Encoding
if self.headerdict["textencode"] == 1:
self.headertransdict["textencode"] = "UTF-8"
elif self.headerdict["textencode"] == 2:
self.headertransdict["textencode"] = "UTF-16LE"
elif self.headerdict["textencode"] == 3:
self.headertransdict["textencode"] = "UTF-16BE"
else:
self.headertransdict["textencode"] = "Invalid Encoding: %s" % self.headerdict["textencode"]
# User Version
self.headertransdict["userver"] = str(self.headerdict["userver"])
# Expansion block
self.headertransdict["expansion"] = ":".join("{:02x}".format(ord(c)) for c in self.headerdict["expansion"])
# Version Valid For number
self.headertransdict["validfor"] = self.headerdict["validfor"]
# SQlite version number
self.headertransdict["sqlver"] = self.headerdict["sqlver"]
return self.headertransdict
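# Hedged usage sketch of the class above ("example.db" is a placeholder path):
# open the database, verify the signature, then print the translated header.
if __name__ == "__main__":
    db = NotionalSQLite("example.db", False)
    if db.statuscode == 0 and db.checkSignature():
        translated = db.translateHeader()
        for key in db._dictkeys:
            print "%s: %s" % (key, translated[key])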
|
|
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
from django.test.utils import override_script_prefix
from django.urls import clear_url_caches, reverse, translate_url
from django.utils import translation
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
response_redirect_class = HttpResponsePermanentRedirect
@override_settings(
USE_I18N=True,
LOCALE_PATHS=[
os.path.join(os.path.dirname(__file__), 'locale'),
],
LANGUAGE_CODE='en-us',
LANGUAGES=[
('nl', 'Dutch'),
('en', 'English'),
('pt-br', 'Brazilian Portuguese'),
],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.patterns.urls.default',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.i18n',
],
},
}],
)
class URLTestCaseBase(SimpleTestCase):
"""
TestCase base-class for the URL tests.
"""
def setUp(self):
# Make sure the cache is empty before we are doing our tests.
clear_url_caches()
def tearDown(self):
# Make sure we will leave an empty cache for other testcases.
clear_url_caches()
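# Hedged sketch (not part of this test module): the 'i18n.patterns.urls.default'
# URLconf referenced in the settings above is assumed to mix non-prefixed
# patterns with i18n_patterns(), roughly:
#
#   from django.conf.urls.i18n import i18n_patterns
#   from django.urls import path
#   urlpatterns = [path('not-prefixed/', view, name='not-prefixed')]
#   urlpatterns += i18n_patterns(path('prefixed/', view, name='prefixed'))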
class URLPrefixTests(URLTestCaseBase):
"""
    Tests whether `i18n_patterns` adds the language prefix correctly.
"""
def test_not_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
with translation.override('nl'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
def test_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/en/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/nl/prefixed/')
with translation.override(None):
self.assertEqual(reverse('prefixed'), '/%s/prefixed/' % settings.LANGUAGE_CODE)
@override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
def test_invalid_prefix_use(self):
msg = 'Using i18n_patterns in an included URLconf is not allowed.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse('account:register')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.disabled')
class URLDisabledTests(URLTestCaseBase):
@override_settings(USE_I18N=False)
def test_prefixed_i18n_disabled(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
class RequestURLConfTests(SimpleTestCase):
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
def test_request_urlconf_considered(self):
request = RequestFactory().get('/nl/')
request.urlconf = 'i18n.patterns.urls.default'
middleware = LocaleMiddleware()
with translation.override('nl'):
middleware.process_request(request)
self.assertEqual(request.LANGUAGE_CODE, 'nl')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
class PathUnusedTests(URLTestCaseBase):
"""
    If no i18n_patterns is used in the root URLconf, then no language
    activation happens based on the URL prefix.
"""
def test_no_lang_activate(self):
response = self.client.get('/nl/foo/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
"""
Tests if the pattern-strings are translated correctly (within the
`i18n_patterns` and the normal `patterns` function).
"""
def test_no_prefix_translated(self):
with translation.override('en'):
self.assertEqual(reverse('no-prefix-translated'), '/translated/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/')
with translation.override('nl'):
self.assertEqual(reverse('no-prefix-translated'), '/vertaald/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/')
with translation.override('pt-br'):
self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/')
def test_users_url(self):
with translation.override('en'):
self.assertEqual(reverse('users'), '/en/users/')
with translation.override('nl'):
self.assertEqual(reverse('users'), '/nl/gebruikers/')
self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')
with translation.override('pt-br'):
self.assertEqual(reverse('users'), '/pt-br/usuarios/')
def test_translate_url_utility(self):
with translation.override('en'):
self.assertEqual(translate_url('/en/nonexistent/', 'nl'), '/en/nonexistent/')
self.assertEqual(translate_url('/en/users/', 'nl'), '/nl/gebruikers/')
# Namespaced URL
self.assertEqual(translate_url('/en/account/register/', 'nl'), '/nl/profiel/registreren/')
# path() URL pattern
self.assertEqual(translate_url('/en/account/register-as-path/', 'nl'), '/nl/profiel/registreren-als-pad/')
self.assertEqual(translation.get_language(), 'en')
with translation.override('nl'):
self.assertEqual(translate_url('/nl/gebruikers/', 'en'), '/en/users/')
self.assertEqual(translation.get_language(), 'nl')
class URLNamespaceTests(URLTestCaseBase):
"""
Tests if the translations are still working within namespaces.
"""
def test_account_register(self):
with translation.override('en'):
self.assertEqual(reverse('account:register'), '/en/account/register/')
self.assertEqual(reverse('account:register-as-path'), '/en/account/register-as-path/')
with translation.override('nl'):
self.assertEqual(reverse('account:register'), '/nl/profiel/registreren/')
self.assertEqual(reverse('account:register-as-path'), '/nl/profiel/registreren-als-pad/')
class URLRedirectTests(URLTestCaseBase):
"""
Tests if the user gets redirected to the right URL when there is no
language-prefix in the request URL.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_en_redirect_wrong_url(self):
response = self.client.get('/profiel/registreren/', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
def test_nl_redirect(self):
response = self.client.get('/profiel/registreren/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertRedirects(response, '/nl/profiel/registreren/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_nl_redirect_wrong_url(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertEqual(response.status_code, 404)
def test_pt_br_redirect(self):
response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
self.assertRedirects(response, '/pt-br/conta/registre-se/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_pl_pl_redirect(self):
# language from outside of the supported LANGUAGES list
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
@override_settings(
MIDDLEWARE=[
'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare',
'django.middleware.common.CommonMiddleware',
],
)
def test_custom_redirect_class(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/', 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
"""
'Accept-Language' is not added to the Vary header when using prefixed URLs.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Vary'), 'Accept-Language')
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
self.assertFalse(response.get('Vary'))
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=True`).
"""
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/not-prefixed/', 301)
def test_en_redirect(self):
response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
# We only want one redirect, bypassing CommonMiddleware
self.assertEqual(response.redirect_chain, [('/en/account/register/', 302)])
self.assertRedirects(response, '/en/account/register/', 302)
response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=False`).
"""
@override_settings(APPEND_SLASH=False)
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=False)
def test_en_redirect(self):
response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register-without-slash', 302)
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
"""Tests if the response has the correct language code."""
def test_not_prefixed_with_prefix(self):
response = self.client.get('/en/not-prefixed/')
self.assertEqual(response.status_code, 404)
def test_en_url(self):
response = self.client.get('/en/account/register/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
def test_nl_url(self):
response = self.client.get('/nl/profiel/registreren/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'nl')
self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')
def test_wrong_en_prefix(self):
response = self.client.get('/en/profiel/registreren/')
self.assertEqual(response.status_code, 404)
def test_wrong_nl_prefix(self):
response = self.client.get('/nl/account/register/')
self.assertEqual(response.status_code, 404)
def test_pt_br_url(self):
response = self.client.get('/pt-br/conta/registre-se/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'pt-br')
self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br')
def test_en_path(self):
response = self.client.get('/en/account/register-as-path/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
def test_nl_path(self):
response = self.client.get('/nl/profiel/registreren-als-pad/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'nl')
self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')
class URLRedirectWithScriptAliasTests(URLTestCaseBase):
"""
#21579 - LocaleMiddleware should respect the script prefix.
"""
def test_language_prefix_with_script_prefix(self):
prefix = '/script_prefix'
with override_script_prefix(prefix):
response = self.client.get('/prefixed/', HTTP_ACCEPT_LANGUAGE='en', SCRIPT_NAME=prefix)
self.assertRedirects(response, '%s/en/prefixed/' % prefix, target_status_code=404)
class URLTagTests(URLTestCaseBase):
"""
Test if the language tag works.
"""
def test_strings_only(self):
t = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(t.render(Context({})).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_context(self):
ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'})
tpl = Template("""{% load i18n %}
{% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(tpl.render(ctx).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_args(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
def test_kwargs(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Vultr DNS Driver
"""
import json
from typing import Optional, List, Dict, Any
from libcloud.utils.py3 import urlencode
from libcloud.common.vultr import VultrConnection
from libcloud.common.vultr import VultrResponse
from libcloud.common.vultr import VultrConnectionV2, VultrResponseV2
from libcloud.common.vultr import DEFAULT_API_VERSION
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.types import ZoneAlreadyExistsError, RecordAlreadyExistsError
from libcloud.dns.types import Provider, RecordType
__all__ = [
'ZoneRequiredException',
'VultrDNSResponse',
'VultrDNSConnection',
'VultrDNSDriver',
]
class ZoneRequiredException(Exception):
pass
class VultrDNSResponse(VultrResponse):
pass
class VultrDNSConnection(VultrConnection):
responseCls = VultrDNSResponse
class VultrDNSResponseV2(VultrResponseV2):
pass
class VultrDNSConnectionV2(VultrConnectionV2):
responseCls = VultrDNSResponseV2
class VultrDNSDriver(DNSDriver):
type = Provider.VULTR
name = 'Vultr DNS'
website = 'https://www.vultr.com'
def __new__(cls, key, secret=None, secure=True, host=None, port=None,
api_version=DEFAULT_API_VERSION, region=None, **kwargs):
if cls is VultrDNSDriver:
if api_version == '1':
cls = VultrDNSDriverV1
elif api_version == '2':
cls = VultrDNSDriverV2
else:
raise NotImplementedError(
'No Vultr driver found for API version: %s' %
(api_version))
return super().__new__(cls)
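# Usage note (a sketch, not a documented guarantee): because __new__ above
# selects the concrete class from api_version, VultrDNSDriver('api-key',
# api_version='2') is expected to return a VultrDNSDriverV2 instance, while
# api_version='1' yields a VultrDNSDriverV1 instance.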
class VultrDNSDriverV1(VultrDNSDriver):
connectionCls = VultrDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.TXT: 'TXT',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.SRV: 'SRV',
}
def list_zones(self):
"""
        Return a list of all zones (domains) defined for the account.

        :rtype: ``list`` of :class:`Zone`
"""
action = '/v1/dns/list'
params = {'api_key': self.key}
response = self.connection.request(action=action,
params=params)
zones = self._to_zones(response.objects[0])
return zones
def list_records(self, zone):
"""
Returns a list of records for the provided zone.
:param zone: zone to list records for
:type zone: `Zone`
:rtype: list of :class: `Record`
"""
if not isinstance(zone, Zone):
raise ZoneRequiredException('zone should be of type Zone')
zones = self.list_zones()
if not self.ex_zone_exists(zone.domain, zones):
raise ZoneDoesNotExistError(value='', driver=self,
zone_id=zone.domain)
action = '/v1/dns/records'
params = {'domain': zone.domain}
response = self.connection.request(action=action,
params=params)
records = self._to_records(response.objects[0], zone=zone)
return records
def get_zone(self, zone_id):
"""
Returns a `Zone` instance.
        :param zone_id: Name of the zone the user wants to get.
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
ret_zone = None
action = '/v1/dns/list'
params = {'api_key': self.key}
response = self.connection.request(action=action,
params=params)
zones = self._to_zones(response.objects[0])
if not self.ex_zone_exists(zone_id, zones):
raise ZoneDoesNotExistError(value=None, zone_id=zone_id,
driver=self)
for zone in zones:
if zone_id == zone.domain:
ret_zone = zone
return ret_zone
def get_record(self, zone_id, record_id):
"""
Returns a Record instance.
:param zone_id: name of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
        :rtype: :class:`Record`
"""
ret_record = None
zone = self.get_zone(zone_id=zone_id)
records = self.list_records(zone=zone)
if not self.ex_record_exists(record_id, records):
raise RecordDoesNotExistError(value='', driver=self,
record_id=record_id)
for record in records:
if record_id == record.id:
ret_record = record
return ret_record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Returns a `Zone` object.
:param domain: Zone domain name, (e.g. example.com).
:type domain: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: (optional) Extra attributes (driver specific).
(e.g. {'serverip':'127.0.0.1'})
"""
        extra = extra or {}
        params = {'api_key': self.key}
        post_params = {'domain': domain}
        # 'serverip' is optional; only include it in the payload when given.
        if extra.get('serverip'):
            post_params['serverip'] = extra['serverip']
        data = urlencode(post_params)
action = '/v1/dns/create_domain'
zones = self.list_zones()
if self.ex_zone_exists(domain, zones):
raise ZoneAlreadyExistsError(value='', driver=self,
zone_id=domain)
self.connection.request(params=params, action=action, data=data,
method='POST')
zone = Zone(id=domain, domain=domain, type=type, ttl=ttl,
driver=self, extra=extra)
return zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
extra = extra or {}
ret_record = None
old_records_list = self.list_records(zone=zone)
# check if record already exists
# if exists raise RecordAlreadyExistsError
for record in old_records_list:
if record.name == name and record.data == data:
raise RecordAlreadyExistsError(value='', driver=self,
record_id=record.id)
        priority = None
        if extra and extra.get('priority') is not None:
            priority = int(extra['priority'])
        post_data = {'domain': zone.domain, 'name': name,
                     'type': self.RECORD_TYPE_MAP.get(type), 'data': data}
        # Only MX and SRV records carry a priority value.
        if type in (RecordType.MX, RecordType.SRV) and priority is not None:
            post_data['priority'] = priority
encoded_data = urlencode(post_data)
params = {'api_key': self.key}
action = '/v1/dns/create_record'
self.connection.request(action=action, params=params,
data=encoded_data, method='POST')
updated_zone_records = zone.list_records()
for record in updated_zone_records:
if record.name == name and record.data == data:
ret_record = record
return ret_record
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
action = '/v1/dns/delete_domain'
params = {'api_key': self.key}
data = urlencode({'domain': zone.domain})
zones = self.list_zones()
if not self.ex_zone_exists(zone.domain, zones):
raise ZoneDoesNotExistError(value='', driver=self,
zone_id=zone.domain)
response = self.connection.request(params=params, action=action,
data=data, method='POST')
return response.status == 200
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
action = '/v1/dns/delete_record'
params = {'api_key': self.key}
data = urlencode({'RECORDID': record.id,
'domain': record.zone.domain})
zone_records = self.list_records(record.zone)
if not self.ex_record_exists(record.id, zone_records):
raise RecordDoesNotExistError(value='', driver=self,
record_id=record.id)
response = self.connection.request(action=action, params=params,
data=data, method='POST')
return response.status == 200
def ex_zone_exists(self, zone_id, zones_list):
"""
Function to check if a `Zone` object exists.
:param zone_id: Name of the `Zone` object.
:type zone_id: ``str``
:param zones_list: A list containing `Zone` objects
:type zones_list: ``list``
:rtype: Returns `True` or `False`
"""
zone_ids = []
for zone in zones_list:
zone_ids.append(zone.domain)
return zone_id in zone_ids
def ex_record_exists(self, record_id, records_list):
"""
:param record_id: Name of the `Record` object.
:type record_id: ``str``
:param records_list: A list containing `Record` objects
:type records_list: ``list``
:rtype: ``bool``
"""
record_ids = []
for record in records_list:
record_ids.append(record.id)
return record_id in record_ids
def _to_zone(self, item):
"""
Build an object `Zone` from the item dictionary
:param item: item to build the zone from
:type item: `dictionary`
:rtype: :instance: `Zone`
"""
type = 'master'
extra = {'date_created': item['date_created']}
zone = Zone(id=item['domain'], domain=item['domain'], driver=self,
type=type, ttl=None, extra=extra)
return zone
def _to_zones(self, items):
"""
Returns a list of `Zone` objects.
:param: items: a list that contains dictionary objects to be passed
to the _to_zone function.
:type items: ``list``
"""
zones = []
for item in items:
zones.append(self._to_zone(item))
return zones
def _to_record(self, item, zone):
extra = {}
if item.get('priority'):
extra['priority'] = item['priority']
type = self._string_to_record_type(item['type'])
record = Record(id=item['RECORDID'], name=item['name'], type=type,
data=item['data'], zone=zone, driver=self, extra=extra)
return record
def _to_records(self, items, zone):
records = []
for item in items:
records.append(self._to_record(item, zone=zone))
return records
class VultrDNSDriverV2(VultrDNSDriver):
connectionCls = VultrDNSConnectionV2
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.NS: 'NS',
RecordType.MX: 'MX',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
RecordType.CAA: 'CAA',
RecordType.SSHFP: 'SSHFP',
}
def list_zones(self) -> List[Zone]:
"""Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
data = self._paginated_request('/v2/domains', 'domains')
return [self._to_zone(item) for item in data]
def get_zone(self, zone_id: str) -> Zone:
"""Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
resp = self.connection.request('/v2/domains/%s' % zone_id)
return self._to_zone(resp.object['domain'])
def create_zone(self,
domain: str,
type: str = 'master',
ttl: Optional[int] = None,
extra: Optional[Dict[str, Any]] = None,
) -> Zone:
"""Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type. Only 'master' value is supported.
:type type: ``str``
:param ttl: TTL for new records. (unused)
:type ttl: ``int``
:param extra: Extra attributes 'ip': ``str`` IP for a default A record
                      'dns_sec': ``bool`` Enable DNSSEC.
:type extra: ``dict``
:rtype: :class:`Zone`
"""
data = {
'domain': domain,
}
extra = extra or {}
if 'ip' in extra:
data['ip'] = extra['ip']
if 'dns_sec' in extra:
data['dns_sec'] = ('enabled'
if extra['dns_sec'] is True else 'disabled')
resp = self.connection.request('/v2/domains',
data=json.dumps(data),
method='POST')
return self._to_zone(resp.object['domain'])
def delete_zone(self, zone: Zone) -> bool:
"""Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
resp = self.connection.request('/v2/domains/%s' % zone.domain,
method='DELETE')
return resp.success()
def list_records(self, zone: Zone) -> List[Record]:
"""Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
data = self._paginated_request('/v2/domains/%s/records' % zone.domain,
'records')
return [self._to_record(item, zone) for item in data]
def get_record(self, zone_id: str, record_id: str) -> Record:
"""Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
resp = self.connection.request('/v2/domains/%s/records/%s' %
(zone_id, record_id))
# Avoid making an extra API call, as zone_id is enough for
# standard fields
zone = Zone(id=zone_id,
domain=zone_id,
type='master',
ttl=None,
driver=self)
return self._to_record(resp.object['record'], zone)
def create_record(self,
name: str,
zone: Zone,
type: RecordType,
data: str,
extra: Optional[Dict[str, Any]] = None
) -> Record:
"""Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:keyword extra: Extra attributes 'ttl': Time to live in seconds
'priority': DNS priority. Only
required for MX and SRV
:type extra: ``dict``
:rtype: :class:`Record`
"""
        # Request payload; use a separate name so the 'data' argument stays
        # available for the record value itself.
        body = {
            'name': name,
            'type': self.RECORD_TYPE_MAP[type],
            'data': data,
        }
        extra = extra or {}
        if 'ttl' in extra:
            body['ttl'] = int(extra['ttl'])
        if 'priority' in extra:
            body['priority'] = int(extra['priority'])
        resp = self.connection.request('/v2/domains/%s/records' % zone.domain,
                                       data=json.dumps(body),
                                       method='POST')
return self._to_record(resp.object['record'], zone)
def update_record(self,
record: Record,
name: Optional[str] = None,
type: Optional[RecordType] = None,
data: Optional[str] = None,
extra: Optional[Dict[str, Any]] = None
) -> bool:
"""Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:keyword name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:keyword type: DNS record type. (Unused)
:type type: :class:`RecordType`
:keyword data: Data for the record (depends on the record type).
:type data: ``str``
:keyword extra: Extra attributes 'ttl': Time to live in seconds
'priority': DNS priority. Only
required for MX and SRV
:type extra: ``dict``
:rtype: ``bool``
"""
body = {}
if name:
body['name'] = name
if data:
body['data'] = data
extra = extra or {}
if 'ttl' in extra:
body['ttl'] = int(extra['ttl'])
if 'priority' in extra:
body['priority'] = int(extra['priority'])
resp = self.connection.request('/v2/domains/%s/records/%s' %
(record.zone.domain, record.id),
data=json.dumps(body),
method='PATCH')
return resp.success()
def delete_record(self, record: Record) -> bool:
"""Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
resp = self.connection.request('/v2/domains/%s/records/%s' %
(record.zone.domain, record.id),
method='DELETE')
return resp.success()
def _to_zone(self, data: Dict[str, Any]) -> Zone:
type_ = 'master'
domain = data['domain']
extra = {
'date_created': data['date_created'],
}
return Zone(id=domain,
domain=domain,
driver=self,
type=type_,
ttl=None,
extra=extra)
def _to_record(self, data: Dict[str, Any], zone: Zone) -> Record:
id_ = data['id']
name = data['name']
type_ = self._string_to_record_type(data['type'])
data_ = data['data']
ttl = data['ttl']
extra = {
'priority': data['priority'],
}
return Record(id=id_,
name=name,
type=type_,
data=data_,
ttl=ttl,
driver=self,
zone=zone,
extra=extra)
def _paginated_request(self,
url: str,
key: str,
params: Optional[Dict[str, Any]] = None,
) -> List[Any]:
"""Perform multiple calls to get the full list of items when
the API responses are paginated.
:param url: API endpoint
:type url: ``str``
:param key: Result object key
:type key: ``str``
:param params: Request parameters
:type params: ``dict``
:return: ``list`` of API response objects
:rtype: ``list``
"""
params = params if params is not None else {}
resp = self.connection.request(url, params=params).object
data = list(resp.get(key, []))
objects = data
while True:
next_page = resp['meta']['links']['next']
if next_page:
params['cursor'] = next_page
resp = self.connection.request(url, params=params).object
data = list(resp.get(key, []))
objects.extend(data)
else:
return objects
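# A minimal usage sketch, kept behind a __main__ guard so it never runs on
# import: it shows how a caller might obtain this driver through libcloud's
# provider registry and exercise list_zones() / create_record() as defined
# above. The API key and record values are placeholders, not working
# credentials.
if __name__ == '__main__':
    from libcloud.dns.providers import get_driver
    cls = get_driver(Provider.VULTR)
    # api_version='2' makes VultrDNSDriver.__new__ select VultrDNSDriverV2.
    driver = cls('example-api-key', api_version='2')
    zones = driver.list_zones()
    for zone in zones:
        print(zone.domain)
    if zones:
        # Create an A record for www.<first zone>; 'ttl' is an optional extra.
        record = driver.create_record(name='www', zone=zones[0],
                                      type=RecordType.A, data='192.0.2.1',
                                      extra={'ttl': 300})
        print(record.id, record.name, record.data)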
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import datetime
import time
import unittest2
from operator import attrgetter
from expects import be_none, equal, expect, raise_error
from apitools.base.py import encoding
from endpoints_management.control import (caches, label_descriptor,
metric_value, sc_messages,
metric_descriptor, report_request,
timestamp)
class TestReportingRules(unittest2.TestCase):
subject_cls = report_request.ReportingRules
WANTED_LABELS = (label_descriptor.KnownLabels.REFERER,)
WANTED_METRICS = (metric_descriptor.KnownMetrics.CONSUMER_REQUEST_COUNT,)
def test_should_construct_with_no_args(self):
rules = self.subject_cls()
expect(rules).not_to(be_none)
expect(rules.logs).to(equal(set()))
expect(rules.metrics).to(equal(tuple()))
expect(rules.labels).to(equal(tuple()))
def test_should_construct_with_ok_expected_args(self):
rules = self.subject_cls(logs=[u'wanted_log'],
metrics=self.WANTED_METRICS,
labels=self.WANTED_LABELS)
expect(rules).not_to(be_none)
expect(rules.logs).to(equal(set([u'wanted_log'])))
expect(rules.metrics).to(equal(self.WANTED_METRICS))
expect(rules.labels).to(equal(self.WANTED_LABELS))
def test_should_construct_with_alt_constructor(self):
rules = self.subject_cls.from_known_inputs()
expect(rules).not_to(be_none)
expect(rules.logs).to(equal(set()))
expect(rules.metrics).to(equal(tuple()))
expect(rules.labels).to(equal(tuple()))
def test_should_construct_with_alt_constructor_with_ok_args(self):
logs = [u'wanted_log', u'wanted_log']
label_names = [x.label_name for x in self.WANTED_LABELS]
metric_names = [x.metric_name for x in self.WANTED_METRICS]
rules = self.subject_cls.from_known_inputs(
logs=logs,
label_names=label_names,
metric_names=metric_names
)
expect(rules).not_to(be_none)
expect(rules.logs).to(equal(set([u'wanted_log'])))
expect(rules.metrics).to(equal(self.WANTED_METRICS))
expect(rules.labels).to(equal(self.WANTED_LABELS))
_TEST_CONSUMER_ID = u'testConsumerID'
_TEST_OP1_NAME = u'testOp1'
_TEST_OP2_NAME = u'testOp2'
_WANTED_USER_AGENT = label_descriptor.USER_AGENT
_START_OF_EPOCH = datetime.datetime.utcfromtimestamp(0)
_START_OF_EPOCH_TIMESTAMP = timestamp.to_rfc3339(_START_OF_EPOCH)
_TEST_SERVICE_NAME = u'a_service_name'
_TEST_SIZE = 1
_TEST_LATENCY = datetime.timedelta(seconds=7)
_EXPECTED_OK_LOG_ENTRY = sc_messages.LogEntry(
name = u'endpoints-log',
severity = sc_messages.LogEntry.SeverityValueValuesEnum.INFO,
structPayload=encoding.PyValueToMessage(
sc_messages.LogEntry.StructPayloadValue, {
u'http_response_code': 200,
u'http_method': u'GET',
u'request_latency_in_ms': 7000.0,
u'timestamp': time.mktime(_START_OF_EPOCH.timetuple()),
u'response_size': 1,
u'request_size': 1,
u'referer': u'a_referer',
}),
timestamp=_START_OF_EPOCH_TIMESTAMP
)
_EXPECTED_NOK_LOG_ENTRY = sc_messages.LogEntry(
name = u'endpoints-log',
severity = sc_messages.LogEntry.SeverityValueValuesEnum.ERROR,
structPayload=encoding.PyValueToMessage(
sc_messages.LogEntry.StructPayloadValue, {
u'http_response_code': 404,
u'http_method': u'GET',
u'request_latency_in_ms': 7000.0,
u'timestamp': time.mktime(_START_OF_EPOCH.timetuple()),
u'response_size': 1,
u'request_size': 1,
u'referer': u'a_referer',
u'error_cause': u'internal',
}),
timestamp=_START_OF_EPOCH_TIMESTAMP
)
_WANTED_USER_AGENT = label_descriptor.USER_AGENT
_WANTED_SERVICE_AGENT = label_descriptor.SERVICE_AGENT
_WANTED_PLATFORM = u'Unknown'
_EXPECTED_OK_METRIC = metric_descriptor.KnownMetrics.CONSUMER_REQUEST_COUNT
_EXPECTED_NOK_METRIC = metric_descriptor.KnownMetrics.CONSUMER_ERROR_COUNT
_ADD_LOG_TESTS = [
(report_request.Info(
operation_id=u'an_op_id',
operation_name=u'an_op_name',
method=u'GET',
referer=u'a_referer',
backend_time=_TEST_LATENCY,
overhead_time=_TEST_LATENCY,
request_time=_TEST_LATENCY,
request_size=_TEST_SIZE,
response_size=_TEST_SIZE,
service_name=_TEST_SERVICE_NAME),
sc_messages.Operation(
importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW,
logEntries=[_EXPECTED_OK_LOG_ENTRY],
operationId=u'an_op_id',
operationName=u'an_op_name',
startTime=_START_OF_EPOCH_TIMESTAMP,
endTime=_START_OF_EPOCH_TIMESTAMP)
),
(report_request.Info(
response_code=404,
operation_id=u'an_op_id',
operation_name=u'an_op_name',
method=u'GET',
referer=u'a_referer',
backend_time=_TEST_LATENCY,
overhead_time=_TEST_LATENCY,
request_time=_TEST_LATENCY,
request_size=_TEST_SIZE,
response_size=_TEST_SIZE,
service_name=_TEST_SERVICE_NAME),
sc_messages.Operation(
importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW,
logEntries=[_EXPECTED_NOK_LOG_ENTRY],
operationId=u'an_op_id',
operationName=u'an_op_name',
startTime=_START_OF_EPOCH_TIMESTAMP,
endTime=_START_OF_EPOCH_TIMESTAMP)
)
]
_TEST_API_KEY = u'test_key'
_ADD_METRICS_TESTS = [
(report_request.Info(
operation_id=u'an_op_id',
operation_name=u'an_op_name',
method=u'GET',
referer=u'a_referer',
backend_time=_TEST_LATENCY,
overhead_time=_TEST_LATENCY,
request_time=_TEST_LATENCY,
request_size=_TEST_SIZE,
response_size=_TEST_SIZE,
service_name=_TEST_SERVICE_NAME,
api_key=_TEST_API_KEY,
api_key_valid=True),
sc_messages.Operation(
importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW,
logEntries=[],
labels=encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue, {
u'servicecontrol.googleapis.com/service_agent':
_WANTED_SERVICE_AGENT,
u'servicecontrol.googleapis.com/user_agent':
_WANTED_USER_AGENT,
u'servicecontrol.googleapis.com/platform':
_WANTED_PLATFORM,
}),
metricValueSets = [
sc_messages.MetricValueSet(
metricName=_EXPECTED_OK_METRIC.metric_name,
metricValues=[
metric_value.create(int64Value=1),
]
),
],
consumerId=u'api_key:' + _TEST_API_KEY,
operationId=u'an_op_id',
operationName=u'an_op_name',
startTime=_START_OF_EPOCH_TIMESTAMP,
endTime=_START_OF_EPOCH_TIMESTAMP)
),
(report_request.Info(
response_code=404,
operation_id=u'an_op_id',
operation_name=u'an_op_name',
method=u'GET',
referer=u'a_referer',
backend_time=_TEST_LATENCY,
overhead_time=_TEST_LATENCY,
request_time=_TEST_LATENCY,
request_size=_TEST_SIZE,
response_size=_TEST_SIZE,
service_name=_TEST_SERVICE_NAME,
api_key=_TEST_API_KEY,
api_key_valid=True),
sc_messages.Operation(
importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW,
logEntries=[],
labels=encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue, {
u'servicecontrol.googleapis.com/service_agent':
_WANTED_SERVICE_AGENT,
u'servicecontrol.googleapis.com/user_agent':
_WANTED_USER_AGENT,
u'servicecontrol.googleapis.com/platform':
_WANTED_PLATFORM,
}),
metricValueSets = [
sc_messages.MetricValueSet(
metricName=_EXPECTED_OK_METRIC.metric_name,
metricValues=[
metric_value.create(int64Value=1),
]
),
sc_messages.MetricValueSet(
metricName=_EXPECTED_NOK_METRIC.metric_name,
metricValues=[
metric_value.create(int64Value=1),
]
),
],
consumerId=u'api_key:' + _TEST_API_KEY,
operationId=u'an_op_id',
operationName=u'an_op_name',
startTime=_START_OF_EPOCH_TIMESTAMP,
endTime=_START_OF_EPOCH_TIMESTAMP)
),
]
_EXPECTED_OK_LABEL = label_descriptor.KnownLabels.REFERER
_ADD_LABELS_TESTS = [
(report_request.Info(
operation_id=u'an_op_id',
operation_name=u'an_op_name',
method=u'GET',
referer=u'a_referer',
service_name=_TEST_SERVICE_NAME),
sc_messages.Operation(
importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW,
labels=encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue, {
_EXPECTED_OK_LABEL.label_name: u'a_referer',
u'servicecontrol.googleapis.com/service_agent':
_WANTED_SERVICE_AGENT,
u'servicecontrol.googleapis.com/user_agent':
_WANTED_USER_AGENT,
u'servicecontrol.googleapis.com/platform':
_WANTED_PLATFORM,
}),
logEntries=[],
operationId=u'an_op_id',
operationName=u'an_op_name',
startTime=_START_OF_EPOCH_TIMESTAMP,
endTime=_START_OF_EPOCH_TIMESTAMP)
),
]
KEYGETTER = attrgetter(u'key')
class TestInfo(unittest2.TestCase):
def test_should_construct_with_no_args(self):
expect(report_request.Info()).not_to(be_none)
def test_should_raise_if_constructed_with_a_bad_protocol(self):
testf = lambda: report_request.Info(protocol=object())
# not a report_request.ReportedProtocols
expect(testf).to(raise_error(ValueError))
def test_should_raise_if_constructed_with_a_bad_platform(self):
testf = lambda: report_request.Info(platform=object())
expect(testf).to(raise_error(ValueError))
def test_should_raise_if_constructed_with_a_bad_request_size(self):
testf = lambda: report_request.Info(request_size=object())
expect(testf).to(raise_error(ValueError))
testf = lambda: report_request.Info(request_size=-2)
expect(testf).to(raise_error(ValueError))
def test_should_raise_if_constructed_with_a_bad_response_size(self):
testf = lambda: report_request.Info(response_size=object())
expect(testf).to(raise_error(ValueError))
testf = lambda: report_request.Info(response_size=-2)
expect(testf).to(raise_error(ValueError))
def test_should_raise_if_constructed_with_a_bad_backend_time(self):
testf = lambda: report_request.Info(backend_time=object())
expect(testf).to(raise_error(ValueError))
def test_should_raise_if_constructed_with_a_bad_overhead_time(self):
testf = lambda: report_request.Info(overhead_time=object())
expect(testf).to(raise_error(ValueError))
def test_should_raise_if_constructed_with_a_bad_request_time(self):
testf = lambda: report_request.Info(request_time=object())
expect(testf).to(raise_error(ValueError))
def test_should_raise_if_constructed_with_a_bad_error_cause(self):
testf = lambda: report_request.Info(error_cause=object())
expect(testf).to(raise_error(ValueError))
def test_should_fail_as_report_request_on_incomplete_info(self):
timer = _DateTimeTimer()
incomplete = report_request.Info() # has no service_name
rules = report_request.ReportingRules()
testf = lambda: incomplete.as_report_request(rules, timer=timer)
expect(testf).to(raise_error(ValueError))
def test_should_add_expected_logs_as_report_request(self):
timer = _DateTimeTimer()
rules = report_request.ReportingRules(logs=[u'endpoints-log'])
for info, want in _ADD_LOG_TESTS:
got = info.as_report_request(rules, timer=timer)
expect(got.serviceName).to(equal(_TEST_SERVICE_NAME))
# compare the log entry in detail to avoid instability when
# comparing the operations directly
wantLogEntry = want.logEntries[0]
gotLogEntry = got.reportRequest.operations[0].logEntries[0]
expect(gotLogEntry.name).to(equal(wantLogEntry.name))
expect(gotLogEntry.timestamp).to(equal(wantLogEntry.timestamp))
print u'got timestamp', gotLogEntry.timestamp
print u'want timestamp', wantLogEntry.timestamp
expect(gotLogEntry.severity).to(equal(wantLogEntry.severity))
gotStruct = encoding.MessageToPyValue(gotLogEntry.structPayload)
print u'got struct', gotStruct
wantStruct = encoding.MessageToPyValue(wantLogEntry.structPayload)
print u'want struct', wantStruct
expect(gotStruct).to(equal(wantStruct))
def test_should_add_expected_metric_as_report_request(self):
timer = _DateTimeTimer()
rules = report_request.ReportingRules(metrics=[
_EXPECTED_OK_METRIC, _EXPECTED_NOK_METRIC
])
for info, want in _ADD_METRICS_TESTS:
got = info.as_report_request(rules, timer=timer)
# These additional properties have no well-defined order, so sort them.
got.reportRequest.operations[0].labels.additionalProperties.sort(key=KEYGETTER)
want.labels.additionalProperties.sort(key=KEYGETTER)
expect(got.serviceName).to(equal(_TEST_SERVICE_NAME))
expect(got.reportRequest.operations[0]).to(equal(want))
def test_should_add_expected_label_as_report_request(self):
timer = _DateTimeTimer()
rules = report_request.ReportingRules(labels=[
_EXPECTED_OK_LABEL
])
for info, want in _ADD_LABELS_TESTS:
got = info.as_report_request(rules, timer=timer)
# These additional properties have no well-defined order, so sort them.
got.reportRequest.operations[0].labels.additionalProperties.sort(key=KEYGETTER)
want.labels.additionalProperties.sort(key=KEYGETTER)
expect(got.serviceName).to(equal(_TEST_SERVICE_NAME))
expect(got.reportRequest.operations[0]).to(equal(want))
class TestAggregatorReport(unittest2.TestCase):
SERVICE_NAME = u'service.report'
def setUp(self):
self.timer = _DateTimeTimer()
self.agg = report_request.Aggregator(
self.SERVICE_NAME, caches.ReportOptions())
def test_should_fail_if_req_is_bad(self):
testf = lambda: self.agg.report(object())
expect(testf).to(raise_error(ValueError))
testf = lambda: self.agg.report(None)
expect(testf).to(raise_error(ValueError))
def test_should_fail_if_service_name_does_not_match(self):
req = _make_test_request(self.SERVICE_NAME + u'-will-not-match')
testf = lambda: self.agg.report(req)
expect(testf).to(raise_error(ValueError))
def test_should_fail_if_check_request_is_missing(self):
req = sc_messages.ServicecontrolServicesReportRequest(
serviceName=self.SERVICE_NAME)
testf = lambda: self.agg.report(req)
expect(testf).to(raise_error(ValueError))
class TestAggregatorTheCannotCache(unittest2.TestCase):
SERVICE_NAME = u'service.no_cache'
def setUp(self):
        # a negative num_entries means no cache is used
self.agg = report_request.Aggregator(
self.SERVICE_NAME,
caches.ReportOptions(num_entries=-1))
def test_should_not_cache_responses(self):
req = _make_test_request(self.SERVICE_NAME)
expect(self.agg.report(req)).to(be_none)
def test_should_have_empty_flush_response(self):
expect(len(self.agg.flush())).to(equal(0))
def test_should_have_none_as_flush_interval(self):
expect(self.agg.flush_interval).to(be_none)
class TestCachingAggregator(unittest2.TestCase):
SERVICE_NAME = u'service.with_cache'
def setUp(self):
self.timer = _DateTimeTimer()
self.flush_interval = datetime.timedelta(seconds=1)
options = caches.ReportOptions(flush_interval=self.flush_interval)
self.agg = report_request.Aggregator(
self.SERVICE_NAME, options, timer=self.timer)
def test_should_have_option_flush_interval_as_the_flush_interval(self):
expect(self.agg.flush_interval).to(equal(self.flush_interval))
def test_should_not_cache_requests_with_important_operations(self):
req = _make_test_request(
self.SERVICE_NAME,
importance=sc_messages.Operation.ImportanceValueValuesEnum.HIGH)
agg = self.agg
expect(agg.report(req)).to(be_none)
def test_should_cache_requests_and_return_cached_ok(self):
req = _make_test_request(self.SERVICE_NAME, n=2, start=0)
agg = self.agg
expect(agg.report(req)).to(equal(report_request.Aggregator.CACHED_OK))
def test_should_cache_requests_and_batch_them_on_flush(self):
req1 = _make_test_request(self.SERVICE_NAME, n=2, start=0)
req2 = _make_test_request(self.SERVICE_NAME, n=2, start=2)
agg = self.agg
expect(agg.report(req1)).to(equal(report_request.Aggregator.CACHED_OK))
expect(agg.report(req2)).to(equal(report_request.Aggregator.CACHED_OK))
# no immediate requests for flush
flushed_reqs = agg.flush()
expect(len(flushed_reqs)).to(equal(0))
self.timer.tick() # time passes ...
self.timer.tick() # ... and is now past the flush_interval
flushed_reqs = agg.flush()
expect(len(flushed_reqs)).to(equal(1))
flushed_ops = flushed_reqs[0].reportRequest.operations
expect(len(flushed_ops)).to(equal(4)) # number of ops in the req{1,2}
def test_should_aggregate_operations_in_requests(self):
n = 261 # arbitrary
agg = self.agg
for _ in range(n):
# many requests, but only two ops
req = _make_test_request(self.SERVICE_NAME, n=2, start=0)
expect(agg.report(req)).to(
equal(report_request.Aggregator.CACHED_OK))
# time passes ...
self.timer.tick()
self.timer.tick() # ... and is now past the flush_interval
flushed_reqs = agg.flush()
expect(len(flushed_reqs)).to(equal(1))
flushed_ops = flushed_reqs[0].reportRequest.operations
expect(len(flushed_ops)).to(equal(2)) # many requests, but only two ops
def test_may_clear_aggregated_operations(self):
n = 261 # arbitrary
agg = self.agg
for i in range(n):
# many requests, but only two ops
req = _make_test_request(self.SERVICE_NAME, n=2, start=0)
expect(agg.report(req)).to(
equal(report_request.Aggregator.CACHED_OK))
# time passes ...
agg.clear() # the aggregator is cleared
self.timer.tick()
self.timer.tick() # ... and is now past the flush_interval
flushed_reqs = agg.flush()
expect(len(flushed_reqs)).to(equal(0)) # but there is nothing
class _DateTimeTimer(object):
def __init__(self, auto=False):
self.auto = auto
self.time = datetime.datetime.utcfromtimestamp(0)
def __call__(self):
if self.auto:
self.tick()
return self.time
def tick(self):
self.time += datetime.timedelta(seconds=1)
def _make_op_names(n, start=0):
return (u'testOp%d' % (x,) for x in range(start, start + n))
def _make_test_request(service_name, importance=None, n=3, start=0):
if importance is None:
importance = sc_messages.Operation.ImportanceValueValuesEnum.LOW
op_names = _make_op_names(n, start=start)
ops = [sc_messages.Operation(consumerId=_TEST_CONSUMER_ID,
operationName=op_name,
importance=importance) for op_name in op_names]
if ops:
ops[0].labels = encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue, {
u'key1': u'always add a label to the first op'})
report_request = sc_messages.ReportRequest(operations=ops)
return sc_messages.ServicecontrolServicesReportRequest(
serviceName=service_name,
reportRequest=report_request)
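# A minimal sketch, behind a __main__ guard so the test runner ignores it:
# it reuses the fake _DateTimeTimer and the constants above to turn a
# report_request.Info into a ServicecontrolServicesReportRequest locally,
# with no network calls. The operation names are arbitrary placeholders.
if __name__ == '__main__':
    rules = report_request.ReportingRules(logs=[u'endpoints-log'])
    info = report_request.Info(
        operation_id=u'example_op_id',
        operation_name=u'example_op_name',
        method=u'GET',
        referer=u'a_referer',
        request_size=_TEST_SIZE,
        response_size=_TEST_SIZE,
        request_time=_TEST_LATENCY,
        backend_time=_TEST_LATENCY,
        overhead_time=_TEST_LATENCY,
        service_name=_TEST_SERVICE_NAME)
    req = info.as_report_request(rules, timer=_DateTimeTimer())
    print req.serviceName, len(req.reportRequest.operations)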
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1LimitRange(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1LimitRangeSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1LimitRange - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1LimitRange. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1LimitRange. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1LimitRange.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1LimitRange. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1LimitRange. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1LimitRange. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1LimitRange.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1LimitRange. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1LimitRange. # noqa: E501
:return: The metadata of this V1LimitRange. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1LimitRange.
:param metadata: The metadata of this V1LimitRange. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1LimitRange. # noqa: E501
:return: The spec of this V1LimitRange. # noqa: E501
:rtype: V1LimitRangeSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1LimitRange.
:param spec: The spec of this V1LimitRange. # noqa: E501
:type: V1LimitRangeSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1LimitRange):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1LimitRange):
return True
return self.to_dict() != other.to_dict()
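# A minimal usage sketch, behind a __main__ guard so it never runs on
# import: it builds a V1LimitRange with the companion generated models
# (V1ObjectMeta, V1LimitRangeSpec, V1LimitRangeItem) and shows how
# to_dict() flattens nested models. The resource values are arbitrary
# examples and no API server is contacted.
if __name__ == '__main__':
    from kubernetes import client
    limit_range = V1LimitRange(
        api_version='v1',
        kind='LimitRange',
        metadata=client.V1ObjectMeta(name='example-limits',
                                     namespace='default'),
        spec=client.V1LimitRangeSpec(limits=[
            client.V1LimitRangeItem(
                type='Container',
                default={'cpu': '500m', 'memory': '256Mi'},
                default_request={'cpu': '100m', 'memory': '128Mi'})]))
    pprint.pprint(limit_range.to_dict())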
|
|
#!/usr/bin/env python
# test_connection.py - unit test for connection attributes
#
# Copyright (C) 2008-2011 James Henstridge <james@jamesh.id.au>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import os
import time
import threading
from operator import attrgetter
import psycopg2
import psycopg2.errorcodes
import psycopg2.extensions
from .testutils import unittest, decorate_all_tests, skip_if_no_superuser
from .testutils import skip_before_postgres, skip_after_postgres
from .testutils import ConnectingTestCase, skip_if_tpc_disabled
from .testutils import skip_if_windows
from .testconfig import dsn, dbname
class ConnectionTests(ConnectingTestCase):
def test_closed_attribute(self):
conn = self.conn
self.assertEqual(conn.closed, False)
conn.close()
self.assertEqual(conn.closed, True)
def test_close_idempotent(self):
conn = self.conn
conn.close()
conn.close()
self.assertTrue(conn.closed)
def test_cursor_closed_attribute(self):
conn = self.conn
curs = conn.cursor()
self.assertEqual(curs.closed, False)
curs.close()
self.assertEqual(curs.closed, True)
# Closing the connection closes the cursor:
curs = conn.cursor()
conn.close()
self.assertEqual(curs.closed, True)
@skip_before_postgres(8, 4)
@skip_if_no_superuser
@skip_if_windows
def test_cleanup_on_badconn_close(self):
# ticket #148
conn = self.conn
cur = conn.cursor()
try:
cur.execute("select pg_terminate_backend(pg_backend_pid())")
except psycopg2.OperationalError as e:
if e.pgcode != psycopg2.errorcodes.ADMIN_SHUTDOWN:
raise
except psycopg2.DatabaseError as e:
# curiously when disconnected in green mode we get a DatabaseError
# without pgcode.
if e.pgcode is not None:
raise
self.assertEqual(conn.closed, 2)
conn.close()
self.assertEqual(conn.closed, 1)
def test_reset(self):
conn = self.conn
# switch isolation level, then reset
level = conn.isolation_level
conn.set_isolation_level(0)
self.assertEqual(conn.isolation_level, 0)
conn.reset()
# now the isolation level should be equal to saved one
self.assertEqual(conn.isolation_level, level)
def test_notices(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table chatty (id serial primary key);")
self.assertEqual("CREATE TABLE", cur.statusmessage)
self.assertTrue(conn.notices)
def test_notices_consistent_order(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table table1 (id serial); create temp table table2 (id serial);")
cur.execute("create temp table table3 (id serial); create temp table table4 (id serial);")
self.assertEqual(4, len(conn.notices))
self.assertTrue('table1' in conn.notices[0])
self.assertTrue('table2' in conn.notices[1])
self.assertTrue('table3' in conn.notices[2])
self.assertTrue('table4' in conn.notices[3])
def test_notices_limited(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
for i in range(0, 100, 10):
sql = " ".join(["create temp table table%d (id serial);" % j for j in range(i, i+10)])
cur.execute(sql)
self.assertEqual(50, len(conn.notices))
self.assertTrue('table99' in conn.notices[-1], conn.notices[-1])
def test_server_version(self):
self.assertTrue(self.conn.server_version)
def test_protocol_version(self):
self.assertTrue(self.conn.protocol_version in (2,3),
self.conn.protocol_version)
def test_tpc_unsupported(self):
cnn = self.conn
if cnn.server_version >= 80100:
return self.skipTest("tpc is supported")
self.assertRaises(psycopg2.NotSupportedError,
cnn.xid, 42, "foo", "bar")
@skip_before_postgres(8, 2)
def test_concurrent_execution(self):
def slave():
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select pg_sleep(4)")
cur.close()
cnn.close()
t1 = threading.Thread(target=slave)
t2 = threading.Thread(target=slave)
t0 = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
self.assertTrue(time.time() - t0 < 7,
"something broken in concurrency")
def test_encoding_name(self):
self.conn.set_client_encoding("EUC_JP")
# conn.encoding is 'EUCJP' now.
cur = self.conn.cursor()
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, cur)
cur.execute("select 'foo'::text;")
self.assertEqual(cur.fetchone()[0], 'foo')
def test_connect_nonnormal_envvar(self):
# We must perform encoding normalization at connection time
self.conn.close()
oldenc = os.environ.get('PGCLIENTENCODING')
        os.environ['PGCLIENTENCODING'] = 'utf-8'  # non-normalized spelling
try:
self.conn = self.connect()
finally:
if oldenc is not None:
os.environ['PGCLIENTENCODING'] = oldenc
else:
del os.environ['PGCLIENTENCODING']
def test_weakref(self):
from weakref import ref
import gc
conn = psycopg2.connect(dsn)
w = ref(conn)
conn.close()
del conn
gc.collect()
self.assertTrue(w() is None)
def test_commit_concurrency(self):
        # The problem is the one reported in ticket #103. Because of a bad
        # status check, we commit even when a commit is already on its way.
        # We can detect this condition from the warnings.
conn = self.conn
notices = []
stop = []
def committer():
while not stop:
conn.commit()
while conn.notices:
notices.append((2, conn.notices.pop()))
cur = conn.cursor()
t1 = threading.Thread(target=committer)
t1.start()
i = 1
for i in range(1000):
cur.execute("select %s;",(i,))
conn.commit()
while conn.notices:
notices.append((1, conn.notices.pop()))
# Stop the committer thread
stop.append(True)
self.assertTrue(not notices, "%d notices raised" % len(notices))
def test_connect_cursor_factory(self):
import psycopg2.extras
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
def test_cursor_factory(self):
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
self.conn.cursor_factory = psycopg2.extras.DictCursor
self.assertEqual(self.conn.cursor_factory, psycopg2.extras.DictCursor)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
self.conn.cursor_factory = None
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
def test_cursor_factory_none(self):
# issue #210
conn = self.connect()
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), psycopg2.extensions.cursor)
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), psycopg2.extras.DictCursor)
def test_failed_init_status(self):
class SubConnection(psycopg2.extensions.connection):
def __init__(self, dsn):
try:
super(SubConnection, self).__init__(dsn)
except Exception:
pass
c = SubConnection("dbname=thereisnosuchdatabasemate password=foobar")
self.assertTrue(c.closed, "connection failed so it must be closed")
self.assertTrue('foobar' not in c.dsn, "password was not obscured")
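# An illustrative helper, never called by the suite: a minimal sketch of the
# two ways the tests above configure a cursor factory, per connection via the
# cursor_factory attribute and per cursor via cursor(cursor_factory=...). It
# uses the same dsn as the tests.
def _example_cursor_factory():
    import psycopg2.extras
    conn = psycopg2.connect(dsn)
    try:
        conn.cursor_factory = psycopg2.extras.DictCursor
        dict_cur = conn.cursor()  # rows behave like dicts: row['column']
        plain_cur = conn.cursor(cursor_factory=psycopg2.extensions.cursor)
        dict_cur.close()
        plain_cur.close()
    finally:
        conn.close()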
class IsolationLevelsTestCase(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
conn = self.connect()
cur = conn.cursor()
try:
cur.execute("drop table isolevel;")
except psycopg2.ProgrammingError:
conn.rollback()
cur.execute("create table isolevel (id integer);")
conn.commit()
conn.close()
def test_isolation_level(self):
conn = self.connect()
self.assertEqual(
conn.isolation_level,
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
def test_encoding(self):
conn = self.connect()
self.assertTrue(conn.encoding in psycopg2.extensions.encodings)
def test_set_isolation_level(self):
conn = self.connect()
curs = conn.cursor()
levels = [
(None, psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT),
('read uncommitted', psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED),
('read committed', psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED),
('repeatable read', psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ),
('serializable', psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE),
]
for name, level in levels:
conn.set_isolation_level(level)
# the only values available on prehistoric PG versions
if conn.server_version < 80000:
if level in (
psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ):
name, level = levels[levels.index((name, level)) + 1]
self.assertEqual(conn.isolation_level, level)
curs.execute('show transaction_isolation;')
got_name = curs.fetchone()[0]
if name is None:
curs.execute('show default_transaction_isolation;')
name = curs.fetchone()[0]
self.assertEqual(name, got_name)
conn.commit()
self.assertRaises(ValueError, conn.set_isolation_level, -1)
self.assertRaises(ValueError, conn.set_isolation_level, 5)
def test_set_isolation_level_abort(self):
conn = self.connect()
cur = conn.cursor()
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(1, cur.fetchone()[0])
def test_isolation_level_autocommit(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
def test_isolation_level_read_committed(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
def test_isolation_level_serializable(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
def test_isolation_level_closed(self):
cnn = self.connect()
cnn.close()
self.assertRaises(psycopg2.InterfaceError, getattr,
cnn, 'isolation_level')
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 0)
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 1)
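# An illustrative helper, never called by the suite: a minimal sketch of the
# isolation-level API exercised above. set_isolation_level() takes one of the
# psycopg2.extensions constants, the current value can be read back from
# conn.isolation_level, and reset() restores the session default. The dsn
# comes from testconfig, as in the tests.
def _example_switch_isolation_level():
    conn = psycopg2.connect(dsn)
    try:
        conn.set_isolation_level(
            psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
        assert (conn.isolation_level ==
                psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
        conn.reset()  # back to the session default
    finally:
        conn.close()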
class ConnectionTwoPhaseTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
self.make_test_table()
self.clear_test_xacts()
def tearDown(self):
self.clear_test_xacts()
ConnectingTestCase.tearDown(self)
def clear_test_xacts(self):
"""Rollback all the prepared transaction in the testing db."""
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
try:
cur.execute(
"select gid from pg_prepared_xacts where database = %s",
(dbname,))
except psycopg2.ProgrammingError:
cnn.rollback()
cnn.close()
return
gids = [ r[0] for r in cur ]
for gid in gids:
cur.execute("rollback prepared %s;", (gid,))
cnn.close()
def make_test_table(self):
cnn = self.connect()
cur = cnn.cursor()
try:
cur.execute("DROP TABLE test_tpc;")
except psycopg2.ProgrammingError:
cnn.rollback()
cur.execute("CREATE TABLE test_tpc (data text);")
cnn.commit()
cnn.close()
def count_xacts(self):
"""Return the number of prepared xacts currently in the test db."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("""
select count(*) from pg_prepared_xacts
where database = %s;""",
(dbname,))
rv = cur.fetchone()[0]
cnn.close()
return rv
def count_test_records(self):
"""Return the number of records in the test table."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select count(*) from test_tpc;")
rv = cur.fetchone()[0]
cnn.close()
return rv
def test_tpc_commit(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_commit(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_rollback(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_rollback(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_status_after_recover(self):
cnn = self.connect()
self.assertEqual(psycopg2.extensions.STATUS_READY, cnn.status)
xns = cnn.tpc_recover()
self.assertEqual(psycopg2.extensions.STATUS_READY, cnn.status)
cur = cnn.cursor()
cur.execute("select 1")
self.assertEqual(psycopg2.extensions.STATUS_BEGIN, cnn.status)
xns = cnn.tpc_recover()
self.assertEqual(psycopg2.extensions.STATUS_BEGIN, cnn.status)
def test_recovered_xids(self):
# insert a few test xns
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
cur.execute("begin; prepare transaction '1-foo';")
cur.execute("begin; prepare transaction '2-bar';")
# read the values to return
cur.execute("""
select gid, prepared, owner, database
from pg_prepared_xacts
where database = %s;""",
(dbname,))
okvals = cur.fetchall()
okvals.sort()
cnn = self.connect()
xids = cnn.tpc_recover()
xids = [ xid for xid in xids if xid.database == dbname ]
xids.sort(key=attrgetter('gtrid'))
# check the values returned
self.assertEqual(len(okvals), len(xids))
for (xid, (gid, prepared, owner, database)) in zip (xids, okvals):
self.assertEqual(xid.gtrid, gid)
self.assertEqual(xid.prepared, prepared)
self.assertEqual(xid.owner, owner)
self.assertEqual(xid.database, database)
def test_xid_encoding(self):
cnn = self.connect()
xid = cnn.xid(42, "gtrid", "bqual")
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select gid from pg_prepared_xacts where database = %s;",
(dbname,))
self.assertEqual('42_Z3RyaWQ=_YnF1YWw=', cur.fetchone()[0])
def test_xid_roundtrip(self):
for fid, gtrid, bqual in [
(0, "", ""),
(42, "gtrid", "bqual"),
(0x7fffffff, "x" * 64, "y" * 64),
]:
cnn = self.connect()
xid = cnn.xid(fid, gtrid, bqual)
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, fid)
self.assertEqual(xid.gtrid, gtrid)
self.assertEqual(xid.bqual, bqual)
cnn.tpc_rollback(xid)
def test_unparsed_roundtrip(self):
for tid in [
'',
'hello, world!',
'x' * 199, # PostgreSQL's limit in transaction id length
]:
cnn = self.connect()
cnn.tpc_begin(tid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, None)
self.assertEqual(xid.gtrid, tid)
self.assertEqual(xid.bqual, None)
cnn.tpc_rollback(xid)
def test_xid_construction(self):
from psycopg2.extensions import Xid
x1 = Xid(74, 'foo', 'bar')
self.assertEqual(74, x1.format_id)
self.assertEqual('foo', x1.gtrid)
self.assertEqual('bar', x1.bqual)
def test_xid_from_string(self):
from psycopg2.extensions import Xid
x2 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(42, x2.format_id)
self.assertEqual('gtrid', x2.gtrid)
self.assertEqual('bqual', x2.bqual)
x3 = Xid.from_string('99_xxx_yyy')
self.assertEqual(None, x3.format_id)
self.assertEqual('99_xxx_yyy', x3.gtrid)
self.assertEqual(None, x3.bqual)
def test_xid_to_string(self):
from psycopg2.extensions import Xid
x1 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(str(x1), '42_Z3RyaWQ=_YnF1YWw=')
x2 = Xid.from_string('99_xxx_yyy')
self.assertEqual(str(x2), '99_xxx_yyy')
def test_xid_unicode(self):
cnn = self.connect()
x1 = cnn.xid(10, 'uni', 'code')
cnn.tpc_begin(x1)
cnn.tpc_prepare()
cnn.reset()
xid = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ][0]
self.assertEqual(10, xid.format_id)
self.assertEqual('uni', xid.gtrid)
self.assertEqual('code', xid.bqual)
def test_xid_unicode_unparsed(self):
        # We don't expect people to use snowmen as transaction ids, so if
        # something explodes with an encoding error we don't mind too much.
        # Let's just check that unicode is accepted as a type.
cnn = self.connect()
cnn.set_client_encoding('utf8')
cnn.tpc_begin("transaction-id")
cnn.tpc_prepare()
cnn.reset()
xid = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('transaction-id', xid.gtrid)
self.assertEqual(None, xid.bqual)
def test_cancel_fails_prepared(self):
cnn = self.connect()
cnn.tpc_begin('cancel')
cnn.tpc_prepare()
self.assertRaises(psycopg2.ProgrammingError, cnn.cancel)
def test_tpc_recover_non_dbapi_connection(self):
from psycopg2.extras import RealDictConnection
cnn = self.connect(connection_factory=RealDictConnection)
cnn.tpc_begin('dict-connection')
cnn.tpc_prepare()
cnn.reset()
xids = cnn.tpc_recover()
xid = [ xid for xid in xids if xid.database == dbname ][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('dict-connection', xid.gtrid)
self.assertEqual(None, xid.bqual)
decorate_all_tests(ConnectionTwoPhaseTests, skip_if_tpc_disabled)
class TransactionControlTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
self.conn.set_session,
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
def test_not_in_transaction(self):
cur = self.conn.cursor()
cur.execute("select 1")
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session,
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
def test_set_isolation_level(self):
cur = self.conn.cursor()
self.conn.set_session(
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ)
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
isolation_level=psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session(
isolation_level=psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED)
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_set_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.set_session("serializable")
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("repeatable read")
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("read committed")
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session("read uncommitted")
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_bad_isolation_level(self):
self.assertRaises(ValueError, self.conn.set_session, 0)
self.assertRaises(ValueError, self.conn.set_session, 5)
self.assertRaises(ValueError, self.conn.set_session, 'whatever')
def test_set_read_only(self):
cur = self.conn.cursor()
self.conn.set_session(readonly=True)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur = self.conn.cursor()
self.conn.set_session(readonly=None)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(readonly=False)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_set_default(self):
cur = self.conn.cursor()
cur.execute("SHOW default_transaction_isolation;")
default_isolevel = cur.fetchone()[0]
cur.execute("SHOW default_transaction_read_only;")
default_readonly = cur.fetchone()[0]
self.conn.rollback()
self.conn.set_session(isolation_level='serializable', readonly=True)
self.conn.set_session(isolation_level='default', readonly='default')
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], default_isolevel)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], default_readonly)
@skip_before_postgres(9, 1)
def test_set_deferrable(self):
cur = self.conn.cursor()
self.conn.set_session(readonly=True, deferrable=True)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW default_transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(deferrable=False)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
@skip_after_postgres(9, 1)
def test_set_deferrable_error(self):
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session, readonly=True, deferrable=True)
class AutocommitTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
setattr, self.conn, 'autocommit', True)
# The getter doesn't have a guard. We may change this in future
# to make it consistent with other methods; meanwhile let's just check
# it doesn't explode.
try:
self.assertTrue(self.conn.autocommit in (True, False))
except psycopg2.InterfaceError:
pass
def test_default_no_autocommit(self):
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
def test_set_autocommit(self):
self.conn.autocommit = True
self.assertTrue(self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.conn.autocommit = False
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
def test_set_intrans_error(self):
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertRaises(psycopg2.ProgrammingError,
setattr, self.conn, 'autocommit', True)
def test_set_session_autocommit(self):
self.conn.set_session(autocommit=True)
self.assertTrue(self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.conn.set_session(autocommit=False)
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.conn.set_session('serializable', readonly=True, autocommit=True)
self.assertTrue(self.conn.autocommit)
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a character LSTM model trained on Shakespeare."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
# pylint: disable=wrong-import-order
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
# pylint: enable=wrong-import-order
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
EMBEDDING_DIM = 256
RNN_UNITS = 1024
SEQ_LENGTH = 100
# Calculated by running with batch_size=1
BATCHES_PER_EPOCH = 11043
def define_flags():
"""Define the flags for the Shakespeare character LSTM."""
flags_core.define_base(data_dir=False,
clean=False,
train_epochs=True,
epochs_between_evals=False,
stop_threshold=False,
num_gpu=True,
export_dir=False,
run_eagerly=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=False,
max_train_steps=False,
dtype=True,
loss_scale=True,
enable_xla=True)
flags_core.set_defaults(train_epochs=43,
batch_size=64)
flags.DEFINE_boolean(name='enable_eager', default=True, help='Enable eager?')
flags.DEFINE_boolean(
name='train', default=True,
help='If true trains the model.')
flags.DEFINE_string(
name='predict_context', default=None,
help='If set, makes a prediction with the given context.')
flags.DEFINE_integer(
name='predict_length', default=1000,
help='Length of the predicted text including the context.')
flags.DEFINE_integer(name='train_steps', default=None,
help='Overrides train_steps per epoch if not None.')
flags.DEFINE_integer(
name='log_steps', default=100,
help='For every log_steps, we log the timing information such as '
'examples per second.')
flags.DEFINE_string(
name='training_data', default=None,
help='Path to file containing the training data.')
flags.DEFINE_boolean(name='cudnn', default=True, help='Use CuDNN LSTM.')
def get_dataset(path_to_file, batch_size=None, seq_length=SEQ_LENGTH):
"""Creates a dataset from a given text file.
Args:
path_to_file: The path to the training data.
batch_size: Batch size to use.
seq_length: The length of the LSTM sequence.
Returns:
A tuple, consisting of the Dataset and the class to character mapping
and character to class mapping.
"""
with tf.io.gfile.GFile(path_to_file, 'rb') as train_data:
text = train_data.read().decode(encoding='utf-8')
# Create vocab
vocab = sorted(set(text))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
  # Split text into sequence length + 1 chunks to create examples
text_as_int = np.array([char2idx[c] for c in text])
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, tf.one_hot(target_text, len(vocab))
dataset = sequences.map(split_input_target)
dataset = dataset.shuffle(10000).repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset, idx2char, char2idx
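# Illustrative usage only (the file path below is a placeholder):
#   dataset, idx2char, char2idx = get_dataset('shakespeare.txt', batch_size=64)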
def build_model(vocab_size,
embedding_dim=EMBEDDING_DIM,
rnn_units=RNN_UNITS,
batch_size=None,
stateful=False,
use_cudnn=True):
"""Builds the Shakespeare model.
Args:
vocab_size: The number of character classes in the input.
embedding_dim: The dimension of the embedding space for each class.
rnn_units: The number of RNN units in the layer.
batch_size: When predicting, the batch size of the predictions.
    stateful: If true, the LSTM is stateful.
    use_cudnn: If true, the LSTM layer may use its CuDNN implementation.
Returns:
A Keras Model.
"""
LSTM = functools.partial(tf.keras.layers.LSTM, implementation=2)
# By indirecting the activation through a lambda layer, the logic to dispatch
# to CuDNN in V2 doesn't trigger and we force the LSTM to run in non-CuDNN
# mode.
lstm_activation = ('tanh' if use_cudnn else
lambda x: tf.math.tanh(x))
batch_shape = [batch_size if stateful else None, None]
return tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=batch_shape),
LSTM(rnn_units,
activation=lstm_activation,
return_sequences=True,
stateful=stateful,
recurrent_initializer='glorot_uniform'),
tf.keras.layers.Dense(vocab_size),
tf.keras.layers.Softmax(dtype=tf.float32)])
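# Illustrative usage only (a 65-character vocabulary is an assumption):
#   model = build_model(vocab_size=65, batch_size=64)
#   model.summary()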
def train_model(flags_obj, dataset, vocab_size, strategy, checkpoint_dir=None):
"""Trains a Shakespeare model.
Args:
    flags_obj: An object containing parsed flag values.
dataset: the training data set.
vocab_size: the number of unique character classes.
strategy: distribution strategy to use.
checkpoint_dir: if not None, the directory in which to make checkpoints.
Returns:
The training history and callbacks.
"""
if flags_obj.train_steps:
train_steps = flags_obj.train_steps
else:
train_steps = BATCHES_PER_EPOCH // flags_obj.batch_size
strategy_scope = distribution_utils.get_strategy_scope(strategy)
with strategy_scope:
model = build_model(vocab_size=vocab_size, batch_size=flags_obj.batch_size,
use_cudnn=flags_obj.cudnn)
# When keras_use_ctl is False, Model.fit() automatically applies
# loss scaling so we don't need to create a LossScaleOptimizer.
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.Recall(top_k=1, name='RecallAt1'),
tf.keras.metrics.Recall(top_k=5, name='RecallAt5')],
run_eagerly=flags_obj.run_eagerly)
callbacks = []
if checkpoint_dir:
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True)
callbacks.append(checkpoint_callback)
time_callback = keras_utils.TimeHistory(flags_obj.batch_size,
flags_obj.log_steps)
callbacks.append(time_callback)
history = model.fit(dataset,
epochs=flags_obj.train_epochs,
steps_per_epoch=train_steps,
callbacks=callbacks,
verbose=2)
return history, callbacks
def make_prediction(checkpoint_dir, length, context, idx2char, char2idx):
"""Make predictions from a Shakespeare model.
Args:
checkpoint_dir: the directory from which to load checkpoints
length: the total length of the generated text (including the context).
context: the initial text with which the LSTM is primed.
idx2char: the character class to character mapping.
char2idx: the character to character class mapping.
Returns:
A generated string of text of the given length.
"""
prediction_model = build_model(
vocab_size=len(idx2char), batch_size=1, stateful=True)
prediction_model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
prediction_model.build(tf.TensorShape([1, None]))
input_eval = [char2idx[s] for s in context]
input_eval = tf.expand_dims(input_eval, 0)
text_generated = []
prediction_model.reset_states()
for _ in range(length - len(context)):
predictions = prediction_model(input_eval)
predictions = tf.squeeze(predictions, 0)
# We applied a softmax to the output of the model so that
# tf.keras.metrics.Recall would work. We need logits for
# tf.random.categorical, so we convert the probabilities back to log odds
predictions = tf.math.log(predictions / (1 - predictions))
random_output = tf.random.categorical(predictions, num_samples=1)
selected_id = random_output[-1, 0].numpy()
input_eval = tf.expand_dims([selected_id], 0)
text_generated.append(idx2char[selected_id])
return context + ''.join(text_generated)
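# Illustrative usage only (checkpoint directory and context are placeholders):
#   print(make_prediction('/tmp/shakespeare_ckpts', 1000, u'ROMEO: ',
#                         idx2char, char2idx))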
def run(flags_obj):
"""Run Shakespeare training and predict.
Args:
flags_obj: An object containing parsed flag values.
Returns:
Dictionary with status from the run.
"""
if not flags_obj.training_data:
raise ValueError(
        'Must set the path to a training data file, e.g. download '
'https://storage.googleapis.com/download.tensorflow.org/data/'
'shakespeare.txt')
if flags_obj.dtype == 'fp16':
policy = tf.keras.mixed_precision.experimental.Policy(
'mixed_float16',
loss_scale=flags_core.get_loss_scale(flags_obj,
default_for_fp16='dynamic'))
tf.keras.mixed_precision.experimental.set_policy(policy)
keras_utils.set_session_config(
enable_xla=flags_obj.enable_xla)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_obj.num_gpus)
dataset, idx2char, char2idx = get_dataset(flags_obj.training_data,
batch_size=flags_obj.batch_size)
stats = {}
if flags_obj.train:
history, callbacks = train_model(flags_obj, dataset,
len(idx2char), strategy,
checkpoint_dir=flags_obj.model_dir)
stats['history'] = history.history
stats['callbacks'] = callbacks
if flags_obj.predict_context:
if not flags_obj.model_dir:
raise ValueError('Must set model_dir to get predictions.')
print(make_prediction(flags_obj.model_dir,
flags_obj.predict_length,
flags_obj.predict_context,
idx2char,
char2idx))
return stats
def main(_):
flags_obj = flags.FLAGS
run(flags_obj)
if __name__ == '__main__':
define_flags()
app.run(main)
|
|
from __future__ import print_function, absolute_import
from distutils import sysconfig
from distutils import version
from distutils.core import Extension
import glob
import io
import multiprocessing
import os
import re
import subprocess
import sys
import warnings
from textwrap import fill
try:
from subprocess import check_output
except ImportError:
# check_output is not available in Python 2.6
def check_output(*popenargs, **kwargs):
"""
Run command with arguments and return its output as a byte
string.
Backported from Python 2.7 as it's implemented as pure python
on stdlib.
"""
process = subprocess.Popen(
stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
if sys.platform != 'win32':
if sys.version_info[0] < 3:
from commands import getstatusoutput
else:
from subprocess import getstatusoutput
if sys.version_info[0] < 3:
import ConfigParser as configparser
else:
import configparser
# matplotlib build options, which can be altered using setup.cfg
options = {
'display_status': True,
'verbose': False,
'backend': None,
'basedirlist': None
}
setup_cfg = os.environ.get('MPLSETUPCFG', 'setup.cfg')
if os.path.exists(setup_cfg):
config = configparser.SafeConfigParser()
config.read(setup_cfg)
try:
options['display_status'] = not config.getboolean("status", "suppress")
except:
pass
try:
options['backend'] = config.get("rc_options", "backend")
except:
pass
try:
options['basedirlist'] = [
x.strip() for x in
config.get("directories", "basedirlist").split(',')]
except:
pass
else:
config = None
def get_win32_compiler():
"""
Determine the compiler being used on win32.
"""
# Used to determine mingw32 or msvc
# This is pretty bad logic, someone know a better way?
for v in sys.argv:
if 'mingw32' in v:
return 'mingw32'
return 'msvc'
win32_compiler = get_win32_compiler()
def extract_versions():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
with open('lib/matplotlib/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__')):
exec(line.strip())
return locals()
def has_include_file(include_dirs, filename):
"""
Returns `True` if `filename` can be found in one of the
directories in `include_dirs`.
"""
for dir in include_dirs:
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def check_include_file(include_dirs, filename, package):
"""
Raises an exception if the given include file can not be found.
"""
if sys.platform == 'win32':
include_dirs.extend(os.getenv('INCLUDE', '.').split(';'))
if not has_include_file(include_dirs, filename):
raise CheckFailed(
"The C/C++ header for %s (%s) could not be found. You "
"may need to install the development package." %
(package, filename))
def get_base_dirs():
"""
Returns a list of standard base directories on this platform.
"""
if options['basedirlist']:
return options['basedirlist']
basedir_map = {
'win32': ['win32_static',],
'darwin': ['/usr/local/', '/usr', '/usr/X11', '/opt/local'],
'sunos5': [os.getenv('MPLIB_BASE') or '/usr/local',],
'gnu0': ['/usr'],
'aix5': ['/usr/local'],
}
return basedir_map.get(sys.platform, ['/usr/local', '/usr'])
def is_min_version(found, minversion):
"""
Returns `True` if `found` is at least as high a version as
`minversion`.
"""
expected_version = version.LooseVersion(minversion)
found_version = version.LooseVersion(found)
return found_version >= expected_version
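# Example: is_min_version('1.7.1', '1.5') is True, while
# is_min_version('1.4', '1.5') is False.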
# Define the display functions only if display_status is True.
if options['display_status']:
def print_line(char='='):
print(char * 76)
def print_status(package, status):
initial_indent = "%22s: " % package
indent = ' ' * 24
print(fill(str(status), width=76,
initial_indent=initial_indent,
subsequent_indent=indent))
def print_message(message):
indent = ' ' * 24 + "* "
print(fill(str(message), width=76,
initial_indent=indent,
subsequent_indent=indent))
def print_raw(section):
print(section)
else:
def print_line(*args, **kwargs):
pass
print_status = print_message = print_raw = print_line
# Remove the -Wstrict-prototypes option, as it's not valid for C++
customize_compiler = sysconfig.customize_compiler
def my_customize_compiler(compiler):
retval = customize_compiler(compiler)
try:
compiler.compiler_so.remove('-Wstrict-prototypes')
except (ValueError, AttributeError):
pass
return retval
sysconfig.customize_compiler = my_customize_compiler
def make_extension(name, files, *args, **kwargs):
"""
Make a new extension. Automatically sets include_dirs and
library_dirs to the base directories appropriate for this
platform.
`name` is the name of the extension.
`files` is a list of source files.
Any additional arguments are passed to the
`distutils.core.Extension` constructor.
"""
ext = DelayedExtension(name, files, *args, **kwargs)
for dir in get_base_dirs():
include_dir = os.path.join(dir, 'include')
if os.path.exists(include_dir):
ext.include_dirs.append(include_dir)
for lib in ('lib', 'lib64'):
lib_dir = os.path.join(dir, lib)
if os.path.exists(lib_dir):
ext.library_dirs.append(lib_dir)
ext.include_dirs.append('.')
return ext
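# Illustrative usage only:
#   ext = make_extension('matplotlib._png', ['src/_png.cpp', 'src/mplutils.cpp'])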
class PkgConfig(object):
"""
This is a class for communicating with pkg-config.
"""
def __init__(self):
"""
Determines whether pkg-config exists on this machine.
"""
if sys.platform == 'win32':
self.has_pkgconfig = False
else:
self.set_pkgconfig_path()
status, output = getstatusoutput("pkg-config --help")
self.has_pkgconfig = (status == 0)
def set_pkgconfig_path(self):
pkgconfig_path = sysconfig.get_config_var('LIBDIR')
if pkgconfig_path is None:
return
pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
if not os.path.isdir(pkgconfig_path):
return
try:
os.environ['PKG_CONFIG_PATH'] += ':' + pkgconfig_path
except KeyError:
os.environ['PKG_CONFIG_PATH'] = pkgconfig_path
def setup_extension(self, ext, package, default_include_dirs=[],
default_library_dirs=[], default_libraries=[],
alt_exec=None):
"""
Add parameters to the given `ext` for the given `package`.
"""
flag_map = {
'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
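        # e.g. "-I/usr/include/freetype2" is appended to include_dirs,
        # "-L/usr/lib" to library_dirs and "-lfreetype" to libraries.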
executable = alt_exec
if self.has_pkgconfig:
executable = 'pkg-config {0}'.format(package)
use_defaults = True
if executable is not None:
command = "{0} --libs --cflags ".format(executable)
try:
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
pass
else:
output = output.decode(sys.getfilesystemencoding())
use_defaults = False
for token in output.split():
attr = flag_map.get(token[:2])
if attr is not None:
getattr(ext, attr).append(token[2:])
if use_defaults:
basedirs = get_base_dirs()
for base in basedirs:
for include in default_include_dirs:
dir = os.path.join(base, include)
if os.path.exists(dir):
ext.include_dirs.append(dir)
for lib in default_library_dirs:
dir = os.path.join(base, lib)
if os.path.exists(dir):
ext.library_dirs.append(dir)
ext.libraries.extend(default_libraries)
return True
return False
def get_version(self, package):
"""
Get the version of the package from pkg-config.
"""
if not self.has_pkgconfig:
return None
status, output = getstatusoutput(
"pkg-config %s --modversion" % (package))
if status == 0:
return output
return None
# The PkgConfig class should be used through this singleton
pkg_config = PkgConfig()
class CheckFailed(Exception):
"""
Exception thrown when a `SetupPackage.check` method fails.
"""
pass
class SetupPackage(object):
optional = False
def check(self):
"""
Checks whether the dependencies are met. Should raise a
`CheckFailed` exception if the dependency could not be met,
otherwise return a string indicating a version number or some
other message indicating what was found.
"""
pass
def get_packages(self):
"""
Get a list of package names to add to the configuration.
These are added to the `packages` list passed to
`distutils.setup`.
"""
return []
def get_namespace_packages(self):
"""
Get a list of namespace package names to add to the configuration.
These are added to the `namespace_packages` list passed to
`distutils.setup`.
"""
return []
def get_py_modules(self):
"""
Get a list of top-level modules to add to the configuration.
These are added to the `py_modules` list passed to
`distutils.setup`.
"""
return []
def get_package_data(self):
"""
Get a package data dictionary to add to the configuration.
        These are merged into the `package_data` dictionary passed to
        `distutils.setup`.
"""
return {}
def get_extension(self):
"""
Get a list of C extensions (`distutils.core.Extension`
objects) to add to the configuration. These are added to the
`extensions` list passed to `distutils.setup`.
"""
return None
def get_install_requires(self):
"""
Get a list of Python packages that we require.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def get_setup_requires(self):
"""
Get a list of Python packages that we require at build time.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def _check_for_pkg_config(self, package, include_file, min_version=None,
version=None):
"""
A convenience function for writing checks for a
pkg_config-defined dependency.
`package` is the pkg_config package name.
`include_file` is a top-level include file we expect to find.
`min_version` is the minimum version required.
`version` will override the found version if this package
requires an alternate method for that.
"""
if version is None:
version = pkg_config.get_version(package)
if version is None:
raise CheckFailed(
"pkg-config information for '%s' could not be found." %
package)
if min_version == 'PATCH':
raise CheckFailed(
"Requires patches that have not been merged upstream.")
if min_version:
if (not is_min_version(version, min_version)):
raise CheckFailed(
"Requires %s %s or later. Found %s." %
(package, min_version, version))
ext = self.get_extension()
if ext is None:
ext = make_extension('test', [])
pkg_config.setup_extension(ext, package)
check_include_file(ext.include_dirs, include_file, package)
return 'version %s' % version
class OptionalPackage(SetupPackage):
optional = True
force = False
config_category = "packages"
def get_config(self):
"""
Look at `setup.cfg` and return one of ["auto", True, False] indicating
if the package is at default state ("auto"), forced by the user (True)
or opted-out (False).
"""
try:
return config.getboolean(self.config_category, self.name)
except:
return "auto"
def check(self):
"""
Do not override this method!
For custom dependency checks override self.check_requirements().
Two things are checked: Configuration file and requirements.
"""
# Check configuration file
conf = self.get_config()
# Default "auto" state or install forced by user
if conf in [True, 'auto']:
message = "installing"
# Set non-optional if user sets `True` in config
if conf is True:
self.optional = False
# Configuration opt-out by user
else:
# Some backend extensions (e.g. Agg) need to be built for certain
# other GUI backends (e.g. TkAgg) even when manually disabled
if self.force is True:
message = "installing forced (config override)"
else:
raise CheckFailed("skipping due to configuration")
# Check requirements and add extra information (if any) to message.
# If requirements are not met a CheckFailed should be raised in there.
additional_info = self.check_requirements()
if additional_info:
message += ", " + additional_info
# No CheckFailed raised until now, return install message.
return message
def check_requirements(self):
"""
Override this method to do custom dependency checks.
- Raise CheckFailed() if requirements are not met.
- Return message with additional information, or an empty string
(or None) for no additional information.
"""
return ""
class OptionalBackendPackage(OptionalPackage):
config_category = "gui_support"
class Platform(SetupPackage):
name = "platform"
def check(self):
return sys.platform
class Python(SetupPackage):
name = "python"
def check(self):
major, minor1, minor2, s, tmp = sys.version_info
if major < 2:
raise CheckFailed(
"Requires Python 2.6 or later")
elif major == 2 and minor1 < 6:
raise CheckFailed(
"Requires Python 2.6 or later (in the 2.x series)")
elif major == 3 and minor1 < 1:
raise CheckFailed(
"Requires Python 3.1 or later (in the 3.x series)")
return sys.version
class Matplotlib(SetupPackage):
name = "matplotlib"
def check(self):
return extract_versions()['__version__']
def get_packages(self):
return [
'matplotlib',
'matplotlib.backends',
'matplotlib.backends.qt4_editor',
'matplotlib.compat',
'matplotlib.projections',
'matplotlib.sphinxext',
'matplotlib.testing',
'matplotlib.testing.jpl_units',
'matplotlib.tri',
]
def get_py_modules(self):
return ['pylab']
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/fonts/afm/*.afm',
'mpl-data/fonts/pdfcorefonts/*.afm',
'mpl-data/fonts/pdfcorefonts/*.txt',
'mpl-data/fonts/ttf/*.ttf',
'mpl-data/fonts/ttf/LICENSE_STIX',
'mpl-data/fonts/ttf/COPYRIGHT.TXT',
'mpl-data/fonts/ttf/README.TXT',
'mpl-data/fonts/ttf/RELEASENOTES.TXT',
'mpl-data/images/*.xpm',
'mpl-data/images/*.svg',
'mpl-data/images/*.gif',
'mpl-data/images/*.png',
'mpl-data/images/*.ppm',
'mpl-data/example/*.npy',
'mpl-data/matplotlibrc',
'backends/web_backend/*.*',
'backends/web_backend/jquery/js/*',
'backends/web_backend/jquery/css/themes/base/*.*',
'backends/web_backend/jquery/css/themes/base/images/*',
'backends/web_backend/css/*.*',
'backends/Matplotlib.nib/*'
]}
class SampleData(OptionalPackage):
"""
This handles the sample data that ships with matplotlib. It is
technically optional, though most often will be desired.
"""
name = "sample_data"
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/sample_data/*.*',
'mpl-data/sample_data/axes_grid/*.*',
]}
class Toolkits(OptionalPackage):
name = "toolkits"
def get_packages(self):
return [
'mpl_toolkits',
'mpl_toolkits.mplot3d',
'mpl_toolkits.axes_grid',
'mpl_toolkits.axes_grid1',
'mpl_toolkits.axisartist',
]
def get_namespace_packages(self):
return ['mpl_toolkits']
class Tests(OptionalPackage):
name = "tests"
def check(self):
super(Tests, self).check()
try:
import nose
except ImportError:
return (
"nose 0.11.1 or later is required to run the "
"matplotlib test suite")
if nose.__versioninfo__ < (0, 11, 1):
return (
"nose 0.11.1 or later is required to run the "
"matplotlib test suite")
return 'using nose version %s' % nose.__version__
def get_packages(self):
return [
'matplotlib.tests',
]
def get_package_data(self):
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('lib/matplotlib/tests/baseline_images')]
return {
'matplotlib':
baseline_images +
[
'tests/mpltest.ttf',
'tests/test_rcparams.rc'
]}
def get_install_requires(self):
return ['nose']
class DelayedExtension(Extension, object):
"""
A distutils Extension subclass where some of its members
may have delayed computation until reaching the build phase.
This is so we can, for example, get the Numpy include dirs
after pip has installed Numpy for us if it wasn't already
on the system.
"""
def __init__(self, *args, **kwargs):
super(DelayedExtension, self).__init__(*args, **kwargs)
self._finalized = False
self._hooks = {}
def add_hook(self, member, func):
"""
Add a hook to dynamically compute a member.
Parameters
----------
member : string
The name of the member
func : callable
The function to call to get dynamically-computed values
for the member.
"""
self._hooks[member] = func
def finalize(self):
self._finalized = True
class DelayedMember(property):
def __init__(self, name):
self._name = name
def __get__(self, obj, objtype=None):
result = getattr(obj, '_' + self._name, [])
if obj._finalized:
if self._name in obj._hooks:
result = obj._hooks[self._name]() + result
return result
def __set__(self, obj, value):
setattr(obj, '_' + self._name, value)
include_dirs = DelayedMember('include_dirs')
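# A DelayedExtension defers some lookups until build time, e.g. (see
# Numpy.add_flags below):
#   ext.add_hook('include_dirs', Numpy.include_dirs_hook)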
class Numpy(SetupPackage):
name = "numpy"
@staticmethod
def include_dirs_hook():
if sys.version_info[0] >= 3:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
else:
import __builtin__
if hasattr(__builtin__, '__NUMPY_SETUP__'):
del __builtin__.__NUMPY_SETUP__
import numpy
reload(numpy)
ext = Extension('test', [])
ext.include_dirs.append(numpy.get_include())
if not has_include_file(
ext.include_dirs, os.path.join("numpy", "arrayobject.h")):
warnings.warn(
"The C headers for numpy could not be found. "
"You may need to install the development package")
return [numpy.get_include()]
def check(self):
min_version = extract_versions()['__version__numpy__']
try:
import numpy
except ImportError:
return 'not found. pip may install it below.'
if not is_min_version(numpy.__version__, min_version):
raise SystemExit(
"Requires numpy %s or later to build. (Found %s)" %
(min_version, numpy.__version__))
return 'version %s' % numpy.__version__
def add_flags(self, ext):
# Ensure that PY_ARRAY_UNIQUE_SYMBOL is uniquely defined for
# each extension
array_api_name = 'MPL_' + ext.name.replace('.', '_') + '_ARRAY_API'
ext.define_macros.append(('PY_ARRAY_UNIQUE_SYMBOL', array_api_name))
ext.add_hook('include_dirs', self.include_dirs_hook)
def get_setup_requires(self):
return ['numpy>=1.5']
def get_install_requires(self):
return ['numpy>=1.5']
class CXX(SetupPackage):
name = 'pycxx'
def check(self):
if sys.version_info[0] >= 3:
# There is no version of PyCXX in the wild that will work
# with Python 3.x
self.__class__.found_external = False
return ("Official versions of PyCXX are not compatible with "
"Python 3.x. Using local copy")
self.__class__.found_external = True
old_stdout = sys.stdout
if sys.version_info[0] >= 3:
sys.stdout = io.StringIO()
else:
sys.stdout = io.BytesIO()
try:
import CXX
except ImportError:
self.__class__.found_external = False
return "Couldn't import. Using local copy."
finally:
sys.stdout = old_stdout
try:
return self._check_for_pkg_config(
'PyCXX', 'CXX/Extensions.hxx', min_version='6.2.4')
except CheckFailed as e:
# It's ok to just proceed here, since the `import CXX`
# worked above, and PyCXX (at least upstream) ensures that
# its header files are on the default distutils include
# path (either in a standard C place such as /usr/include,
            # or in /usr/include/pythonX.Y).
return 'Using system CXX (version unknown, no pkg-config info)'
def add_flags(self, ext):
        if self.found_external and 'sdist' not in sys.argv:
support_dir = os.path.normpath(
os.path.join(
sys.prefix,
'share',
'python%d.%d' % (
sys.version_info[0], sys.version_info[1]),
'CXX'))
if not os.path.exists(support_dir):
# On Fedora 17, these files are installed in /usr/share/CXX
support_dir = '/usr/src/CXX'
ext.sources.extend([
os.path.join(support_dir, x) for x in
['cxxsupport.cxx', 'cxx_extensions.cxx',
'IndirectPythonInterface.cxx',
'cxxextensions.c']])
pkg_config.setup_extension(ext, 'PyCXX')
else:
ext.sources.extend(glob.glob('CXX/*.cxx'))
ext.sources.extend(glob.glob('CXX/*.c'))
ext.define_macros.append(('PYCXX_ISO_CPP_LIB', '1'))
if sys.version_info[0] >= 3:
ext.define_macros.append(('PYCXX_PYTHON_2TO3', '1'))
if not (sys.platform == 'win32' and win32_compiler == 'msvc'):
ext.libraries.append('stdc++')
ext.libraries.append('m')
class LibAgg(SetupPackage):
name = 'libagg'
def check(self):
self.__class__.found_external = True
try:
return self._check_for_pkg_config(
'libagg', 'agg2/agg_basics.h', min_version='PATCH')
except CheckFailed as e:
self.__class__.found_external = False
return str(e) + ' Using local copy.'
def add_flags(self, ext):
if self.found_external:
pkg_config.setup_extension(ext, 'libagg')
else:
ext.include_dirs.append('agg24/include')
agg_sources = [
'agg_bezier_arc.cpp',
'agg_curves.cpp',
'agg_image_filters.cpp',
'agg_trans_affine.cpp',
'agg_vcgen_contour.cpp',
'agg_vcgen_dash.cpp',
'agg_vcgen_stroke.cpp',
'agg_vpgen_segmentator.cpp'
]
ext.sources.extend(
os.path.join('agg24', 'src', x) for x in agg_sources)
class FreeType(SetupPackage):
name = "freetype"
def check(self):
if sys.platform == 'win32':
return "Unknown version"
status, output = getstatusoutput("freetype-config --version")
if status == 0:
version = output
else:
version = None
return self._check_for_pkg_config(
'freetype2', 'ft2build.h',
min_version='2.4', version=version)
def add_flags(self, ext):
pkg_config.setup_extension(
ext, 'freetype2',
default_include_dirs=[
'freetype2', 'lib/freetype2/include',
'lib/freetype2/include/freetype2'],
default_library_dirs=[
'freetype2/lib'],
default_libraries=['freetype', 'z'],
alt_exec='freetype-config')
class FT2Font(SetupPackage):
name = 'ft2font'
def get_extension(self):
sources = [
'src/ft2font.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.ft2font', sources)
FreeType().add_flags(ext)
Numpy().add_flags(ext)
CXX().add_flags(ext)
return ext
class Png(SetupPackage):
name = "png"
def check(self):
try:
return self._check_for_pkg_config(
'libpng', 'png.h',
min_version='1.2')
except CheckFailed as e:
self.__class__.found_external = False
return str(e) + ' Using unknown version.'
def get_extension(self):
sources = [
'src/_png.cpp', 'src/mplutils.cpp'
]
ext = make_extension('matplotlib._png', sources)
pkg_config.setup_extension(
ext, 'libpng', default_libraries=['png', 'z'])
Numpy().add_flags(ext)
CXX().add_flags(ext)
return ext
class TTConv(SetupPackage):
name = "ttconv"
def get_extension(self):
sources = [
'src/_ttconv.cpp',
'ttconv/pprdrv_tt.cpp',
'ttconv/pprdrv_tt2.cpp',
'ttconv/ttutil.cpp'
]
ext = make_extension('matplotlib.ttconv', sources)
Numpy().add_flags(ext)
CXX().add_flags(ext)
return ext
class Path(SetupPackage):
name = "path"
def get_extension(self):
sources = [
'src/_path.cpp',
'src/path_cleanup.cpp',
'src/agg_py_transforms.cpp'
]
ext = make_extension('matplotlib._path', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
CXX().add_flags(ext)
return ext
class Image(SetupPackage):
name = "image"
def get_extension(self):
sources = [
'src/_image.cpp', 'src/mplutils.cpp'
]
ext = make_extension('matplotlib._image', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
CXX().add_flags(ext)
return ext
class Contour(SetupPackage):
name = "contour"
def get_extension(self):
sources = [
"src/cntr.c"
]
ext = make_extension('matplotlib._cntr', sources)
Numpy().add_flags(ext)
return ext
class Delaunay(SetupPackage):
name = "delaunay"
def get_packages(self):
return ['matplotlib.delaunay']
def get_extension(self):
sources = ["_delaunay.cpp", "VoronoiDiagramGenerator.cpp",
"delaunay_utils.cpp", "natneighbors.cpp"]
sources = [os.path.join('lib/matplotlib/delaunay', s) for s in sources]
ext = make_extension('matplotlib._delaunay', sources)
Numpy().add_flags(ext)
return ext
class Tri(SetupPackage):
name = "tri"
def get_extension(self):
sources = [
"lib/matplotlib/tri/_tri.cpp",
"src/mplutils.cpp"
]
ext = make_extension('matplotlib._tri', sources)
Numpy().add_flags(ext)
CXX().add_flags(ext)
return ext
class Dateutil(SetupPackage):
name = "dateutil"
def __init__(self, version=None):
self.version = version
def check(self):
try:
import dateutil
except ImportError:
# dateutil 2.1 has a file encoding bug that breaks installation on
# python 3.3
# https://github.com/matplotlib/matplotlib/issues/2373
            # hack around the problem by installing the (working) v2.0
major, minor1, _, _, _ = sys.version_info
if self.version is None and (major, minor1) == (3, 3):
self.version = '!=2.1'
return (
"dateutil was not found. It is required for date axis "
"support. pip/easy_install may attempt to install it "
"after matplotlib.")
return "using dateutil version %s" % dateutil.__version__
def get_install_requires(self):
dateutil = 'python-dateutil'
if self.version is not None:
dateutil += self.version
return [dateutil]
class Tornado(SetupPackage):
name = "tornado"
def check(self):
try:
import tornado
except ImportError:
return (
"tornado was not found. It is required for the WebAgg "
"backend. pip/easy_install may attempt to install it "
"after matplotlib.")
return "using tornado version %s" % tornado.version
def get_install_requires(self):
return ['tornado']
class Pyparsing(SetupPackage):
name = "pyparsing"
def is_ok(self):
# pyparsing 2.0.0 bug, but it may be patched in distributions
try:
import pyparsing
f = pyparsing.Forward()
f <<= pyparsing.Literal('a')
return f is not None
except:
return False
def check(self):
try:
import pyparsing
except ImportError:
return (
"pyparsing was not found. It is required for mathtext "
"support. pip/easy_install may attempt to install it "
"after matplotlib.")
required = [1, 5, 6]
if [int(x) for x in pyparsing.__version__.split('.')] < required:
return (
"matplotlib requires pyparsing >= {0}".format(
'.'.join(str(x) for x in required)))
if not self.is_ok():
return (
"Your pyparsing contains a bug that will be monkey-patched by "
"matplotlib. For best results, upgrade to pyparsing 2.0.1 or "
"later.")
return "using pyparsing version %s" % pyparsing.__version__
def get_install_requires(self):
if self.is_ok():
return ['pyparsing>=1.5.6']
else:
return ['pyparsing>=1.5.6,!=2.0.0']
class BackendAgg(OptionalBackendPackage):
name = "agg"
def get_extension(self):
sources = [
"src/mplutils.cpp",
"src/agg_py_transforms.cpp",
"src/_backend_agg.cpp"
]
ext = make_extension('matplotlib.backends._backend_agg', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
FreeType().add_flags(ext)
CXX().add_flags(ext)
return ext
class BackendTkAgg(OptionalBackendPackage):
name = "tkagg"
def __init__(self):
self.tcl_tk_cache = None
def check_requirements(self):
try:
if sys.version_info[0] < 3:
import Tkinter
else:
import tkinter as Tkinter
except ImportError:
raise CheckFailed('TKAgg requires Tkinter.')
except RuntimeError:
raise CheckFailed('Tkinter present but import failed.')
else:
if Tkinter.TkVersion < 8.3:
raise CheckFailed("Tcl/Tk v8.3 or later required.")
ext = self.get_extension()
check_include_file(ext.include_dirs, "tk.h", "Tk")
try:
tk_v = Tkinter.__version__.split()[-2]
except (AttributeError, IndexError):
# Tkinter.__version__ has been removed in python 3
tk_v = 'version not identified'
BackendAgg.force = True
return "version %s" % tk_v
def get_extension(self):
sources = [
'src/agg_py_transforms.cpp',
'src/_tkagg.cpp'
]
ext = make_extension('matplotlib.backends._tkagg', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
CXX().add_flags(ext)
return ext
def query_tcltk(self):
"""
Tries to open a Tk window in order to query the Tk object
about its library paths. This should never be called more
than once by the same process, as Tk intricacies may cause the
Python interpreter to hang. The function also has a workaround
if no X server is running (useful for autobuild systems).
"""
# Use cached values if they exist, which ensures this function
# only executes once
if self.tcl_tk_cache is not None:
return self.tcl_tk_cache
# By this point, we already know that Tkinter imports correctly
if sys.version_info[0] < 3:
import Tkinter
else:
import tkinter as Tkinter
tcl_lib_dir = ''
tk_lib_dir = ''
# First try to open a Tk window (requires a running X server)
try:
tk = Tkinter.Tk()
except Tkinter.TclError:
# Next, start Tcl interpreter without opening a Tk window
            # (no need for X server). This feature is available in
# python version 2.4 and up
try:
tcl = Tkinter.Tcl()
except AttributeError: # Python version not high enough
pass
except Tkinter.TclError: # Something went wrong while opening Tcl
pass
else:
tcl_lib_dir = str(tcl.getvar('tcl_library'))
# Guess Tk location based on Tcl location
(head, tail) = os.path.split(tcl_lib_dir)
tail = tail.replace('Tcl', 'Tk').replace('tcl', 'tk')
tk_lib_dir = os.path.join(head, tail)
if not os.path.exists(tk_lib_dir):
tk_lib_dir = tcl_lib_dir.replace(
'Tcl', 'Tk').replace('tcl', 'tk')
else:
# Obtain Tcl and Tk locations from Tk widget
tk.withdraw()
tcl_lib_dir = str(tk.getvar('tcl_library'))
tk_lib_dir = str(tk.getvar('tk_library'))
tk.destroy()
# Save directories and version string to cache
self.tcl_tk_cache = tcl_lib_dir, tk_lib_dir, str(Tkinter.TkVersion)[:3]
return self.tcl_tk_cache
def parse_tcl_config(self, tcl_lib_dir, tk_lib_dir):
try:
if sys.version_info[0] < 3:
import Tkinter
else:
import tkinter as Tkinter
except ImportError:
return None
tcl_poss = [tcl_lib_dir,
os.path.normpath(os.path.join(tcl_lib_dir, '..')),
"/usr/lib/tcl" + str(Tkinter.TclVersion),
"/usr/lib"]
tk_poss = [tk_lib_dir,
os.path.normpath(os.path.join(tk_lib_dir, '..')),
"/usr/lib/tk" + str(Tkinter.TkVersion),
"/usr/lib"]
for ptcl, ptk in zip(tcl_poss, tk_poss):
tcl_config = os.path.join(ptcl, "tclConfig.sh")
tk_config = os.path.join(ptk, "tkConfig.sh")
if (os.path.exists(tcl_config) and os.path.exists(tk_config)):
break
if not (os.path.exists(tcl_config) and os.path.exists(tk_config)):
return None
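        # get_var sources the tclConfig.sh/tkConfig.sh script in a shell and
        # echoes the requested variable (e.g. TCL_LIB_SPEC) so its value can
        # be read back here.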
def get_var(file, varname):
p = subprocess.Popen(
'. %s ; eval echo ${%s}' % (file, varname),
shell=True,
executable="/bin/sh",
stdout=subprocess.PIPE)
result = p.communicate()[0]
return result.decode('ascii')
tcl_lib_dir = get_var(
tcl_config, 'TCL_LIB_SPEC').split()[0][2:].strip()
tcl_inc_dir = get_var(
tcl_config, 'TCL_INCLUDE_SPEC')[2:].strip()
tcl_lib = get_var(tcl_config, 'TCL_LIB_FLAG')[2:].strip()
tk_lib_dir = get_var(tk_config, 'TK_LIB_SPEC').split()[0][2:].strip()
tk_inc_dir = get_var(tk_config, 'TK_INCLUDE_SPEC').strip()
if tk_inc_dir == '':
tk_inc_dir = tcl_inc_dir
else:
tk_inc_dir = tk_inc_dir[2:]
tk_lib = get_var(tk_config, 'TK_LIB_FLAG')[2:].strip()
if not os.path.exists(os.path.join(tk_inc_dir, 'tk.h')):
return None
return (tcl_lib_dir, tcl_inc_dir, tcl_lib,
tk_lib_dir, tk_inc_dir, tk_lib)
def guess_tcl_config(self, tcl_lib_dir, tk_lib_dir, tk_ver):
if not (os.path.exists(tcl_lib_dir) and os.path.exists(tk_lib_dir)):
return None
tcl_lib = os.path.normpath(os.path.join(tcl_lib_dir, '../'))
tk_lib = os.path.normpath(os.path.join(tk_lib_dir, '../'))
tcl_inc = os.path.normpath(
os.path.join(tcl_lib_dir,
'../../include/tcl' + tk_ver))
if not os.path.exists(tcl_inc):
tcl_inc = os.path.normpath(
os.path.join(tcl_lib_dir,
'../../include'))
tk_inc = os.path.normpath(os.path.join(
tk_lib_dir,
'../../include/tk' + tk_ver))
if not os.path.exists(tk_inc):
tk_inc = os.path.normpath(os.path.join(
tk_lib_dir,
'../../include'))
if not os.path.exists(os.path.join(tk_inc, 'tk.h')):
tk_inc = tcl_inc
if not os.path.exists(tcl_inc):
# this is a hack for suse linux, which is broken
if (sys.platform.startswith('linux') and
os.path.exists('/usr/include/tcl.h') and
os.path.exists('/usr/include/tk.h')):
tcl_inc = '/usr/include'
tk_inc = '/usr/include'
if not os.path.exists(os.path.join(tk_inc, 'tk.h')):
return None
return tcl_lib, tcl_inc, 'tcl' + tk_ver, tk_lib, tk_inc, 'tk' + tk_ver
def hardcoded_tcl_config(self):
tcl_inc = "/usr/local/include"
tk_inc = "/usr/local/include"
tcl_lib = "/usr/local/lib"
tk_lib = "/usr/local/lib"
return tcl_lib, tcl_inc, 'tcl', tk_lib, tk_inc, 'tk'
def add_flags(self, ext):
if sys.platform == 'win32':
major, minor1, minor2, s, tmp = sys.version_info
ext.include_dirs.extend(['win32_static/include/tcl85'])
ext.libraries.extend(['tk85', 'tcl85'])
ext.library_dirs.extend([os.path.join(sys.prefix, 'dlls')])
elif sys.platform == 'darwin':
# this config section lifted directly from Imaging - thanks to
# the effbot!
# First test for a MacOSX/darwin framework install
from os.path import join, exists
framework_dirs = [
                join(os.getenv('HOME') or '', 'Library/Frameworks'),
'/Library/Frameworks',
'/System/Library/Frameworks/',
]
# Find the directory that contains the Tcl.framework and
# Tk.framework bundles.
tk_framework_found = 0
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if not exists(join(F, fw + '.framework')):
break
else:
                    # ok, F is now a directory with both frameworks. Continue
                    # building
tk_framework_found = 1
break
if tk_framework_found:
# For 8.4a2, we must add -I options that point inside
# the Tcl and Tk frameworks. In later release we
# should hopefully be able to pass the -F option to
# gcc, which specifies a framework lookup path.
tk_include_dirs = [
join(F, fw + '.framework', H)
for fw in ('Tcl', 'Tk')
for H in ('Headers', 'Versions/Current/PrivateHeaders')
]
# For 8.4a2, the X11 headers are not included. Rather
# than include a complicated search, this is a
# hard-coded path. It could bail out if X11 libs are
# not found...
# tk_include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
ext.include_dirs.extend(tk_include_dirs)
ext.extra_link_args.extend(frameworks)
ext.extra_compile_args.extend(frameworks)
# you're still here? ok we'll try it this way...
else:
# There are 3 methods to try, in decreasing order of "smartness"
#
# 1. Parse the tclConfig.sh and tkConfig.sh files that have
# all the information we need
#
# 2. Guess the include and lib dirs based on the location of
# Tkinter's 'tcl_library' and 'tk_library' variables.
#
# 3. Use some hardcoded locations that seem to work on a lot
# of distros.
# Query Tcl/Tk system for library paths and version string
try:
tcl_lib_dir, tk_lib_dir, tk_ver = self.query_tcltk()
except:
tk_ver = ''
result = self.hardcoded_tcl_config()
else:
result = self.parse_tcl_config(tcl_lib_dir, tk_lib_dir)
if result is None:
result = self.guess_tcl_config(
tcl_lib_dir, tk_lib_dir, tk_ver)
if result is None:
result = self.hardcoded_tcl_config()
# Add final versions of directories and libraries to ext lists
(tcl_lib_dir, tcl_inc_dir, tcl_lib,
tk_lib_dir, tk_inc_dir, tk_lib) = result
ext.include_dirs.extend([tcl_inc_dir, tk_inc_dir])
ext.library_dirs.extend([tcl_lib_dir, tk_lib_dir])
ext.libraries.extend([tcl_lib, tk_lib])
class BackendGtk(OptionalBackendPackage):
name = "gtk"
def check_requirements(self):
try:
import gtk
except ImportError:
raise CheckFailed("Requires pygtk")
except RuntimeError:
raise CheckFailed('pygtk present, but import failed.')
else:
version = (2, 2, 0)
if gtk.pygtk_version < version:
raise CheckFailed(
"Requires pygtk %d.%d.%d or later. "
"Found %d.%d.%d" % (version + gtk.pygtk_version))
ext = self.get_extension()
self.add_flags(ext)
check_include_file(ext.include_dirs,
os.path.join("gtk", "gtk.h"),
'gtk')
check_include_file(ext.include_dirs,
os.path.join("pygtk", "pygtk.h"),
'pygtk')
return 'Gtk: %s pygtk: %s' % (
".".join(str(x) for x in gtk.gtk_version),
".".join(str(x) for x in gtk.pygtk_version))
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/_backend_gdk.c'
]
ext = make_extension('matplotlib.backends._backend_gdk', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
return ext
def add_flags(self, ext):
if sys.platform == 'win32':
def getoutput(s):
ret = os.popen(s).read().strip()
return ret
if 'PKG_CONFIG_PATH' not in os.environ:
# If Gtk+ is installed, pkg-config is required to be installed
os.environ['PKG_CONFIG_PATH'] = 'C:\\GTK\\lib\\pkgconfig'
            # popen is broken on my win32 platform so I can't use pkg-config
ext.library_dirs.extend(
['C:/GTK/bin', 'C:/GTK/lib'])
ext.include_dirs.extend(
['win32_static/include/pygtk-2.0',
'C:/GTK/include',
'C:/GTK/include/gobject',
'C:/GTK/include/gext',
'C:/GTK/include/glib',
'C:/GTK/include/pango',
'C:/GTK/include/atk',
'C:/GTK/include/X11',
'C:/GTK/include/cairo',
'C:/GTK/include/gdk',
'C:/GTK/include/gdk-pixbuf',
'C:/GTK/include/gtk',
])
pygtkIncludes = getoutput(
'pkg-config --cflags-only-I pygtk-2.0').split()
gtkIncludes = getoutput(
'pkg-config --cflags-only-I gtk+-2.0').split()
includes = pygtkIncludes + gtkIncludes
ext.include_dirs.extend([include[2:] for include in includes])
pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
gtkLinker = getoutput('pkg-config --libs gtk+-2.0').split()
linkerFlags = pygtkLinker + gtkLinker
ext.libraries.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-l')])
ext.library_dirs.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-L')])
ext.extra_link_args.extend(
[flag for flag in linkerFlags if not
(flag.startswith('-l') or flag.startswith('-L'))])
# visual studio doesn't need the math library
if (sys.platform == 'win32' and
win32_compiler == 'msvc' and
'm' in ext.libraries):
ext.libraries.remove('m')
elif sys.platform != 'win32':
pkg_config.setup_extension(ext, 'pygtk-2.0')
pkg_config.setup_extension(ext, 'gtk+-2.0')
class BackendGtkAgg(BackendGtk):
name = "gtkagg"
    def check(self):
        try:
            result = super(BackendGtkAgg, self).check()
        except:
            raise
        else:
            BackendAgg.force = True
            return result
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/agg_py_transforms.cpp',
'src/_gtkagg.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.backends._gtkagg', sources)
self.add_flags(ext)
LibAgg().add_flags(ext)
CXX().add_flags(ext)
Numpy().add_flags(ext)
return ext
def backend_gtk3agg_internal_check(x):
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (ImportError, RuntimeError):
return (False, "Requires pygobject to be installed.")
return (True, "version %s.%s.%s" % (
Gtk.get_major_version(),
Gtk.get_micro_version(),
Gtk.get_minor_version()))
class BackendGtk3Agg(OptionalBackendPackage):
name = "gtk3agg"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
if sys.version_info[0] >= 3:
raise CheckFailed("gtk3agg backend does not work on Python 3")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
success, msg = p.map(backend_gtk3agg_internal_check, [0])[0]
except:
success = False
msg = "Could not determine"
finally:
p.close()
p.join()
if success:
BackendAgg.force = True
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def backend_gtk3cairo_internal_check(x):
try:
import cairo
except ImportError:
return (False, "Requires cairo to be installed.")
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (RuntimeError, ImportError):
return (False, "Requires pygobject to be installed.")
return (True, "version %s.%s.%s" % (
Gtk.get_major_version(),
Gtk.get_micro_version(),
Gtk.get_minor_version()))
class BackendGtk3Cairo(OptionalBackendPackage):
name = "gtk3cairo"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
success, msg = p.map(backend_gtk3cairo_internal_check, [0])[0]
p.close()
p.join()
if success:
BackendAgg.force = True
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
class BackendWxAgg(OptionalBackendPackage):
name = "wxagg"
def check_requirements(self):
try:
import wxversion
except ImportError:
raise CheckFailed("requires wxPython")
try:
_wx_ensure_failed = wxversion.AlreadyImportedError
except AttributeError:
_wx_ensure_failed = wxversion.VersionError
try:
wxversion.ensureMinimal('2.8')
except _wx_ensure_failed:
pass
try:
import wx
backend_version = wx.VERSION_STRING
except ImportError:
raise CheckFailed("requires wxPython")
# Extra version check in case wxversion lacks AlreadyImportedError;
# then VersionError might have been raised and ignored when
# there really *is* a problem with the version.
major, minor = [int(n) for n in backend_version.split('.')[:2]]
if major < 2 or (major < 3 and minor < 8):
raise CheckFailed(
"Requires wxPython 2.8, found %s" % backend_version)
BackendAgg.force = True
return "version %s" % backend_version
class BackendMacOSX(OptionalBackendPackage):
name = 'macosx'
def check_requirements(self):
if sys.platform != 'darwin':
raise CheckFailed("Mac OS-X only")
return 'darwin'
def get_extension(self):
sources = [
'src/_macosx.m',
'src/agg_py_transforms.cpp',
'src/path_cleanup.cpp'
]
ext = make_extension('matplotlib.backends._macosx', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
CXX().add_flags(ext)
ext.extra_link_args.extend(['-framework', 'Cocoa'])
return ext
class Windowing(OptionalBackendPackage):
"""
Builds the windowing extension.
"""
name = "windowing"
def check_requirements(self):
if sys.platform != 'win32':
raise CheckFailed("Microsoft Windows only")
config = self.get_config()
if config is False:
raise CheckFailed("skipping due to configuration")
return "installing"
def get_extension(self):
sources = [
"src/_windowing.cpp"
]
ext = make_extension('matplotlib._windowing', sources)
ext.include_dirs.extend(['C:/include'])
ext.libraries.extend(['user32'])
ext.library_dirs.extend(['C:/lib'])
ext.extra_link_args.append("-mwindows")
return ext
class BackendQt4(OptionalBackendPackage):
name = "qt4agg"
def convert_qt_version(self, version):
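        # Qt reports its version as a packed integer, two hex digits per
        # component; tracing the loop below, e.g. 0x040806 becomes '4.8.6'.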
version = '%x' % version
temp = []
while len(version) > 0:
version, chunk = version[:-2], version[-2:]
temp.insert(0, str(int(chunk, 16)))
return '.'.join(temp)
def check_requirements(self):
try:
from PyQt4 import pyqtconfig
except ImportError:
raise CheckFailed("PyQt4 not found")
# Import may still be broken for our python
try:
qtconfig = pyqtconfig.Configuration()
except AttributeError:
raise CheckFailed('PyQt4 not correctly imported')
BackendAgg.force = True
return ("Qt: %s, PyQt4: %s" %
(self.convert_qt_version(
qtconfig.qt_version),
qtconfig.pyqt_version_str))
class BackendPySide(OptionalBackendPackage):
name = "pyside"
def check_requirements(self):
try:
from PySide import __version__
from PySide import QtCore
except ImportError:
raise CheckFailed("PySide not found")
else:
BackendAgg.force = True
return ("Qt: %s, PySide: %s" %
(QtCore.__version__, __version__))
class BackendCairo(OptionalBackendPackage):
name = "cairo"
def check_requirements(self):
try:
import cairo
except ImportError:
raise CheckFailed("cairo not found")
else:
return "version %s" % cairo.version
class DviPng(SetupPackage):
name = "dvipng"
optional = True
def check(self):
try:
output = check_output('dvipng -version', shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.splitlines()[1].decode().split()[-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
raise CheckFailed()
class Ghostscript(SetupPackage):
name = "ghostscript"
optional = True
def check(self):
try:
if sys.platform == 'win32':
command = 'gswin32c --version'
try:
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
command = 'gswin64c --version'
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
else:
command = 'gs --version'
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.decode()[:-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
raise CheckFailed()
class LaTeX(SetupPackage):
name = "latex"
optional = True
def check(self):
try:
output = check_output('latex -version', shell=True,
stderr=subprocess.STDOUT)
line = output.splitlines()[0].decode()
            pattern = r'(3\.1\d+)|(MiKTeX \d+\.\d+)'
match = re.search(pattern, line)
return "version %s" % match.group(0)
except (IndexError, ValueError, AttributeError, subprocess.CalledProcessError):
raise CheckFailed()
class PdfToPs(SetupPackage):
name = "pdftops"
optional = True
def check(self):
try:
output = check_output('pdftops -v', shell=True,
stderr=subprocess.STDOUT)
for line in output.splitlines():
line = line.decode()
if 'version' in line:
return "version %s" % line.split()[2]
except (IndexError, ValueError, subprocess.CalledProcessError):
pass
raise CheckFailed()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import object
import numpy as np
from .generalTools import sym_to_name
from scipy.interpolate import InterpolatedUnivariateSpline as InterpUS
def getNebAbunds(set_name, logZ, dust=True, re_z=False, **kwargs):
'''
    getNebAbunds(set_name, logZ, dust=True, re_z=False)
    set_name must be one of 'dopita', 'newdopita', 'cl01', 'yeh',
    'varyNO', 'gutkin', 'UVbyler', 'varyCO', or 'LIMS'
'''
allowed_names = ['dopita', 'newdopita', 'cl01', 'yeh',
'varyNO', 'gutkin', 'UVbyler', 'varyCO', 'LIMS']
if set_name in allowed_names:
return eval('{}({}, dust={}, re_z={})'.format(set_name, logZ, dust, re_z))
else:
raise IOError(allowed_names)
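# Usage sketch (illustrative values; attribute names follow the classes below):
#   abunds = getNebAbunds('UVbyler', -0.5, dust=True)
#   abunds.solarstr    # e.g. 'abundances GASS10 no grains\ngrains ISM'
#   abunds.elem_strs   # list of 'element abundance <name> <value> log' strings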
class abundSet(object):
def __init__(self, set_name, logZ):
'''
overarching class for abundance sets.
abundSet('dopita', 0.0)
'''
self.logZ = logZ
self.abund_0 = load_abund(set_name)
self.depl = load_depl(set_name)
self.calcSpecial()
self.calcFinal()
self.inputStrings()
def calcSpecial(self):
return
def calcFinal(self):
return
def inputStrings(self):
self.solarstr = 'abundances {} {}'.format(self.solar, self.grains)
elem_strs = []
names = sym_to_name()
for key in list(self.abund_0.keys()):
elm = names[key]
abund = self.__getattribute__(key)
#if hasattr(self, 're_z'):
# if key != 'He':
# abund -= self.re_z
outstr = 'element abundance {0} {1:.2f} log'.format(elm, abund)
elem_strs.append(outstr)
self.__setattr__('elem_strs', elem_strs)
return
class dopita(abundSet):
solar = 'old solar 84'
def __init__(self, logZ, dust=True, re_z=False):
'''
Dopita+2001: old solar abundances = 0.019
ISM grains
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
if re_z:
self.re_z = logZ
else:
self.re_z = 0.0
abundSet.__init__(self, 'dopita', logZ)
def calcSpecial(self):
'''
piece-wise function for nitrogen abund (step-function)
functional form for helium
'''
def calc_N(logZ):
if logZ <= -0.63:
return -4.57 + logZ
else:
return -3.94 + (2.0*logZ)
def calc_He(logZ):
return np.log10(0.08096 + (0.02618*(10.0**logZ)))
self.__setattr__('He', calc_He(self.logZ))
self.__setattr__('N', calc_N(self.logZ)+self.depl['N'])
return
def calcFinal(self):
'''
apply depletions and scale with logZ
'''
[self.__setattr__(key, val+self.logZ+self.depl[key])
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
class newdopita(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
'''
Abundances from Dopita (2013)
        Solar Abundances from Grevesse 2010 - z= 0.013
includes smooth polynomial for N/O, C/O relationship
functional form for He(z)
new depletion factors
ISM grains
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z=re_z
abundSet.__init__(self, 'newdopita', logZ)
def calcSpecial(self):
def calc_He(logZ):
return np.log10(0.0737 + (0.024*(10.0**logZ)))
def calc_CNO(logZ):
oxy = np.array([7.39, 7.50, 7.69, 7.99, 8.17,
8.39, 8.69, 8.80, 8.99, 9.17, 9.39])
nit = np.array([-6.61, -6.47, -6.23, -5.79, -5.51,
-5.14, -4.60, -4.40, -4.04, -3.67, -3.17])
car = np.array([-5.58, -5.44, -5.20, -4.76, -4.48,
-4.11, -3.57, -3.37, -3.01, -2.64, -2.14])
O = self.abund_0['O'] + logZ
C = float(InterpUS(oxy, car, k=1)(O + 12.0))
N = float(InterpUS(oxy, nit, k=1)(O + 12.0))
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val + self.depl[key])
for key, val in zip(['C', 'N', 'O'], [C, N, O])]
return
def calcFinal(self):
[self.__setattr__(key, val+self.logZ+self.depl[key])
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
class UVbyler(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
'''
Abundances from Dopita (2013)
        Solar Abundances from Grevesse 2010 - z= 0.013
New fit for N/O, C/O relationship
functional form for He(z)
new depletion factors
ISM grains
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z=re_z
abundSet.__init__(self, 'UVbyler', logZ)
def calcSpecial(self):
def calc_He(logZ):
return np.log10(0.0737 + (0.024*(10.0**logZ)))
def calc_CNO(logZ):
O = self.abund_0['O'] + logZ
logOH = O + 12.0
logCO = -0.8 + 0.14*(logOH - 8.0) + (0.192*np.log(1. + np.exp((logOH - 8.0)/0.2)))
logNO = -1.5 + (0.1*np.log(1. + np.exp((logOH - 8.3)/0.1)))
#C = np.log10((10.**O)*(10.**-0.789 + 10.**(4.105 + 1.263*O)))
#N = np.log10((10.**O)*(10.**-1.579 + 10.**(3.579 + 1.526*O)))
C = logCO + O
N = logNO + O
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val + self.depl[key])
for key, val in zip(['C', 'N', 'O'], [C, N, O])]
return
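    # Spot check of the fits above (plain arithmetic, not part of the original
    # source): at 12 + log(O/H) = 8.69 the expressions in calc_CNO give
    # log(C/O) of about -0.03 and log(N/O) of about -1.11, before depletion
    # factors are applied.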
def calcFinal(self):
[self.__setattr__(key, val+self.logZ+self.depl[key])
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
class LIMS(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
'''
Solar UVByler abundances modified:
Enhance alpha abundances +0.2 dex (O, Ne, Mg, Si, S, Ar, Ca)
Zhu+2010, Conroy+2014, Choi+2014
Enhance C, N following PNe abundances (logNO ~ -0.5, logCO ~ 0)
Henry+2018, but also Karakas 2010, Maciel 2017
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z=re_z
abundSet.__init__(self, 'LIMS', logZ)
def calcSpecial(self):
def calc_He(logZ):
return np.log10(0.0737 + (0.024*(10.0**logZ)))
self.__setattr__('He', calc_He(self.logZ))
return
def calcFinal(self):
[self.__setattr__(key, val+self.logZ+self.depl[key])
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
class IIZw(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
'''
Abundances from Dopita (2013)
        Solar Abundances from Grevesse 2010 - z= 0.013
New fit for N/O, C/O relationship
functional form for He(z)
new depletion factors
ISM grains
log O/H + 12. = 8.09
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z=re_z
abundSet.__init__(self, 'UVbyler', logZ)
def calcSpecial(self):
def calc_He(logZ):
return np.log10(0.0737 + (0.024*(10.0**-0.6)))
def calc_CNO(logZ):
#O = self.abund_0['O'] + logZ
#logOH = O + 12.0
O = self.abund_0['O'] + -0.6
logOH = 8.09
#
logCO = -0.8 + 0.14*(logOH - 8.0) +\
(0.192*np.log(1.+np.exp((logOH - 8.0)/0.2)))
logNO = -1.5 + (0.1*np.log(1. + np.exp((logOH - 8.3)/0.1)))
C = logCO + O + logZ
N = logNO + O
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val)
for key, val in zip(['C', 'O'], [C, O])]
self.N = N + self.depl['N']
return
def calcFinal(self):
[self.__setattr__(key, val+self.depl[key])
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
class varyCO(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=0.0):
'''
arbitrarily vary C/O at fixed O.
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z=re_z
abundSet.__init__(self, 'UVbyler', logZ)
def calcSpecial(self):
def calc_He(logZ):
return np.log10(0.0737 + (0.024*(10.0**logZ)))
def calc_CNO(logZ):
O = self.abund_0['O'] + logZ
logOH = O + 12.0
logCO = -0.8 + 0.14*(logOH - 8.0) + (0.192*np.log(1. + np.exp((logOH - 8.0)/0.2)))
logNO = -1.5 + (0.1*np.log(1. + np.exp((logOH - 8.3)/0.1)))
#C = np.log10((10.**O)*(10.**-0.789 + 10.**(4.105 + 1.263*O)))
#N = np.log10((10.**O)*(10.**-1.579 + 10.**(3.579 + 1.526*O)))
C = logCO + O
N = logNO + O
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
self.__setattr__('C', C + self.depl['C'] + self.re_z)
self.__setattr__('N', N + self.depl['N'])
self.__setattr__('O', O + self.depl['O'])
return
def calcFinal(self):
[self.__setattr__(key, val + self.re_z + self.depl[key])
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
class gutkin(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
'''
Gutkin+2016
PARSEC metallicity (Bressan+2012)
        based on Grevesse+Sauval (1998) and Caffau+2011
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z=re_z
abundSet.__init__(self, 'gutkin', logZ)
def calcSpecial(self):
def calc_He(logZ):
Z = (10.**logZ)*0.01524
Y = 0.2485 + 1.7756*Z
X = 1. - Y - Z
return np.log10(Y/X/4.)
def calc_CNO(logZ):
O = self.abund_0['O'] + logZ
N = np.log10((0.41 * 10.**O)*(10.**-1.6 + 10.**(2.33 + O)))
C = self.abund_0['C'] + logZ
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val)
for key, val in zip(['C', 'N', 'O'], [C, N, O])]
return
def calcFinal(self):
[self.__setattr__(key, val)
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
class varyNO(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
'''
varying N at fixed O.
'''
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z=re_z
abundSet.__init__(self, 'dopita', logZ)
def calcSpecial(self):
def calc_He(logZ):
return -1.01
def calc_CNO(logZ):
oxy = np.array([7.39, 7.50, 7.69, 7.99, 8.17,
8.39, 8.69, 8.80, 8.99, 9.17, 9.39])
nit = np.array([-6.61, -6.47, -6.23, -5.79, -5.51,
-5.14, -4.60, -4.40, -4.04, -3.67, -3.17])
car = np.array([-5.58, -5.44, -5.20, -4.76, -4.48,
-4.11, -3.57, -3.37, -3.01, -2.64, -2.14])
O = self.abund_0['O']
C = float(InterpUS(oxy, car, k=1)(O + 12.0))
N = float(InterpUS(oxy, nit, k=1)(O + logZ + 12.0))
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val)
for key, val in zip(['C', 'N', 'O'], [C, N, O])]
return
def calcFinal(self):
[self.__setattr__(key, val)
for key, val in list(self.abund_0.items()) if not hasattr(self, key)]
return
def load_abund(set_name):
if set_name == 'dopita':
adict = dict(He=-1.01,
C=-3.44,
N=-3.95,
O=-3.07,
Ne=-3.91,
Mg=-4.42,
Si=-4.45,
S=-4.79,
Ar=-5.44,
Ca=-5.64,
Fe=-4.33,
F=-7.52,
Na=-5.69,
Al=-5.53,
P=-6.43,
Cl=-6.73,
K=-6.87,
Ti=-6.96,
Cr=-6.32,
Mn=-6.47,
Co=-7.08,
Ni=-5.75,
Cu=-7.73,
Zn=-7.34)
elif set_name == 'newdopita':
adict = dict(He=-1.01,
C=-3.57,
N=-4.60,
O=-3.31,
Ne=-4.07,
Na=-5.75,
Mg=-4.40,
Al=-5.55,
Si=-4.49,
S=-4.86,
Cl=-6.63,
Ar=-5.60,
Ca=-5.66,
Fe=-4.50,
Ni=-5.78,
F=-7.44,
P=-6.59,
K=-6.97,
Cr=-6.36,
Ti=-7.05,
Mn=-6.57,
Co=-7.01,
Cu=-7.81,
Zn=-7.44)
elif set_name == 'UVbyler':
adict = dict(He=-1.01,
C=-3.57,
N=-4.17,
O=-3.31,
Ne=-4.07,
Na=-5.75,
Mg=-4.40,
Al=-5.55,
Si=-4.49,
S=-4.86,
Cl=-6.63,
Ar=-5.60,
Ca=-5.66,
Fe=-4.50,
Ni=-5.78,
F=-7.44,
P=-6.59,
K=-6.97,
Cr=-6.36,
Ti=-7.05,
Mn=-6.57,
Co=-7.01,
Cu=-7.81,
Zn=-7.44)
elif set_name == 'LIMS':
adict = dict(He=-1.01,
C=-3.11,
N=-3.61,
O=-3.11,
Ne=-3.87,
Na=-5.75,
Mg=-4.20,
Al=-5.55,
Si=-4.29,
S=-4.66,
Cl=-6.63,
Ar=-5.40,
Ca=-5.46,
Fe=-4.50,
Ni=-5.78,
F=-7.44,
P=-6.59,
K=-6.97,
Cr=-6.36,
Ti=-7.05,
Mn=-6.57,
Co=-7.01,
Cu=-7.81,
Zn=-7.44)
elif set_name == 'gutkin':
adict = dict(He=-1.01,
C=-3.53,
N=-4.32,
O=-3.17,
F=-7.47,
Ne=-4.01,
Na=-5.70,
Mg=-4.45,
Al=-5.56,
Si=-4.48,
P=-6.57,
S=-4.87,
Cl=-6.53,
Ar=-5.63,
K=-6.92,
Ca=-5.67,
Sc=-8.86,
Ti=-7.01,
V=-8.03,
Cr=-6.36,
Mn=-6.64,
Fe=-4.51,
Co=-7.11,
Ni=-5.78,
Cu=-7.82,
Zn=-7.43)
return adict
def load_depl(set_name):
if set_name == 'dopita':
ddict = dict(C=-0.30,
N=-0.22,
O=-0.22,
Ne=0.0,
Mg=-0.70,
Si=-1.0,
S=0.0,
Ar=0.0,
Ca=-2.52,
Fe=-2.0,
F=0.0,
Na=0.0,
Al=0.0,
P=0.0,
Cl=0.0,
K=0.0,
Ti=0.0,
Cr=0.0,
Mn=0.0,
Co=0.0,
Ni=0.0,
Cu=0.0,
Zn=0.0)
elif set_name == 'newdopita':
ddict = dict(He=0.00,
C=-0.30,
N=-0.05,
O=-0.07,
Ne=0.00,
Na=-1.00,
Mg=-1.08,
Al=-1.39,
Si=-0.81,
S=0.00,
Cl=-1.00,
Ar=0.00,
Ca=-2.52,
Fe=-1.31,
Ni=-2.00,
F=0.0,
P=0.0,
K=0.0,
Cr=0.0,
Ti=0.0,
Mn=0.0,
Co=0.0,
Cu=0.0,
Zn=0.0)
elif set_name == 'UVbyler':
ddict = dict(He=0.00,
C=-0.30,
N=-0.05,
O=-0.07,
Ne=0.00,
Na=-1.00,
Mg=-1.08,
Al=-1.39,
Si=-0.81,
S=0.00,
Cl=-1.00,
Ar=0.00,
Ca=-2.52,
Fe=-1.31,
Ni=-2.00,
F=0.0,
P=0.0,
K=0.0,
Cr=0.0,
Ti=0.0,
Mn=0.0,
Co=0.0,
Cu=0.0,
Zn=0.0)
elif set_name == 'LIMS':
ddict = dict(He=0.00,
C=-0.30,
N=-0.05,
O=-0.07,
Ne=0.00,
Na=-1.00,
Mg=-1.08,
Al=-1.39,
Si=-0.81,
S=0.00,
Cl=-1.00,
Ar=0.00,
Ca=-2.52,
Fe=-1.31,
Ni=-2.00,
F=0.0,
P=0.0,
K=0.0,
Cr=0.0,
Ti=0.0,
Mn=0.0,
Co=0.0,
Cu=0.0,
Zn=0.0)
elif set_name == 'gutkin':
ddict = dict(He=0.00,
Li=-0.8,
C=-0.30,
O=-0.15,
Na=-0.60,
Mg=-0.70,
Al=-1.70,
Si=-1.00,
Cl=-0.30,
Ca=-2.52,
Fe=-2.00,
Ni=-1.40)
return ddict
|
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base import *
import json
import toml
import yaml
from itertools import chain
from threading import Thread
import socket
def extract_path_attribute(path, typ):
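    '''Return the first attribute dict in path['attrs'] whose 'type' equals
    typ, or None if the path does not carry that attribute.'''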
for a in path['attrs']:
if a['type'] == typ:
return a
return None
class GoBGPContainer(BGPContainer):
SHARED_VOLUME = '/root/shared_volume'
QUAGGA_VOLUME = '/etc/quagga'
def __init__(self, name, asn, router_id, ctn_image_name='osrg/gobgp',
log_level='debug', zebra=False, config_format='toml'):
super(GoBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name)
self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
self.log_level = log_level
self.prefix_set = None
self.neighbor_set = None
self.bgp_set = None
self.default_policy = None
self.zebra = zebra
self.config_format = config_format
def _start_gobgp(self, graceful_restart=False):
c = CmdBuffer()
c << '#!/bin/bash'
c << '/go/bin/gobgpd -f {0}/gobgpd.conf -l {1} -p {2} -t {3} > ' \
'{0}/gobgpd.log 2>&1'.format(self.SHARED_VOLUME, self.log_level, '-r' if graceful_restart else '', self.config_format)
cmd = 'echo "{0:s}" > {1}/start.sh'.format(c, self.config_dir)
local(cmd, capture=True)
cmd = "chmod 755 {0}/start.sh".format(self.config_dir)
local(cmd, capture=True)
self.local("{0}/start.sh".format(self.SHARED_VOLUME), detach=True)
def graceful_restart(self):
self.local("pkill -INT gobgpd")
def _start_zebra(self):
cmd = 'cp {0}/zebra.conf {1}/'.format(self.SHARED_VOLUME, self.QUAGGA_VOLUME)
self.local(cmd)
cmd = '/usr/lib/quagga/zebra -f {0}/zebra.conf'.format(self.QUAGGA_VOLUME)
self.local(cmd, detach=True)
def run(self):
super(GoBGPContainer, self).run()
if self.zebra:
self._start_zebra()
self._start_gobgp()
return self.WAIT_FOR_BOOT
def _get_as_path(self, path):
        asps = (p['as_paths'] for p in path['attrs'] if
                p['type'] == BGP_ATTR_TYPE_AS_PATH and 'as_paths' in p
                and p['as_paths'] is not None)
asps = chain.from_iterable(asps)
asns = (asp['asns'] for asp in asps)
return list(chain.from_iterable(asns))
def _get_nexthop(self, path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_NEXT_HOP or p['type'] == BGP_ATTR_TYPE_MP_REACH_NLRI:
return p['nexthop']
def _trigger_peer_cmd(self, cmd, peer):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp neighbor {0} {1}'.format(peer_addr, cmd)
self.local(cmd)
def disable_peer(self, peer):
self._trigger_peer_cmd('disable', peer)
def enable_peer(self, peer):
self._trigger_peer_cmd('enable', peer)
def reset(self, peer):
self._trigger_peer_cmd('reset', peer)
def softreset(self, peer, rf='ipv4', type='in'):
self._trigger_peer_cmd('softreset{0} -a {1}'.format(type, rf), peer)
def get_local_rib(self, peer, prefix='', rf='ipv4'):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp -j neighbor {0} local {1} -a {2}'.format(peer_addr, prefix, rf)
output = self.local(cmd, capture=True)
ret = json.loads(output)
for d in ret:
for p in d["paths"]:
p["nexthop"] = self._get_nexthop(p)
p["aspath"] = self._get_as_path(p)
return ret
def get_global_rib(self, prefix='', rf='ipv4'):
cmd = 'gobgp -j global rib {0} -a {1}'.format(prefix, rf)
output = self.local(cmd, capture=True)
ret = json.loads(output)
for d in ret:
for p in d["paths"]:
p["nexthop"] = self._get_nexthop(p)
p["aspath"] = self._get_as_path(p)
return ret
def monitor_global_rib(self, queue, rf='ipv4'):
def monitor():
it = self.local('gobgp -j monitor global rib -a {0}'.format(rf), stream=True)
buf = ''
try:
for line in it:
if line == '\n':
p = json.loads(buf)[0]
p["nexthop"] = self._get_nexthop(p)
p["aspath"] = self._get_as_path(p)
queue.put(p)
buf = ''
else:
buf += line
except socket.timeout:
#self.local('pkill -x gobgp')
queue.put('timeout')
return
t = Thread(target=monitor)
t.daemon = True
t.start()
def _get_adj_rib(self, adj_type, peer, prefix='', rf='ipv4'):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp neighbor {0} adj-{1} {2} -a {3} -j'.format(peer_addr,
adj_type,
prefix, rf)
output = self.local(cmd, capture=True)
ret = [p["paths"][0] for p in json.loads(output)]
for p in ret:
p["nexthop"] = self._get_nexthop(p)
p["aspath"] = self._get_as_path(p)
p["prefix"] = p['nlri']['prefix']
return ret
def get_adj_rib_in(self, peer, prefix='', rf='ipv4'):
return self._get_adj_rib('in', peer, prefix, rf)
def get_adj_rib_out(self, peer, prefix='', rf='ipv4'):
return self._get_adj_rib('out', peer, prefix, rf)
def get_neighbor(self, peer):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
peer_addr = self.peers[peer]['neigh_addr'].split('/')[0]
cmd = 'gobgp -j neighbor {0}'.format(peer_addr)
return json.loads(self.local(cmd, capture=True))
def get_neighbor_state(self, peer):
return self.get_neighbor(peer)['info']['bgp_state']
def clear_policy(self):
self.policies = {}
for info in self.peers.itervalues():
info['policies'] = {}
self.prefix_set = []
self.neighbor_set = []
self.statements = []
def set_prefix_set(self, ps):
if type(ps) is not list:
ps = [ps]
self.prefix_set = ps
def add_prefix_set(self, ps):
if self.prefix_set is None:
self.prefix_set = []
self.prefix_set.append(ps)
def set_neighbor_set(self, ns):
if type(ns) is not list:
ns = [ns]
self.neighbor_set = ns
def add_neighbor_set(self, ns):
if self.neighbor_set is None:
self.neighbor_set = []
self.neighbor_set.append(ns)
def set_bgp_defined_set(self, bs):
self.bgp_set = bs
def create_config(self):
self._create_config_bgp()
if self.zebra:
self._create_config_zebra()
def _create_config_bgp(self):
config = {'global': {'config': {'as': self.asn, 'router-id': self.router_id},
'route-selection-options':{
'config': {
'external-compare-router-id': True,
},
},
}}
for peer, info in self.peers.iteritems():
afi_safi_list = []
version = netaddr.IPNetwork(info['neigh_addr']).version
if version == 4:
afi_safi_list.append({'config':{'afi-safi-name': 'ipv4-unicast'}})
elif version == 6:
afi_safi_list.append({'config':{'afi-safi-name': 'ipv6-unicast'}})
else:
                raise Exception('invalid ip address version. {0}'.format(version))
if info['vpn']:
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv4-unicast'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv6-unicast'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l2vpn-evpn'}})
afi_safi_list.append({'config': {'afi-safi-name': 'rtc'}, 'route-target-membership': {'config': {'deferral-time': 10}}})
if info['flowspec']:
afi_safi_list.append({'config': {'afi-safi-name': 'ipv4-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv4-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'ipv6-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv6-flowspec'}})
n = {'config':
{'neighbor-address': info['neigh_addr'].split('/')[0],
'peer-as': peer.asn,
'auth-password': info['passwd'],
},
'afi-safis': afi_safi_list,
'timers': {'config': {
'connect-retry': 10,
}},
}
if info['passive']:
n['transport'] = {'config': {'passive-mode': True}}
if info['is_rs_client']:
n['route-server'] = {'config': {'route-server-client': True}}
if info['local_as']:
n['config']['local-as'] = info['local_as']
if info['prefix_limit']:
for v in afi_safi_list:
v['prefix-limit'] = {'config': {'max-prefixes': info['prefix_limit'], 'shutdown-threshold-pct': 80 }}
if info['graceful_restart'] is not None:
n['graceful-restart'] = {'config': {'enabled': True, 'restart-time': 20}}
for afi_safi in afi_safi_list:
afi_safi['mp-graceful-restart'] = {'config': {'enabled': True}}
if info['is_rr_client']:
clusterId = self.router_id
if 'cluster_id' in info and info['cluster_id'] is not None:
clusterId = info['cluster_id']
n['route-reflector'] = {'config' : {'route-reflector-client': True,
'route-reflector-cluster-id': clusterId}}
if len(info.get('default-policy', [])) + len(info.get('policies', [])) > 0:
n['apply-policy'] = {'config': {}}
for typ, p in info.get('policies', {}).iteritems():
n['apply-policy']['config']['{0}-policy-list'.format(typ)] = [p['name']]
def f(v):
if v == 'reject':
return 'reject-route'
elif v == 'accept':
return 'accept-route'
raise Exception('invalid default policy type {0}'.format(v))
for typ, d in info.get('default-policy', {}).iteritems():
n['apply-policy']['config']['default-{0}-policy'.format(typ)] = f(d)
if 'neighbors' not in config:
config['neighbors'] = []
config['neighbors'].append(n)
config['defined-sets'] = {}
if self.prefix_set:
config['defined-sets']['prefix-sets'] = self.prefix_set
if self.neighbor_set:
config['defined-sets']['neighbor-sets'] = self.neighbor_set
if self.bgp_set:
config['defined-sets']['bgp-defined-sets'] = self.bgp_set
policy_list = []
for p in self.policies.itervalues():
policy = {'name': p['name']}
if 'statements' in p:
policy['statements'] = p['statements']
policy_list.append(policy)
if len(policy_list) > 0:
config['policy-definitions'] = policy_list
if self.zebra:
config['zebra'] = {'config':{'enabled': True,
'redistribute-route-type-list':['connect']}}
with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new config]'.format(self.name))
            if self.config_format == 'toml':
                raw = toml.dumps(config)
            elif self.config_format == 'yaml':
                raw = yaml.dump(config)
            elif self.config_format == 'json':
                raw = json.dumps(config)
else:
raise Exception('invalid config_format {0}'.format(self.config_format))
print colors.yellow(indent(raw))
f.write(raw)
def _create_config_zebra(self):
c = CmdBuffer()
c << 'hostname zebra'
c << 'password zebra'
c << 'log file {0}/zebra.log'.format(self.QUAGGA_VOLUME)
c << 'debug zebra packet'
c << 'debug zebra kernel'
c << 'debug zebra rib'
c << ''
with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new config]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def reload_config(self):
daemon = []
daemon.append('gobgpd')
if self.zebra:
daemon.append('zebra')
for d in daemon:
cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
self.local(cmd)
for v in self.routes.itervalues():
if v['rf'] == 'ipv4' or v['rf'] == 'ipv6':
r = CmdBuffer(' ')
r << 'gobgp global -a {0}'.format(v['rf'])
r << 'rib add {0}'.format(v['prefix'])
if v['next-hop']:
r << 'nexthop {0}'.format(v['next-hop'])
if v['local-pref']:
r << 'local-pref {0}'.format(v['local-pref'])
if v['med']:
r << 'med {0}'.format(v['med'])
cmd = str(r)
elif v['rf'] == 'ipv4-flowspec' or v['rf'] == 'ipv6-flowspec':
cmd = 'gobgp global '\
'rib add match {0} then {1} -a {2}'.format(' '.join(v['matchs']), ' '.join(v['thens']), v['rf'])
else:
                raise Exception('unsupported route family: {0}'.format(v['rf']))
self.local(cmd)
class RawGoBGPContainer(GoBGPContainer):
def __init__(self, name, config, ctn_image_name='osrg/gobgp',
log_level='debug', zebra=False, config_format='yaml'):
        if config_format == 'toml':
            d = toml.loads(config)
        elif config_format == 'yaml':
            d = yaml.load(config)
        elif config_format == 'json':
            d = json.loads(config)
else:
raise Exception('invalid config format {0}'.format(config_format))
asn = d['global']['config']['as']
router_id = d['global']['config']['router-id']
self.config = config
super(RawGoBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name, log_level,
zebra, config_format)
def create_config(self):
with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new config]'.format(self.name))
print colors.yellow(indent(self.config))
f.write(self.config)
|
|
import os
import time
import M2Crypto.threading
import hashlib
import logging
import threading
import unittest
from M2Crypto import m2, SSL
from . import ssl, util as testutil, node as node_util
from .. import util, errors
from ..client.chunk import split_file
from ..jsonrpc import gen_storage_node_proxy_creator, StorageNodeProxy
from ..storage.jsonrpc import create_storage_node, StorageNodeSSLServer
from .test_storagenode import gen_chunk_name, gen_transfer_name
from .test_storagenode import make_expiretime, ControlNodeMockup
logger = logging.getLogger(__name__)
testutil.setup_logging()
TEST_FILE_PATH = None
TEST_CHUNKS = None
LISTEN_ADDRESS = '127.0.0.1'
def setup():
M2Crypto.threading.init()
TEST_DIR = testutil.create_tmp_directory()
global TEST_FILE_PATH
TEST_FILE_PATH = testutil.create_temp_file(int(2 ** 22 * 1.5))
output_dir = os.path.join(TEST_DIR, 'file_chunks')
os.mkdir(output_dir)
logger.debug('splitting file %s' % TEST_FILE_PATH)
chunks = split_file(TEST_FILE_PATH, output_dir, compress=False)
global TEST_CHUNKS
TEST_CHUNKS = chunks
logger.debug('done splitting file')
# generate ssl keys and certs
ssl.gen_ssl_all()
def make_control_node_context():
ca_cert_path = ssl.get_cert_path('ca_root')
key_path = ssl.get_key_path('control_node')
cert_path = ssl.get_cert_path('control_node')
ctx = SSL.Context('tlsv1')
ctx.load_cert_chain(cert_path, key_path)
ctx.load_verify_locations(ca_cert_path)
ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, 2)
ctx.set_session_id_ctx('ScatterBytes')
ctx.load_client_ca(ca_cert_path)
ctx.set_session_cache_mode(m2.SSL_SESS_CACHE_SERVER)
return ctx
class BaseMixIn(object):
def _upload_chunk(self, chunk, transfer_name, chunk_name, snode_name=None):
expire_time = make_expiretime(2)
transfers = self.control_node_proxy.transfers
self.transfers = transfers
snode_serial_number = self.snode_config.cert_info['serial_number']
        transfers[transfer_name] = {
            'chunk': chunk,
            'chunk_name': chunk_name,
            # serial number
            'receiving_node_id': snode_serial_number,
        }
self.transfer = transfers[transfer_name]
self.transfer_name = transfer_name
# sender serial, receiver serial, .., ..
chunk_hash_salt = util.b64encode(os.urandom(4))
chunk.salt = chunk_hash_salt
sn_args = [1002, self.snode_config.cert_info['serial_number'],
expire_time, transfer_name, chunk_name, chunk_hash_salt]
if snode_name is None:
snode_name = self.snode_name
snode_info = self.storage_nodes[snode_name]
snode_port = snode_info['port']
sn_proxy = self.make_sn_proxy(port=snode_port)
self.control_node_proxy._sign_args(sn_args)
# don't have to pass serial numbers
del sn_args[2]
del sn_args[2]
f = open(chunk.file_path, 'rb')
sn_args.append(f)
response = sn_proxy.store_chunk(*sn_args)
self.assertEqual(response, 'OK')
def create_storage_node_server(self, startup_registration_updater=True):
if not hasattr(self, 'storage_nodes'):
self.storage_nodes = {}
self.storage_node_ports = []
node_names = sorted(self.storage_nodes.keys())
if not node_names:
node_name = 'SN000001'
node_port = 40000
else:
last_name = node_names[-1]
node_name = 'SN' + str(int(last_name[2:]) + 1).zfill(6)
node_port = self.storage_node_ports[-1] + 1
self.storage_node_ports.append(node_port)
# generate a cert
# ssl.gen_ssl_node(node_name)
snode_config = node_util.prepare_node(node_name, 'storage', True)
snode_config.set('listen_address', LISTEN_ADDRESS, section='network')
snode_config.set('listen_port', node_port, section='network')
proxy_creator = gen_storage_node_proxy_creator(snode_config)
storage_node = create_storage_node(
self.control_node_proxy,
proxy_creator,
snode_config
)
server = StorageNodeSSLServer(storage_node)
if not startup_registration_updater:
server.storage_node.startup_registration_updater = False
server.storage_node.startup()
t = threading.Thread(target=server.serve_forever)
t.start()
self.storage_nodes[node_name] = {
'port': node_port,
'server': server,
'server_thread': t,
'storage_node': storage_node
}
return node_name
@property
def storage_node(self):
# default storage node
return self.storage_nodes[self.snode_name]['storage_node']
def make_sn_proxy(self, ssl_context=None, port=None):
if ssl_context is None:
ssl_context = self.client_ctx
if port is None:
port = self.snode_port
sn_proxy = StorageNodeProxy(
"https://%s:%s" % (LISTEN_ADDRESS, port), ssl_context
)
return sn_proxy
def test_check_hash(self):
chunk = TEST_CHUNKS[3]
sn_proxy = self.make_sn_proxy(ssl_context=self.control_node_ctx)
self._upload_chunk(chunk, gen_transfer_name(), gen_chunk_name())
chunk_name = self.transfer['chunk_name']
salt = os.urandom(4)
hash = chunk.calc_hash(salt=salt)
response = sn_proxy.check_hash(chunk_name, util.b64encode(salt))
self.assertEqual(response['chunk_hash'], hash)
class SingleThreadedTestCase(unittest.TestCase, BaseMixIn):
def setUp(self):
client_node_name = 'EEFFGGFF'
self.client_node_name = client_node_name
# generate a cert
ssl.gen_ssl_node(client_node_name)
client_config = node_util.prepare_node(
client_node_name, 'client', True
)
self.client_config = client_config
control_node_proxy = ControlNodeMockup()
self.control_node_proxy = control_node_proxy
# create 1 storage node to begin with
self.snode_name = self.create_storage_node_server(
startup_registration_updater=False
)
snode_info = self.storage_nodes[self.snode_name]
self.snode_port = snode_info['port']
self.snode_config = snode_info['storage_node'].config
# client_context
self.client_ctx = client_config.make_ssl_context()
# control node context
self.control_node_ctx = make_control_node_context()
def test_send_chunk(self):
chunk = TEST_CHUNKS[0]
transfer_name = gen_transfer_name()
chunk_name = gen_chunk_name()
self._upload_chunk(chunk, transfer_name, chunk_name)
        # create another server
snode2_name = self.create_storage_node_server(
startup_registration_updater=False
)
snode2_info = self.storage_nodes[snode2_name]
snode2_port = snode2_info['port']
snode2 = snode2_info['storage_node']
# need the serial number to sign the request
snode2_serial_number = snode2.certificate.serial_number
# create argument list to sign
from_serial = self.storage_node.certificate.serial_number
to_serial = snode2_serial_number
expire_time = make_expiretime(5)
transfer_name = gen_transfer_name()
auth_args = [from_serial, to_serial, expire_time, transfer_name,
chunk_name, chunk.salt]
self.control_node_proxy._sign_args(auth_args)
chunk_name = self.transfer['chunk_name']
signature = auth_args[0]
signature_ts = auth_args[1]
# register a new transfer on the control node
self.transfers[transfer_name] = {
'chunk_name': chunk_name,
'chunk': chunk,
'receiving_node_id': to_serial,
}
# use the context of the control node
sn_proxy = self.make_sn_proxy(self.control_node_ctx, self.snode_port)
priority = 20
uri = "https://%s:%s" % ('127.0.0.1', snode2_port)
# test
# sn_proxy2 = self.make_sn_proxy(self.control_node_ctx, snode2_port)
# response = sn_proxy2.hello()
# self.assertEqual(response, 'Hello Control Node!')
# self._upload_chunk(chunk, transfer_name, chunk_name, snode2_name)
response = sn_proxy.send_chunk(
chunk_name, chunk.salt, uri, transfer_name, priority,
signature, signature_ts, expire_time
)
self.assertEqual(response, 'OK')
# Second storage node should have the chunk.
# Give it time to make the transfer.
time.sleep(1)
try:
chunk_new = snode2._get_chunk(chunk_name)
except errors.ChunkNotFoundError:
time.sleep(1)
chunk_new = snode2._get_chunk(chunk_name)
self.assertEqual(
chunk.calc_hash(),
chunk_new.calc_hash()
)
def test_store_chunk(self):
chunk = TEST_CHUNKS[0]
transfer_name = gen_transfer_name()
chunk_name = gen_chunk_name()
self._upload_chunk(chunk, transfer_name, chunk_name)
# check what was sent to the server
cn_proxy = self.control_node_proxy
args = cn_proxy.confirm_transfer_args
self.assertEqual(args[0], self.storage_node.certificate.serial_number)
self.assertEqual(args[1], transfer_name)
self.assertEqual(args[2], chunk.calc_hash(util.b64decode(chunk.salt)))
new_path = self.storage_node.config.find_chunk_path(
self.transfer['chunk_name'])
self.assert_(os.path.exists(new_path))
def test_retrieve_chunk(self):
chunk = TEST_CHUNKS[3]
file_size = os.stat(chunk.file_path).st_size
expire_time = make_expiretime(30)
self._upload_chunk(chunk, gen_transfer_name(), gen_chunk_name())
chunk_name = self.transfer['chunk_name']
expire_time = make_expiretime(5)
# sender serial, receiver serial, .., ..
auth_args = [1002, self.storage_node.certificate.serial_number,
expire_time, chunk_name]
self.control_node_proxy._sign_args(auth_args)
args = auth_args
args.insert(6, 31)
args.insert(6, 30)
# remove serial
args.pop(2)
args.pop(2)
sn_proxy = self.make_sn_proxy()
# read the first byte
f = open(chunk.file_path, 'rb')
f.seek(30)
byte = f.read(1)
f.close()
f = sn_proxy.retrieve_chunk(*args)
self.assertEqual(f.read(), byte)
# read the last byte
f = open(chunk.file_path, 'rb')
f.seek(file_size - 1)
byte = f.read()
f.close()
args[-2] = file_size - 1
args[-1] = file_size
f = sn_proxy.retrieve_chunk(*args)
self.assertEqual(f.read(), byte)
# read the first byte
f = open(chunk.file_path, 'rb')
byte = f.read(1)
f.close()
args[-2] = 0
args[-1] = 1
f = sn_proxy.retrieve_chunk(*args)
self.assertEqual(f.read(), byte)
# read 2K
f = open(chunk.file_path, 'rb')
f.seek(500)
bytes = f.read(2048)
f.close()
args[-2] = 500
args[-1] = 500 + 2048
f = sn_proxy.retrieve_chunk(*args)
self.assertEqual(
hashlib.sha1(f.read()).hexdigest(),
hashlib.sha1(bytes).hexdigest()
)
f.close()
# read entire file
args[-2] = 0
args[-1] = file_size
f = sn_proxy.retrieve_chunk(*args)
        # calling read immediately results in an Incomplete Read error
data = f.read()
self.assertEqual(
hashlib.sha1(data).hexdigest(),
hashlib.sha1(open(chunk.file_path, 'rb').read()).hexdigest()
)
f.close()
def test_delete_chunk(self):
chunk = TEST_CHUNKS[3]
self._upload_chunk(chunk, gen_transfer_name(), gen_chunk_name())
chunk_name = self.transfer['chunk_name']
chunk_path = self.storage_node.config.find_chunk_path(chunk_name)
self.assert_(os.path.exists(chunk_path))
sn_proxy = self.make_sn_proxy(ssl_context=self.control_node_ctx)
response = sn_proxy.delete_chunk(chunk_name)
self.assert_('mbytes_available' in response)
self.assert_(not os.path.exists(chunk_path))
def tearDown(self):
import time
time.sleep(.1)
for (node_name, node_info) in self.storage_nodes.items():
logger.debug('shutting down %s' % node_name)
server = node_info['server']
t_shut = threading.Thread(target=server.shutdown)
self.storage_nodes[node_name]['shutdown_thread'] = t_shut
t_shut.start()
for (node_name, node_info) in self.storage_nodes.items():
logger.debug('joining %s shutdown thread' % node_name)
node_info['shutdown_thread'].join()
logger.debug('joining %s server thread' % node_name)
node_info['server_thread'].join()
del node_info['server']
del self.storage_nodes
def teardown():
testutil.remove_tmp_directory()
M2Crypto.threading.cleanup()
|
|
import json
from collections import OrderedDict
import pyomo.kernel as pmo
import numpy
import relaxations
registered_problems = {}
class Problem(object):
def create_model(self):
raise NotImplementedError
def transform_to_convex(self, model):
raise NotImplementedError
def restore_from_convex(self, model):
raise NotImplementedError
    def plot_history(self, model, solver):
raise NotImplementedError
class P1D(Problem):
@staticmethod
def g(x):
return numpy.array([-(x**2) + 1,
-4*(x**2) + 8*x - 1,
-(x**3) - 0.4*(x**4)])
def create_model(self):
m = pmo.block()
m.x = pmo.variable(lb=-2.4394, ub=1.8117)
m.y = pmo.variable()
m.f = pmo.objective(m.y)
m.c = pmo.constraint_list()
for e in self.g(m.x):
m.c.append(pmo.constraint(m.y >= e))
m.x.value = -1.60
m.y.value = 20.744
return m
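    # The block above is the epigraph form of minimizing max_i g_i(x): the
    # auxiliary variable y is pushed down onto the upper envelope of the three
    # g_i constraints over the box x in [-2.4394, 1.8117].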
def branching_candidates(self, model):
return [model.x]
def transform_to_convex(self, m):
convex = m.convex = pmo.block()
# add relaxation variables
convex.v = pmo.variable_dict(ordered=True)
convex.v_c = pmo.constraint_dict(ordered=True)
convex.v['x2'], convex.v_c['x2'] = \
relaxations.relax_square_term(m.x)
convex.v['x3'], convex.v_c['x3'] = \
relaxations.relax_bilinear_term(m.x,
convex.v['x2'])
convex.v['x4'], convex.v_c['x4'] = \
relaxations.relax_square_term(convex.v['x2'])
# deactivate non-convex constraints
m.c.deactivate()
# add convex versions
convex.c = pmo.constraint_list()
convex.c.append(pmo.constraint(
m.y >= -convex.v['x2'] + 1))
convex.c.append(pmo.constraint(
m.y >= -4*convex.v['x2'] + 8*m.x - 1))
convex.c.append(pmo.constraint(
m.y >= -convex.v['x3'] - 0.4*convex.v['x4']))
def restore_from_convex(self, m):
del m.convex
m.c.activate()
def plot_history(self, model, solver):
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
plt.figure()
plt.plot(solver.best_bound_history[1:], '-b')
plt.plot(solver.best_objective_history[1:], '-k')
plt.xlabel("Iteration")
x = np.linspace(model.x.lb, model.x.ub, 250)
fx = self.g(x).max(axis=0)
bound_points = set()
bound_history = []
for n in solver.nodes_visited:
if n.parent is not None:
assert len(n.variable_bounds) == 1
else:
assert len(n.variable_bounds) == 2
assert len(n.variable_bounds[0]) == 2
assert n.variable_bounds[0][0] is model.x
x_bounds = n.variable_bounds[0][1]
assert len(x_bounds) == 2
bound_points.update(x_bounds)
bound_history.append(x_bounds)
cplex = pmo.SolverFactory("cplex")
def Fmin_(m, x):
orig_x_state = m.x.value, m.x.fixed
orig_y_state = m.y.value, m.y.fixed
m.x.fix(x)
try:
status = cplex.solve(m)
assert str(status.solver.status) == "ok"
assert str(status.solver.termination_condition) == "optimal"
obj = m.f()
finally:
m.x.value, m.x.fixed = orig_x_state
m.y.value, m.y.fixed = orig_y_state
return obj
Fmin = np.vectorize(lambda x: Fmin_(model, x))
plt.figure()
plt.plot(x, fx, '-k')
for p in sorted(bound_points):
plt.axvline(p, color='grey')
for x_bounds in bound_history:
model.x.bounds = x_bounds
self.transform_to_convex(model)
xr = np.linspace(model.x.lb, model.x.ub, 50)
fr = Fmin(xr)
plt.plot(xr, fr, '-b')
self.restore_from_convex(model)
plt.show()
registered_problems['p1d'] = P1D
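# Rough usage sketch (hypothetical driver; the branch-and-bound solver that
# consumes these Problem objects lives elsewhere in this project):
#   prob = registered_problems['p1d']()
#   model = prob.create_model()
#   prob.transform_to_convex(model)    # convex relaxation -> lower bound
#   # ... solve the relaxation, branch on prob.branching_candidates(model) ...
#   prob.restore_from_convex(model)    # back to the original non-convex model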
class Rosenbrock2D(Problem):
@staticmethod
def f(x, y):
return (x**4 - 2*(x**2)*y + \
0.5*(x**2) - x + \
(y**2) + 0.5)
def create_model(self):
m = pmo.block()
m.x = pmo.variable(lb=-10, ub=10)
m.y = pmo.variable(lb=-5, ub=25)
m.f = pmo.objective(self.f(m.x, m.y))
m.x.value = 2.5
m.y.value = 2.5
return m
def branching_candidates(self, model):
return [model.x, model.y]
def transform_to_convex(self, m):
convex = m.convex = pmo.block()
# add relaxation variables
convex.v = pmo.variable_dict(ordered=True)
convex.v_c = pmo.constraint_dict(ordered=True)
convex.v['x2y'], convex.v_c['x2y'], blv = \
relaxations.relax_trilinear_term(m.x, m.x, m.y)
assert all([b_ is None for b_ in blv])
# replace objective with convex version
m.f.expr = (m.x**4 - 2*convex.v['x2y'] + \
0.5*(m.x**2) - m.x + \
(m.y**2) + 0.5)
def restore_from_convex(self, m):
del m.convex
m.f.expr = self.f(m.x, m.y)
def stash_convex(self, m):
return (m.convex, m.f.expr)
    def restore_stashed_convex(self, m, state):
m.convex, m.f.expr = state
def plot_history(self, model, solver):
import numpy as np
        import matplotlib.pyplot
        import matplotlib.colors
        import matplotlib.animation
        import matplotlib.patches
matplotlib.pyplot.figure()
bb = np.array(solver.best_bound_history[1:], dtype=float)
bo = np.array(solver.best_objective_history[1:], dtype=float)
scale = np.array(bo)
scale[scale < 1] = 1.0
relgap_iter = (bo-bb)/scale*100
expanded_bbh = []
expanded_boh = []
for node, bbound, bobj in zip(solver.nodes_visited[1:],
solver.best_bound_history[1:],
solver.best_objective_history[1:]):
assert node.solves >= 1
expanded_bbh.extend([bbound]*node.solves)
expanded_boh.extend([bobj]*node.solves)
bb = np.array(expanded_bbh, dtype=float)
bo = np.array(expanded_boh, dtype=float)
scale = np.array(bo)
scale[scale < 1] = 1.0
relgap_solve = (bo-bb)/scale*100
matplotlib.pyplot.semilogy(relgap_solve,'-k')
matplotlib.pyplot.xlabel("Solve (Iteration)")
matplotlib.pyplot.ylabel(r"Relative Gap (%)")
matplotlib.pyplot.savefig('sbb.pdf', bbox_inches='tight')
with open("sbb_iter.json", "w") as f:
json.dump(relgap_iter.tolist(), f)
with open("sbb_solve.json", "w") as f:
json.dump(relgap_solve.tolist(), f)
def _get_bounds(node):
xlb = None
xub = None
ylb = None
yub = None
for child in reversed(node.path_to_root):
for var, var_bounds in child.variable_bounds:
if var is model.x:
xlb, xub = var_bounds
else:
assert var is model.y
ylb, yub = var_bounds
return xlb, xub, ylb, yub
x = np.linspace(model.x.lb, model.x.ub, 250)
y = np.linspace(model.y.lb, model.y.ub, 250)
X, Y = np.meshgrid(x, y)
Z = self.f(X, Y)
fig = matplotlib.pyplot.figure()
Z[Z > 2500] = 2500
norm = matplotlib.colors.LogNorm()
matplotlib.pyplot.contourf(
X, Y, Z,
cmap=matplotlib.pyplot.cm.jet,
levels=np.linspace(0, 2500, 2500),
norm=norm)
matplotlib.pyplot.axis('scaled')
matplotlib.pyplot.xlabel('x')
matplotlib.pyplot.ylabel('y')
matplotlib.pyplot.savefig('rosen.pdf', bbox_inches='tight')
boxes = set()
box_history = []
for n in solver.nodes_visited:
xlb = None
xub = None
ylb = None
yub = None
box = _get_bounds(n)
boxes.add(box)
box_history.append((box, n.depth))
max_depth = max(n.depth for n in solver.nodes_visited)
min_bound = min(n.bound for n in solver.nodes_visited)
max_bound = max(n.bound for n in solver.nodes_visited)
lw = 2
fig = matplotlib.pyplot.figure()
matplotlib.pyplot.xlabel('x')
matplotlib.pyplot.ylabel('y')
rects = OrderedDict()
ax = matplotlib.pyplot.gca()
ax.patch.set_fc('black')
black_ = ax.patch.get_fc()
ax.patch.set_fc('lightgrey')
ax.set_xlim([-11, 11])
ax.set_ylim([-6, 26])
dummy_rect = matplotlib.patches.Rectangle(
(-10, -5), 20, 30, fill=True,
fc='black', ec='black', lw=lw)
ax.add_patch(dummy_rect)
frames = []
for (box, depth), node in zip(box_history,
solver.nodes_visited):
frames.append(node.id)
x = box[0]
y = box[2]
w = (box[1] - box[0])
h = (box[3] - box[2])
fc = matplotlib.pyplot.cm.jet(norm(max(0, node.bound)))
rects[node.id] = \
(matplotlib.patches.Rectangle(
(x, y), w, h, fill=True,
fc=fc, ec='black',
lw=lw*(1-(float(depth)/max_depth))),
fc)
ax.add_patch(rects[node.id][0])
xL, xU, yL, yU = _get_bounds(solver.root)
matplotlib.pyplot.axis('scaled')
dummy_rect.set_visible(False)
child_count = dict((n.id,0) for n in solver.nodes_visited)
def init():
for id_, (r,fc) in rects.items():
r.set_visible(False)
r.set_fc(fc)
child_count[id_] = 0
rects[solver.nodes_visited[0].id][0].set_visible(True)
def animate(arg):
assert len(arg) == 2
node, cnt = arg
if node is None:
for _, (r, fc) in rects.items():
if r.get_fc() != black_:
r.set_fc(fc)
r.set_visible(True)
else:
assert 1 <= cnt <= node.solves
if cnt == 1:
rects[node.id][0].set_visible(True)
if node.status in ("F", "O"):
rects[node.id][0].set_fc('lightgrey')
if cnt == node.solves:
parent = node.parent
if parent is not None:
child_count[parent.id] += 1
if child_count[parent.id] == len(parent.children):
rects[parent.id][0].set_fc(black_)
frame_order = []
for node in solver.nodes_visited:
assert node.solves > 0
tmp = [(node, cnt) for cnt in range(1, node.solves+1)]
frame_order.extend(tmp)
ani = matplotlib.animation.\
FuncAnimation(fig, animate,
frames=frame_order+[(None,None)],
interval=50, blit=False,
init_func=init)
# Set up formatting for the movie files
#print(matplotlib.animation.writers.list())
matplotlib.verbose.set_level("helpful")
writer = matplotlib.animation.writers['ffmpeg'](
fps=40, metadata=dict(artist='Me'), bitrate=1800)
ani.save('sbb.mp4', writer=writer)
#matplotlib.pyplot.show()
registered_problems['rosen2d'] = Rosenbrock2D
|
|
#!/usr/bin/env python3
#
# A plugin for the Unbound DNS resolver to resolve DNS records in
# multicast DNS [RFC 6762] via Avahi.
#
# Copyright (C) 2018-2019 Internet Real-Time Lab, Columbia University
# http://www.cs.columbia.edu/irt/
#
# Written by Jan Janak <janakj@cs.columbia.edu>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Dependencies:
# Unbound with pythonmodule configured for Python 3
# dnspython [http://www.dnspython.org]
# pydbus [https://github.com/LEW21/pydbus]
#
# To enable Python 3 support, configure Unbound as follows:
# PYTHON_VERSION=3 ./configure --with-pythonmodule
#
# The plugin is meant to be used as a fallback resolver that resolves
# records in multicast DNS if the upstream server cannot be reached or
# provides no answer (NXDOMAIN).
#
# mDNS requests for negative records, i.e., records for which Avahi
# returns no answer (NXDOMAIN), are expensive. Since there is no
# single authoritative server in mDNS, such requests terminate only
# via a timeout. The timeout is about a second (if MDNS_TIMEOUT is not
# configured), or the value configured via MDNS_TIMEOUT. The
# corresponding Unbound thread will be blocked for this amount of
# time. For this reason, it is important to configure an appropriate
# number of threads in unbound.conf and limit the RR types and names
# that will be resolved via Avahi using the environment variables
# described later.
#
# An example unbound.conf with the plugin enabled:
#
# | server:
# | module-config: "validator python iterator"
# | num-threads: 32
# | cache-max-negative-ttl: 60
# | cache-max-ttl: 60
# | python:
# | python-script: path/to/this/file
#
#
# The plugin can also be run interactively. Provide the name and
# record type to be resolved as command line arguments and the
# resolved record will be printed to standard output:
#
# $ ./avahi-resolver.py voip-phx4.phxnet.org A
# voip-phx4.phxnet.org. 120 IN A 10.4.3.2
#
#
# The behavior of the plugin can be controlled via the following
# environment variables:
#
# DBUS_SYSTEM_BUS_ADDRESS
#
# The address of the system DBus bus, in the format expected by DBus,
# e.g., unix:path=/run/avahi/system-bus.sock
#
#
# DEBUG
#
# Set this environment variable to "yes", "true", "on", or "1" to
# enable debugging. In debugging mode, the plugin will output a lot
# more information about what it is doing either to the standard
# output (when run interactively) or to Unbound via log_info and
# log_error.
#
# By default debugging is disabled.
#
#
# MDNS_TTL
#
# Avahi does not provide the TTL value for the records it returns.
# This environment variable can be used to configure the TTL value for
# such records.
#
# The default value is 120 seconds.
#
#
# MDNS_TIMEOUT
#
# The maximum amount of time (in milliseconds) an Avahi request is
# allowed to run. This value sets the time it takes to resolve
# negative (non-existent) records in Avahi. If unset, the request
# terminates when Avahi sends the "AllForNow" signal, telling the
# client that more records are unlikely to arrive. This takes roughly
# one second. You may need to configure a longer value here on
# slower networks, e.g., networks that relay mDNS packets such as
# MANETs.
#
#
# MDNS_GETONE
#
# If set to "true", "1", or "on", an Avahi request will terminate as
# soon as at least one record has been found. If there are multiple
# nodes in the mDNS network publishing the same record, only one (or
# subset) will be returned.
#
# If set to "false", "0", or "off", the plugin will gather records for
# MDNS_TIMEOUT and return all records found. This is only useful in
# networks where multiple nodes are known to publish different records
# under the same name and the client needs to be able to obtain them
# all. When configured this way, all Avahi requests will always take
# MDNS_TIMEOUT to complete!
#
# This option is set to true by default.
#
#
# MDNS_REJECT_TYPES
#
# A comma-separated list of record types that will NOT be resolved in
# mDNS via Avahi. Use this environment variable to prevent specific
# record types from being resolved via Avahi. For example, if your
# network does not support IPv6, you can put AAAA on this list.
#
# The default value is an empty list.
#
# Example: MDNS_REJECT_TYPES=aaaa,mx,soa
#
#
# MDNS_ACCEPT_TYPES
#
# If set, a record type will be resolved via Avahi if and only if it
# is present on this comma-separated list. In other words, this is a
# whitelist.
#
# The default value is an empty list which means all record types will
# be resolved via Avahi.
#
# Example: MDNS_ACCEPT_TYPES=a,ptr,txt,srv,aaaa,cname
#
#
# MDNS_REJECT_NAMES
#
# If the name being resolved matches the regular expression in this
# environment variable, the name will NOT be resolved via Avahi. In
# other words, this environment variable provides a blacklist.
#
# The default value is empty--no names will be rejected.
#
# Example: MDNS_REJECT_NAMES=(^|\.)example\.com\.$
#
#
# MDNS_ACCEPT_NAMES
#
# If set to a regular expression, a name will be resolved via Avahi if
# and only if it matches the regular expression. In other words, this
# variable provides a whitelist.
#
# The default value is empty--all names will be resolved via Avahi.
#
# Example: MDNS_ACCEPT_NAMES=^.*\.example\.com\.$
#
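# A combined example invocation (hypothetical values; adjust them to your
# network) when running the plugin interactively:
#
#   $ DEBUG=yes MDNS_TIMEOUT=2000 MDNS_ACCEPT_TYPES=a,aaaa,ptr \
#     MDNS_ACCEPT_NAMES='(^|\.)phxnet\.org\.$' \
#     ./avahi-resolver.py voip-phx4.phxnet.org A
#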
import os
import re
import array
import threading
import traceback
import dns.rdata
import dns.rdatatype
import dns.rdataclass
from queue import Queue
from gi.repository import GLib
from pydbus import SystemBus
IF_UNSPEC = -1
PROTO_UNSPEC = -1
sysbus = None
avahi = None
trampoline = dict()
thread_local = threading.local()
dbus_thread = None
dbus_loop = None
def str2bool(v):
if v.lower() in ['false', 'no', '0', 'off', '']:
return False
return True
def dbg(msg):
    if DEBUG:
log_info('avahi-resolver: %s' % msg)
#
# Although pydbus has an internal facility for handling signals, we
# cannot use that with Avahi. When responding from an internal cache,
# Avahi sends the first signal very quickly, before pydbus has had a
# chance to subscribe for the signal. This will result in lost signal
# and missed data:
#
# https://github.com/LEW21/pydbus/issues/87
#
# As a workaround, we subscribe to all signals before creating a
# record browser and do our own signal matching and dispatching via
# the following function.
#
def signal_dispatcher(connection, sender, path, interface, name, args):
o = trampoline.get(path, None)
if o is None:
return
if name == 'ItemNew': o.itemNew(*args)
elif name == 'ItemRemove': o.itemRemove(*args)
elif name == 'AllForNow': o.allForNow(*args)
elif name == 'Failure': o.failure(*args)
class RecordBrowser:
def __init__(self, callback, name, type_, timeout=None, getone=True):
self.callback = callback
self.records = []
self.error = None
self.getone = getone
self.timer = None if timeout is None else GLib.timeout_add(timeout, self.timedOut)
self.browser_path = avahi.RecordBrowserNew(IF_UNSPEC, PROTO_UNSPEC, name, dns.rdataclass.IN, type_, 0)
trampoline[self.browser_path] = self
self.browser = sysbus.get('.Avahi', self.browser_path)
self.dbg('Created RecordBrowser(name=%s, type=%s, getone=%s, timeout=%s)'
% (name, dns.rdatatype.to_text(type_), getone, timeout))
def dbg(self, msg):
dbg('[%s] %s' % (self.browser_path, msg))
def _done(self):
del trampoline[self.browser_path]
self.dbg('Freeing')
self.browser.Free()
if self.timer is not None:
self.dbg('Removing timer')
GLib.source_remove(self.timer)
self.callback(self.records, self.error)
def itemNew(self, interface, protocol, name, class_, type_, rdata, flags):
self.dbg('Got signal ItemNew')
self.records.append((name, class_, type_, rdata))
if self.getone:
self._done()
def itemRemove(self, interface, protocol, name, class_, type_, rdata, flags):
self.dbg('Got signal ItemRemove')
self.records.remove((name, class_, type_, rdata))
def failure(self, error):
self.dbg('Got signal Failure')
self.error = Exception(error)
self._done()
def allForNow(self):
self.dbg('Got signal AllForNow')
if self.timer is None:
self._done()
def timedOut(self):
self.dbg('Timed out')
self._done()
return False
#
# This function runs the main event loop for DBus (GLib). This
# function must be run in a dedicated worker thread.
#
def dbus_main():
global sysbus, avahi, dbus_loop
dbg('Connecting to system DBus')
sysbus = SystemBus()
dbg('Subscribing to .Avahi.RecordBrowser signals')
sysbus.con.signal_subscribe('org.freedesktop.Avahi',
'org.freedesktop.Avahi.RecordBrowser',
None, None, None, 0, signal_dispatcher)
avahi = sysbus.get('.Avahi', '/')
dbg("Connected to Avahi Daemon: %s (API %s) [%s]"
% (avahi.GetVersionString(), avahi.GetAPIVersion(), avahi.GetHostNameFqdn()))
dbg('Starting DBus main loop')
dbus_loop = GLib.MainLoop()
dbus_loop.run()
#
# This function must be run in the DBus worker thread. It creates a
# new RecordBrowser instance and once it has finished doing its thing,
# it will send the result back to the original thread via the queue.
#
def start_resolver(queue, *args, **kwargs):
try:
RecordBrowser(lambda *v: queue.put_nowait(v), *args, **kwargs)
except Exception as e:
queue.put_nowait((None, e))
return False
#
# To resolve a request, we setup a queue, post a task to the DBus
# worker thread, and wait for the result (or error) to arrive over the
# queue. If the worker thread reports an error, raise the error as an
# exception.
#
def resolve(*args, **kwargs):
try:
queue = thread_local.queue
except AttributeError:
dbg('Creating new per-thread queue')
queue = Queue()
thread_local.queue = queue
GLib.idle_add(lambda: start_resolver(queue, *args, **kwargs))
records, error = queue.get()
queue.task_done()
if error is not None:
raise error
return records
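#
# A usage sketch for resolve() (hypothetical values; call it from an Unbound
# worker thread, not from the DBus thread):
#
#   recs = resolve('voip-phx4.phxnet.org', dns.rdatatype.A,
#                  getone=True, timeout=1000)
#   # -> list of (name, class_, type_, rdata) tuples, [] if nothing was found
#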
def parse_type_list(lst):
return list(map(dns.rdatatype.from_text, [v.strip() for v in lst.split(',') if len(v)]))
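# A quick sketch of the expected behaviour (using dnspython's symbolic
# rdatatype constants):
#
#   parse_type_list('a, aaaa') == [dns.rdatatype.A, dns.rdatatype.AAAA]
#   parse_type_list('')        == []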
def init(*args, **kwargs):
global dbus_thread, DEBUG
global MDNS_TTL, MDNS_GETONE, MDNS_TIMEOUT
global MDNS_REJECT_TYPES, MDNS_ACCEPT_TYPES
global MDNS_REJECT_NAMES, MDNS_ACCEPT_NAMES
DEBUG = str2bool(os.environ.get('DEBUG', str(False)))
MDNS_TTL = int(os.environ.get('MDNS_TTL', 120))
dbg("TTL for records from Avahi: %d" % MDNS_TTL)
MDNS_REJECT_TYPES = parse_type_list(os.environ.get('MDNS_REJECT_TYPES', ''))
if MDNS_REJECT_TYPES:
dbg('Types NOT resolved via Avahi: %s' % MDNS_REJECT_TYPES)
MDNS_ACCEPT_TYPES = parse_type_list(os.environ.get('MDNS_ACCEPT_TYPES', ''))
if MDNS_ACCEPT_TYPES:
dbg('ONLY resolving the following types via Avahi: %s' % MDNS_ACCEPT_TYPES)
v = os.environ.get('MDNS_REJECT_NAMES', None)
MDNS_REJECT_NAMES = re.compile(v, flags=re.I | re.S) if v is not None else None
if MDNS_REJECT_NAMES is not None:
dbg('Names NOT resolved via Avahi: %s' % MDNS_REJECT_NAMES.pattern)
v = os.environ.get('MDNS_ACCEPT_NAMES', None)
MDNS_ACCEPT_NAMES = re.compile(v, flags=re.I | re.S) if v is not None else None
if MDNS_ACCEPT_NAMES is not None:
dbg('ONLY resolving the following names via Avahi: %s' % MDNS_ACCEPT_NAMES.pattern)
v = os.environ.get('MDNS_TIMEOUT', None)
MDNS_TIMEOUT = int(v) if v is not None else None
if MDNS_TIMEOUT is not None:
dbg('Avahi request timeout: %s' % MDNS_TIMEOUT)
MDNS_GETONE = str2bool(os.environ.get('MDNS_GETONE', str(True)))
dbg('Terminate Avahi requests on first record: %s' % MDNS_GETONE)
dbus_thread = threading.Thread(target=dbus_main)
dbus_thread.daemon = True
dbus_thread.start()
def deinit(*args, **kwargs):
dbus_loop.quit()
dbus_thread.join()
return True
def inform_super(id, qstate, superqstate, qdata):
return True
def get_rcode(msg):
if not msg:
return RCODE_SERVFAIL
return msg.rep.flags & 0xf
def rr2text(rec, ttl):
name, class_, type_, rdata = rec
    wire = array.array('B', rdata).tobytes()
return '%s. %d %s %s %s' % (
name,
ttl,
dns.rdataclass.to_text(class_),
dns.rdatatype.to_text(type_),
dns.rdata.from_wire(class_, type_, wire, 0, len(wire), None))
def operate(id, event, qstate, qdata):
qi = qstate.qinfo
name = qi.qname_str
type_ = qi.qtype
type_str = dns.rdatatype.to_text(type_)
class_ = qi.qclass
class_str = dns.rdataclass.to_text(class_)
rc = get_rcode(qstate.return_msg)
if event == MODULE_EVENT_NEW or event == MODULE_EVENT_PASS:
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
if event != MODULE_EVENT_MODDONE:
log_err("avahi-resolver: Unexpected event %d" % event)
qstate.ext_state[id] = MODULE_ERROR
return True
qstate.ext_state[id] = MODULE_FINISHED
    # Only resolve via Avahi if we got NXDOMAIN from the upstream DNS
# server, or if we could not reach the upstream DNS server. If we
# got some records for the name from the upstream DNS server
# already, do not resolve the record in Avahi.
if rc != RCODE_NXDOMAIN and rc != RCODE_SERVFAIL:
return True
dbg("Got request for '%s %s %s'" % (name, class_str, type_str))
# Avahi only supports the IN class
if class_ != RR_CLASS_IN:
dbg('Rejected, Avahi only supports the IN class')
return True
# Avahi does not support meta queries (e.g., ANY)
if dns.rdatatype.is_metatype(type_):
dbg('Rejected, Avahi does not support the type %s' % type_str)
return True
# If we have a type blacklist and the requested type is on the
# list, reject it.
if MDNS_REJECT_TYPES and type_ in MDNS_REJECT_TYPES:
dbg('Rejected, type %s is on the blacklist' % type_str)
return True
# If we have a type whitelist and if the requested type is not on
# the list, reject it.
if MDNS_ACCEPT_TYPES and type_ not in MDNS_ACCEPT_TYPES:
dbg('Rejected, type %s is not on the whitelist' % type_str)
return True
# If we have a name blacklist and if the requested name matches
# the blacklist, reject it.
if MDNS_REJECT_NAMES is not None:
if MDNS_REJECT_NAMES.search(name):
dbg('Rejected, name %s is on the blacklist' % name)
return True
# If we have a name whitelist and if the requested name does not
# match the whitelist, reject it.
if MDNS_ACCEPT_NAMES is not None:
if not MDNS_ACCEPT_NAMES.search(name):
dbg('Rejected, name %s is not on the whitelist' % name)
return True
dbg("Resolving '%s %s %s' via Avahi" % (name, class_str, type_str))
recs = resolve(name, type_, getone=MDNS_GETONE, timeout=MDNS_TIMEOUT)
if not recs:
dbg('Result: Not found (NXDOMAIN)')
qstate.return_rcode = RCODE_NXDOMAIN
return True
m = DNSMessage(name, type_, class_, PKT_QR | PKT_RD | PKT_RA)
for r in recs:
s = rr2text(r, MDNS_TTL)
dbg('Result: %s' % s)
m.answer.append(s)
if not m.set_return_msg(qstate):
raise Exception("Error in set_return_msg")
if not storeQueryInCache(qstate, qstate.return_msg.qinfo, qstate.return_msg.rep, 0):
raise Exception("Error in storeQueryInCache")
qstate.return_msg.rep.security = 2
qstate.return_rcode = RCODE_NOERROR
return True
#
# It does not appear to be sufficient to check __name__ to determine
# whether we are being run in interactive mode. As a workaround, try
# to import module unboundmodule and if that fails, assume we're being
# run in interactive mode.
#
try:
import unboundmodule
embedded = True
except ImportError:
embedded = False
if __name__ == '__main__' and not embedded:
import sys
def log_info(msg):
print(msg)
def log_err(msg):
print('ERROR: %s' % msg, file=sys.stderr)
if len(sys.argv) != 3:
print('Usage: %s <name> <rr_type>' % sys.argv[0])
sys.exit(2)
name = sys.argv[1]
type_str = sys.argv[2]
try:
type_ = dns.rdatatype.from_text(type_str)
except dns.rdatatype.UnknownRdatatype:
log_err('Unsupported DNS record type "%s"' % type_str)
sys.exit(2)
if dns.rdatatype.is_metatype(type_):
log_err('Meta record type "%s" cannot be resolved via Avahi' % type_str)
sys.exit(2)
init()
try:
recs = resolve(name, type_, getone=MDNS_GETONE, timeout=MDNS_TIMEOUT)
if not len(recs):
print('%s not found (NXDOMAIN)' % name)
sys.exit(1)
for r in recs:
print(rr2text(r, MDNS_TTL))
finally:
deinit()
|
|
#!/usr/bin/env python3
import collections
import functools
import importlib
import multiprocessing
import os
import sys
import json
import qtutil
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtGui import QApplication
from datadialog import DataDialog
from pipeconf import PipeconfDialog, PipelineModel
from project import ProjectManager
from plugins.util import custom_qt_items as cqt
from plugins import set_coordinate_system as scs
import traceback
from plugins.util import constants
APPNAME = 'Mesoscale Brain Explorer'
VERSION = open('../VERSION').read()
def clear_layout(layout):
    while True:
        item = layout.takeAt(0)
        if not item:
            break
        widget = item.widget()
        if widget is not None:
            widget.setParent(None)
        child = item.layout()
        if child:
            clear_layout(child)
        del item
class PipelineView(QListView):
active_plugin_changed = pyqtSignal(str, int)
def __init__(self, parent=None):
super(PipelineView, self).__init__(parent)
self.setStyleSheet('QListView::item { border: 0px; padding-left: 4px;'
'height: 26px; }'
'QListView::item::selected { background-color: #ccf; }')
def currentChanged(self, current, previous):
super(PipelineView, self).currentChanged(current, previous)
plugin_name = str(current.data(Qt.UserRole))
plugin_position = current.row()
self.active_plugin_changed.emit(plugin_name, plugin_position)
class ToolButton(QToolButton):
def __init__(self, parent=None):
super(ToolButton, self).__init__(parent)
class Sidebar(QWidget):
open_pipeconf_requested = pyqtSignal()
open_datadialog_requested = pyqtSignal()
automate_pipeline_requested = pyqtSignal()
x_origin_changed = pyqtSignal(float)
y_origin_changed = pyqtSignal(float)
units_per_pixel_changed = pyqtSignal(float)
def __init__(self, parent=None):
super(Sidebar, self).__init__(parent)
self.x_origin = QDoubleSpinBox()
self.y_origin = QDoubleSpinBox()
self.units_per_pixel = QDoubleSpinBox()
self.auto_pb = QPushButton('&Automation')
self.setup_ui()
self.setup_signals()
self.setup_whats_this()
def setup_ui(self):
self.setContentsMargins(4, 6, 5, 0)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('Origin:'))
hbox = QHBoxLayout()
hbox.addWidget(QLabel("X:"))
hbox.addWidget(self.x_origin)
hbox.addWidget(QLabel("Y:"))
hbox.addWidget(self.y_origin)
vbox.addLayout(hbox)
vbox.addWidget(QLabel('Units per pixel:'))
vbox.addWidget(self.units_per_pixel)
self.units_per_pixel.setDecimals(5)
self.units_per_pixel.setMaximum(100000)
self.x_origin.setMaximum(100000)
self.y_origin.setMaximum(100000)
# self.x_origin.setValue(self.project['origin'][0])
# self.y_origin.setValue(self.project['origin'][1])
# self.units_per_pixel.setValue(self.project['unit_per_pixel'])
vbox.addWidget(cqt.InfoWidget("Whatever units you use for units per pixel is the units of length used "
"in all plugins. i.e. you'll notice the x and y axis are in 'units'."))
self.pl_list = PipelineView()
self.pl_list.setIconSize(QSize(18, 18))
self.pl_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.pl_list.setEditTriggers(QAbstractItemView.NoEditTriggers)
vbox.addWidget(QLabel('Pipeline:'))
vbox.addWidget(self.pl_list)
self.auto_pb.clicked.connect(self.automate_pipeline_requested)
vbox.addWidget(self.auto_pb)
vbox.addWidget(cqt.InfoWidget('Automation allows you to use the output from a preceding plugin in the pipeline as '
'input to the next. You can configure your pipeline to set up a custom order, '
'add additional plugins or remove plugins. '
'To use: \n'
'1) If your first plugin is not the importer, select files in the video list to use '
'as input. These files will go through each step in your pipeline. \n'
                                      '2) Set parameters on each individual plugin in your pipeline. For example, in the '
                                      'alignment plugin select the reference frame all files will be aligned to and in the '
                                      'ROI plugin select the ROIs that all files will be cropped to. \n'
                                      'Use the "What\'s This" help feature on UI elements in each plugin to learn '
                                      'how to set parameters for each one.\n'
                                      '3) When you\'re ready, highlight each plugin you intend to run and make sure they '
'are in the correct order. Some plugins cannot be automated so do not highlight '
'those.\n'
'\n Click Automate!'))
vbox.addSpacerItem(QSpacerItem(0, 1, QSizePolicy.Minimum, QSizePolicy.Expanding))
pb = QPushButton('&Configure Pipeline')
pb.clicked.connect(self.open_pipeconf_requested)
vbox.addWidget(pb)
pb = QPushButton('&Manage Data')
pb.clicked.connect(self.open_datadialog_requested)
vbox.addWidget(pb)
vbox.setStretch(0, 0)
vbox.setStretch(1, 0)
vbox.setStretch(2, 0)
self.setLayout(vbox)
def setup_sidebar_values(self, project):
if project:
self.x_origin.setValue(project['origin'][0])
self.y_origin.setValue(project['origin'][1])
self.units_per_pixel.setValue(project['unit_per_pixel'])
def setup_signals(self):
self.x_origin.valueChanged.connect(self.x_origin_changed)
self.y_origin.valueChanged.connect(self.y_origin_changed)
self.units_per_pixel.valueChanged.connect(self.units_per_pixel_changed)
def setup_whats_this(self):
        self.x_origin.setWhatsThis("Set the x coordinate used across all plugins in this project. "
                                   "Units are in pixels so coordinates must be within the size of "
                                   "all imported files. "
                                   "Coordinates can also be set via plugins, after which the change "
                                   "should be reflected in the value here."
                                   )
        self.y_origin.setWhatsThis("Set the y coordinate used across all plugins in this project. "
                                   "Units are in pixels so coordinates must be within the size of "
                                   "all imported files. "
                                   "Coordinates can also be set via plugins, after which the change "
                                   "should be reflected in the value here."
                                   )
self.units_per_pixel.setWhatsThis("Set the amount of units there are in a single pixel. "
"This is applied across all plugins in this project. "
"Units can be anything as long as the same units are used "
"across all plugins. e.g. if you use microns for units per pixel "
"then microns must be used for the coordinates in the ROI Placer plugin.")
        self.pl_list.setWhatsThis("Each of these plugins performs a particular set of processing on your data. "
                                  "This list is called a pipeline since you can order this list using the Configure "
                                  "Pipeline button, then select some of the plugins and click Automate "
                                  "to perform the processing steps of all selected plugins in your custom order "
                                  "for a set of inputs. \n"
                                  "\n"
                                  "NOTE: There is a known bug where if you press and hold the up button in this list "
                                  "a new window appears. This is a harmless bug and you can just close the window. ")
        self.auto_pb.setWhatsThis("If you are still unsure how to automate, watch a video tutorial. "
                                  "A link to where to find tutorials can be found by clicking Help -> About")
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setWindowTitle(APPNAME)
self.setWindowFlags(Qt.Window | Qt.WindowContextHelpButtonHint |
Qt.WindowMinimizeButtonHint |
Qt.WindowMaximizeButtonHint |
Qt.WindowCloseButtonHint)
self.project = None
self.current_plugin = None
self.project_manager = ProjectManager(self)
self.plugins = self.load_plugins()
self.sidebar = Sidebar()
self.setup_ui()
self.setup_signals()
self.enable(False)
self.pipeline_model = PipelineModel()
self.sidebar.pl_list.setModel(self.pipeline_model)
self.pipeconf.pipeline_list.setModel(self.pipeline_model)
last = str(QSettings().value('path_of_last_project'))
if last:
quit_msg = "Load last project " + last + " ?"
reply = QMessageBox.question(self, 'Project Setup',
quit_msg, QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
if last:
try:
self.open_project(last)
except:
qtutil.critical('Previous project appears to have been corrupted.\n \n'
+ traceback.format_exc(), self)
#qtutil.critical("Previous project appears to have been corrupted. Please move or delete it.")
self.sidebar.setup_sidebar_values(self.project)
def load_plugins(self):
"""This just gets all the plugins (no reference to pipeline)"""
plugins = collections.OrderedDict()
filenames = [f for f in sorted(os.listdir('plugins')) if f.endswith('.py')]
for filename in filenames:
name, ext = os.path.splitext(filename)
p = self.load_plugin('plugins.' + name, None)
if p:
plugins[name] = p
return plugins
def load_plugin(self, module, plugin_position):
try:
m = importlib.import_module(module)
if not hasattr(m, 'MyPlugin'):
return None
p = m.MyPlugin(self.project, plugin_position)
# p.run()
except:
print('Failed to import \'{}\'.'.format(module))
raise
else:
return p
# def reset_pipeline_plugins(self, plugin_names):
# for i, plugin_name in enumerate(plugin_names):
# p = self.load_plugin('plugins.' + plugin_name, i)
# if p:
# self.plugins[plugin_name] = p
# if self.current_plugin:
# self.set_plugin(self.current_plugin, None)
def reload_pipeline_plugins(self):
for i, plugin_name in enumerate(self.pipeline_model.get_plugin_names()):
p = self.load_plugin('plugins.' + plugin_name, i)
if p:
self.plugins[plugin_name] = p
# def set_x(val):
# self.sidebar.x_origin.setValue(val)
# def set_y(val):
# self.sidebar.y_origin.setValue(val)
# if plugin_name == 'set_coordinate_system':
# p.widget.x_origin_changed[float].connect(self.set_x)
# p.widget.y_origin_changed[float].connect(set_y)
if self.current_plugin:
self.set_plugin(self.current_plugin, None)
def setup_ui(self):
self.pipeconf = PipeconfDialog(self.plugins, self)
self.datadialog = DataDialog(self)
self.pl_frame = QFrame()
splitter = QSplitter(self)
self.enable = lambda yes: splitter.setEnabled(yes)
splitter.setHandleWidth(3)
splitter.setStyleSheet('QSplitter::handle {background: #cccccc;}')
splitter.addWidget(self.sidebar)
splitter.addWidget(self.pl_frame)
# splitter.setStretchFactor(0, 0)
# splitter.setStretchFactor(1, 1)
self.setCentralWidget(splitter)
self.menu = self.menuBar()
m = self.menu.addMenu('&File')
a = QAction('&New project', self)
a.setShortcut('Ctrl+N')
a.setStatusTip('Create new project')
a.triggered.connect(self.create_project)
m.addAction(a)
a = QAction('&Open project', self)
a.setShortcut('Ctrl+O')
a.setStatusTip('Open project')
a.triggered.connect(self.open_project)
m.addAction(a)
a = QAction("&Quit", self)
a.setShortcut("Ctrl+Q")
a.setStatusTip('Leave The App')
a.setIcon(QIcon('pics/quit.png'))
a.triggered.connect(self.close)
m.addAction(a)
about_action = QAction('&About ' + APPNAME, self)
about_action.setStatusTip('About ' + APPNAME)
about_action.triggered.connect(self.about)
about_action.setShortcut('F1')
whats_this_action = QWhatsThis.createAction(QAction('&What\'s This?', self))
whats_this_action.setShortcut('Shift+F1')
m = self.menu.addMenu('&Project')
m.setEnabled(False)
a = QAction("&Close", self)
a.setStatusTip('Close project')
a.triggered.connect(self.close_project)
m.addAction(a)
a = QAction("&Reset All Plugin Parameters", self)
        a.setStatusTip('This is useful if you experience JSON-related issues, allowing for a clean slate')
a.triggered.connect(self.reset_all_params)
m.addAction(a)
self.project_menu = m
help_menu = self.menu.addMenu('&Help')
help_menu.addAction(about_action)
help_menu.addAction(whats_this_action)
def reset_all_params(self):
for plugin_position, p in enumerate(self.project.pipeline):
if p['name'] in self.plugins.keys():
plugin = self.plugins[p['name']]
if hasattr(plugin, 'widget'):
if hasattr(plugin.widget, 'setup_params'):
if not hasattr(plugin.widget, 'params'):
plugin.widget.params = self.project.pipeline[plugin_position]
plugin.widget.project = self.project
plugin.widget.plugin_position = plugin_position
try:
plugin.widget.setup_params(reset=True)
except:
print("Failed to reset " + p['name'])
def setup_signals(self):
self.datadialog.reload_plugins.connect(self.reload_pipeline_plugins)
self.sidebar.open_pipeconf_requested.connect(self.open_pipeconf)
self.sidebar.open_datadialog_requested.connect(self.open_datadialog)
self.sidebar.automate_pipeline_requested.connect(self.automate_pipeline)
self.sidebar.x_origin_changed[float].connect(functools.partial(self.set_project_coordinate_system, 'x'))
self.sidebar.y_origin_changed[float].connect(functools.partial(self.set_project_coordinate_system, 'y'))
self.sidebar.units_per_pixel_changed[float].connect(functools.partial(self.set_project_coordinate_system,
'units_per_pixel'))
self.plugins['set_coordinate_system'].widget.x_origin_changed[float].connect(functools.partial(
self.set_project_coordinate_system, 'x'))
self.plugins['set_coordinate_system'].widget.y_origin_changed[float].connect(functools.partial(
self.set_project_coordinate_system, 'y'))
# todo: add signals from set_coordinate_system here
self.sidebar.pl_list.active_plugin_changed[str, int].connect(self.set_plugin)
self.sidebar.pl_list.setModel(self.pipeconf.pipeline_list.model())
def set_project_coordinate_system(self, key, value):
#todo: add update for view
self.project['origin'] = list(self.project['origin'])
if key == 'x':
self.project['origin'][0] = value
self.project.save()
elif key == 'y':
self.project['origin'][1] = value
self.project.save()
elif key == 'units_per_pixel':
self.project['unit_per_pixel'] = value
self.project.save()
if self.current_plugin in self.plugins.keys():
self.plugins[self.current_plugin].widget.view.update()
def create_project(self):
project = self.project_manager.new_project()
if project:
self.load_project(project)
def load_project(self, project):
self.clean()
self.project = project
self.setWindowTitle(APPNAME + ' - ' + project.name)
self.enable(True)
self.project_menu.setEnabled(True)
QSettings().setValue('path_of_last_project', project.path)
pipeline = []
for plugin_dict in self.project.pipeline:
# include fix to update old versions to new format
try:
plugin_name = plugin_dict['name']
except:
attrs = json.load(open('../templates/mbeproject.json'))
pipeline_template = attrs['pipeline']
# pipeline_names = [p['name'] for p in pipeline_template]
# plugins = self.load_plugins()
# for plugin_name in pipeline_names:
# for plugin in plugins:
# if plugin == plugin_name:
# pipeline.append((plugin, plugins[plugin].name))
# break
QMessageBox.about(self, 'Critical Error in pipeline. Manual Reset Recommended',
"""
<p>Please quit and replace the pipeline in your project JSON file with</p>
<p></p>
<td>%s</td>
<p>which can be copied from
<a href="https://github.com/Frikster/Mesoscale-Brain-Explorer/blob/master/templates/mbeproject.json">here</a></p>
<p>Only the pipeline section needs to be replaced</p>
""" % pipeline_template)
# qtutil.critical("Pipeline appears to be corrupt. "
# "Please replace your current pipeline in the JSON file with \n"
# " " + str(pipeline_template) + "which can be copied from \n" +
# < a href = "https://github.com/Frikster/Mesoscale-Brain-Explorer/issues" > here < / a > < / p >
# "https://github.com/Frikster/Mesoscale-Brain-Explorer/blob/master/templates/mbeproject.json")
return
for plugin in self.plugins:
if plugin == plugin_name:
pipeline.append((plugin, self.plugins[plugin].name))
break
self.pipeline_model.set_plugins(pipeline)
self.reload_pipeline_plugins()
self.sidebar.setup_sidebar_values(project)
# self.reset_pipeline_plugins([p[0] for p in pipeline])
def open_project(self, path=''):
project = self.project_manager.open_project(path)
if project:
self.load_project(project)
def close_project(self):
self.clean()
self.project = None
self.setWindowTitle(APPNAME)
self.project_menu.setEnabled(False)
self.enable(False)
def set_plugin(self, plugin_name, plugin_position):
p = self.load_plugin('plugins.' + str(plugin_name), plugin_position)
if not p:
return
self.current_plugin = plugin_name
self.plugins[plugin_name] = p
def set_x(val):
self.sidebar.x_origin.setValue(val)
def set_y(val):
self.sidebar.y_origin.setValue(val)
if plugin_name == 'set_coordinate_system':
p.widget.x_origin_changed[float].connect(set_x)
p.widget.y_origin_changed[float].connect(set_y)
lt = QVBoxLayout()
lt.addWidget(p.widget)
self.clean_plugin()
self.pl_frame.setLayout(lt)
# p.run()
    def clean_plugin(self):
        if self.pl_frame.layout():
            clear_layout(self.pl_frame.layout())
            # Reparent the old (now empty) layout onto a temporary QWidget so
            # that Qt deletes it and a new layout can be set on pl_frame.
            QWidget().setLayout(self.pl_frame.layout())
def clean(self):
model = self.sidebar.pl_list.model()
if model:
model.clear()
self.clean_plugin()
def open_pipeconf(self):
self.pipeconf.exec_()
pipeline = self.pipeline_model.get_plugin_names()
self.project.set_pipeline(pipeline)
self.project.save()
def open_datadialog(self):
self.datadialog.update(self.project)
self.datadialog.exec_()
def automate_pipeline(self):
# order by index
ordered_q_model_indexes = sorted(self.sidebar.pl_list.selectedIndexes(), key=lambda x: x.row(), reverse=False)
        if not ordered_q_model_indexes:
            qtutil.info("Select all the plugins you want to process through. Use shift or ctrl to select multiple")
            return
import_plugin_indexes = [ind for ind in range(len(ordered_q_model_indexes)) if
self.plugins[ordered_q_model_indexes[ind].data(Qt.UserRole)].name in
constants.IMPORT_PLUGINS]
if not import_plugin_indexes:
p = self.plugins[ordered_q_model_indexes[0].data(Qt.UserRole)]
if p.name not in constants.IMPORT_PLUGINS:
number_of_outputs = p.output_number_expected() # find the number of files outputted by the first plugin
for q_model_index in ordered_q_model_indexes:
p = self.plugins[q_model_index.data(Qt.UserRole)]
if p.name not in constants.IMPORT_PLUGINS:
number_of_outputs = p.output_number_expected()
if not p.check_ready_for_automation(number_of_outputs):
qtutil.critical(p.automation_error_message())
return
number_of_outputs = p.output_number_expected(number_of_outputs) # find the number of outputs
p = self.plugins[ordered_q_model_indexes[0].data(Qt.UserRole)]
input_paths = p.get_input_paths()
if not input_paths:
qtutil.critical("The first plugin in the pipeline does not have a set of input files selected")
return
for q_model_index in ordered_q_model_indexes:
p = self.plugins[q_model_index.data(Qt.UserRole)]
output_paths = p.run(input_paths)
input_paths = output_paths
ordered_q_model_indexes_segments = []
for i, import_ind in enumerate(import_plugin_indexes):
if i < len(import_plugin_indexes)-1:
# ordered_q_model_indexes are the indices in the GUI.
# retrieve the indices from where this import plugin is (import_ind) to
# the index right before the next import plugin
ordered_q_model_indexes_segment = ordered_q_model_indexes[import_ind:(import_plugin_indexes[i+1])]
else:
ordered_q_model_indexes_segment = ordered_q_model_indexes[import_ind:]
ordered_q_model_indexes_segments = ordered_q_model_indexes_segments + [ordered_q_model_indexes_segment]
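        # Illustrative example (hypothetical selection): if the selected rows
        # are [import_A, plugin_1, plugin_2, import_B, plugin_3], then
        # import_plugin_indexes == [0, 3] and the segments become
        # [[import_A, plugin_1, plugin_2], [import_B, plugin_3]], i.e. each
        # segment starts at an import plugin and runs until the next one.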
import_paths = []
for i, ordered_q_model_indexes_segment in enumerate(ordered_q_model_indexes_segments):
import_plugin = self.plugins[ordered_q_model_indexes_segment[0].data(Qt.UserRole)]
qtutil.info("Please select all your files for the pipeline starting from import plugin " + str(i))
input_paths = import_plugin.get_input_paths()
import_paths = import_paths + [input_paths]
if not input_paths:
qtutil.critical("import plugin " + str(i) + " in the pipeline does not have a set of input files selected")
return
# ensure all selected plugins are ready for automation
for i, ordered_q_model_indexes_segment in enumerate(ordered_q_model_indexes_segments):
not_import_plugins = [self.plugins[ordered_q_model_indexes_segment[j].data(Qt.UserRole)] for j in
range(1, len(ordered_q_model_indexes_segment))]
number_of_outputs = len(import_paths[i])
for not_import_plugin in not_import_plugins:
if not not_import_plugin.check_ready_for_automation(number_of_outputs):
qtutil.critical(not_import_plugin.automation_error_message())
return
number_of_outputs = not_import_plugin.output_number_expected(number_of_outputs)
for i, ordered_q_model_indexes_segment in enumerate(ordered_q_model_indexes_segments):
input_paths = import_paths[i]
for q_model_index in ordered_q_model_indexes_segment:
p = self.plugins[q_model_index.data(Qt.UserRole)]
output_paths = p.run(input_paths)
input_paths = output_paths
# p = self.plugins[ordered_q_model_indexes[0].data(Qt.UserRole)]
# input_paths = p.get_input_paths()
# if not input_paths:
# qtutil.critical("The first plugin in the pipeline does not have a set of input files selected")
# return
#
# for q_model_index in ordered_q_model_indexes:
# p = self.plugins[q_model_index.data(Qt.UserRole)]
# output_paths = p.run(input_paths)
# input_paths = output_paths
# self.sidebar.pl_list.selectedIndexes()[0].data(Qt.UserRole)
#
# self.sidebar.pl_list.selectedIndexes()[0].row()
# self.sidebar.pl_list.model().data(self.sidebar.pl_list.selectedIndexes()[0])
def about(self):
author = 'Cornelis Dirk Haupt'
date = '2017'
QMessageBox.about(self, 'About ' + APPNAME,
"""
<b>%s</b>
<p>An online readme, including user manual and developer tutorial can be found
<a href="https://github.com/Frikster/Mesoscale-Brain-Explorer">here</a></p>
<p>Use the "what's this" feature to click on any UI component and learn how to use it</p>
<p>Please submit any feature requests or issues
<a href="https://github.com/Frikster/Mesoscale-Brain-Explorer/issues">here</a></p>
<p></p>
<p><table border="0" width="150">
<tr>
<td>Author:</td>
<td>%s</td>
</tr>
<tr>
<td>Version:</td>
<td>%s</td>
</tr>
<tr>
<td>Date:</td>
<td>%s</td>
</tr>
</table></p>
""" % (APPNAME, author, VERSION, date))
if __name__ == '__main__':
multiprocessing.freeze_support()
app = QApplication(sys.argv)
app.setApplicationName(APPNAME)
app.setOrganizationName('University of British Columbia')
app.setOrganizationDomain('https://github.com/Frikster/Mesoscale-Brain-Explorer')
w = MainWindow()
w.resize(1700, 800)
w.setWindowIcon(QIcon('pics/cbhlogo.png'))
w.show()
app.exec_()
app.deleteLater()
del w
sys.exit()
|
|
"""
A module for sharing intermediates between contractions.
Copyright (c) 2018 Uber Technologies
"""
import contextlib
import functools
import numbers
import threading
from collections import Counter, defaultdict
from typing import Any
from typing import Counter as CounterType
from typing import Dict, Generator, List, Optional, Tuple, Union
from .parser import alpha_canonicalize, parse_einsum_input
from .typing import ArrayType
CacheKeyType = Union[Tuple[str, str, int, Tuple[int, ...]], Tuple[str, int]]
CacheType = Dict[CacheKeyType, ArrayType]
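# Illustrative cache keys produced below (the ids are runtime ``id()`` values,
# shown here as placeholders):
#
#   ("tensor", 140234567890112)                      # stored by _save_tensors
#   ("transpose", "numpy", 140234567890112, (1, 0))  # from transpose_cache_wrap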
__all__ = [
"currently_sharing",
"get_sharing_cache",
"shared_intermediates",
"count_cached_ops",
"transpose_cache_wrap",
"einsum_cache_wrap",
"to_backend_cache_wrap",
]
_SHARING_STACK: Dict[int, List[CacheType]] = defaultdict(list)
def currently_sharing() -> bool:
"""Check if we are currently sharing a cache -- thread specific."""
return threading.get_ident() in _SHARING_STACK
def get_sharing_cache() -> CacheType:
"""Return the most recent sharing cache -- thread specific."""
return _SHARING_STACK[threading.get_ident()][-1]
def _add_sharing_cache(cache: CacheType) -> None:
_SHARING_STACK[threading.get_ident()].append(cache)
def _remove_sharing_cache() -> None:
tid = threading.get_ident()
_SHARING_STACK[tid].pop()
if not _SHARING_STACK[tid]:
del _SHARING_STACK[tid]
@contextlib.contextmanager
def shared_intermediates(
cache: Optional[CacheType] = None,
) -> Generator[CacheType, None, None]:
"""Context in which contract intermediate results are shared.
Note that intermediate computations will not be garbage collected until
1. this context exits, and
2. the yielded cache is garbage collected (if it was captured).
**Parameters:**
- **cache** - *(dict)* If specified, a user-stored dict in which intermediate results will be stored. This can be used to interleave sharing contexts.
**Returns:**
- **cache** - *(dict)* A dictionary in which sharing results are stored. If ignored,
sharing results will be garbage collected when this context is
exited. This dict can be passed to another context to resume
sharing.
"""
if cache is None:
cache = {}
_add_sharing_cache(cache)
try:
yield cache
finally:
_remove_sharing_cache()
def count_cached_ops(cache: CacheType) -> CounterType[str]:
"""Returns a counter of the types of each op in the cache.
This is useful for profiling to increase sharing.
"""
return Counter(key[0] for key in cache.keys())
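# A minimal usage sketch (assumes ``opt_einsum.contract`` and two compatible
# arrays ``x`` and ``y`` are available):
#
#   with shared_intermediates() as cache:
#       contract("ab,bc->c", x, y)
#       contract("ab,bc->c", x, y)    # repeated work is served from the cache
#   print(count_cached_ops(cache))    # Counter of the cached op types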
def _save_tensors(*tensors: ArrayType) -> None:
"""Save tensors in the cache to prevent their ids from being recycled.
This is needed to prevent false cache lookups.
"""
cache = get_sharing_cache()
for tensor in tensors:
cache["tensor", id(tensor)] = tensor
def _memoize(key: CacheKeyType, fn: Any, *args: Any, **kwargs: Any) -> ArrayType:
"""Memoize ``fn(*args, **kwargs)`` using the given ``key``.
Results will be stored in the innermost ``cache`` yielded by
:func:`shared_intermediates`.
"""
cache = get_sharing_cache()
if key in cache:
return cache[key]
result = fn(*args, **kwargs)
cache[key] = result
return result
def transpose_cache_wrap(transpose: Any) -> Any:
"""Decorates a ``transpose()`` implementation to be memoized inside a
:func:`shared_intermediates` context.
"""
@functools.wraps(transpose)
def cached_transpose(a, axes, backend="numpy"):
if not currently_sharing():
return transpose(a, axes, backend=backend)
# hash by axes
_save_tensors(a)
axes = tuple(axes)
key = "transpose", backend, id(a), axes
return _memoize(key, transpose, a, axes, backend=backend)
return cached_transpose
def tensordot_cache_wrap(tensordot: Any) -> Any:
"""Decorates a ``tensordot()`` implementation to be memoized inside a
:func:`shared_intermediates` context.
"""
@functools.wraps(tensordot)
def cached_tensordot(x, y, axes=2, backend="numpy"):
if not currently_sharing():
return tensordot(x, y, axes, backend=backend)
# hash based on the (axes_x,axes_y) form of axes
_save_tensors(x, y)
if isinstance(axes, numbers.Number):
axes = (
list(range(len(x.shape)))[len(x.shape) - axes :],
list(range(len(y.shape)))[:axes],
)
axes = tuple(axes[0]), tuple(axes[1])
key = "tensordot", backend, id(x), id(y), axes
return _memoize(key, tensordot, x, y, axes, backend=backend)
return cached_tensordot
def einsum_cache_wrap(einsum: Any) -> Any:
"""Decorates an ``einsum()`` implementation to be memoized inside a
:func:`shared_intermediates` context.
"""
@functools.wraps(einsum)
def cached_einsum(*args, **kwargs):
if not currently_sharing():
return einsum(*args, **kwargs)
# hash modulo commutativity by computing a canonical ordering and names
backend = kwargs.pop("backend", "numpy")
equation = args[0]
inputs, output, operands = parse_einsum_input(args)
inputs = inputs.split(",")
_save_tensors(*operands)
# Build canonical key
canonical = sorted(zip(inputs, map(id, operands)), key=lambda x: x[1])
canonical_ids = tuple(id_ for _, id_ in canonical)
canonical_inputs = ",".join(input_ for input_, _ in canonical)
canonical_equation = alpha_canonicalize(canonical_inputs + "->" + output)
key = "einsum", backend, canonical_equation, canonical_ids
return _memoize(key, einsum, equation, *operands, backend=backend)
return cached_einsum
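# Illustrative effect of the canonicalisation above (hypothetical operands):
# ``einsum("ab,bc->ac", x, y)`` and ``einsum("bc,ab->ac", y, x)`` sort their
# operands by id and alpha-canonicalise the equation, so both calls map to the
# same cache key and the second one is served from the cache.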
def to_backend_cache_wrap(to_backend: Any = None, constants: Any = False) -> Any:
"""Decorates an ``to_backend()`` implementation to be memoized inside a
:func:`shared_intermediates` context (e.g. ``to_cupy``, ``to_torch``).
"""
    # handle the case where the decorator is called with arguments
if to_backend is None:
return functools.partial(to_backend_cache_wrap, constants=constants)
if constants:
@functools.wraps(to_backend)
def cached_to_backend(array, constant=False):
if not currently_sharing():
return to_backend(array, constant=constant)
# hash by id
key = to_backend.__name__, id(array), constant
return _memoize(key, to_backend, array, constant=constant)
else:
@functools.wraps(to_backend)
def cached_to_backend(array):
if not currently_sharing():
return to_backend(array)
# hash by id
key = to_backend.__name__, id(array)
return _memoize(key, to_backend, array)
return cached_to_backend
|
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..builder import LOSSES
from .utils import weight_reduce_loss
# This method is only for debugging
def py_sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
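# A minimal shape sketch for the pure-PyTorch variant above (illustrative
# values; ``target`` must already be a binary/one-hot tensor with the same
# shape as ``pred``):
#
#   pred = torch.randn(4, 3)                                   # logits, (N, C)
#   target = F.one_hot(torch.tensor([0, 1, 2, 0]), 3).float()  # one-hot (N, C)
#   loss = py_sigmoid_focal_loss(pred, target)                 # scalar ('mean')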
def py_focal_loss_with_prob(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Different from `py_sigmoid_focal_loss`, this function accepts probability
as input.
Args:
pred (torch.Tensor): The prediction probability with shape (N, C),
C is the number of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
target = target.type_as(pred)
pt = (1 - pred) * target + pred * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
r"""A warpper of cuda version `Focal Loss
<https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
alpha, None, 'none')
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0,
activated=False):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_
Args:
            use_sigmoid (bool, optional): Whether the prediction is used
                for sigmoid or softmax. Defaults to True.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
activated (bool, optional): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
self.activated = activated
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
if self.activated:
calculate_loss_func = py_focal_loss_with_prob
else:
if torch.cuda.is_available() and pred.is_cuda:
calculate_loss_func = sigmoid_focal_loss
else:
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
calculate_loss_func = py_sigmoid_focal_loss
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
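# A minimal usage sketch (hypothetical shapes; on CPU the module falls back to
# the pure-PyTorch ``py_sigmoid_focal_loss`` path above):
#
#   criterion = FocalLoss(gamma=2.0, alpha=0.25)
#   pred = torch.randn(8, 80)              # logits, (N, num_classes)
#   target = torch.randint(0, 81, (8,))    # class indices, 80 == background
#   loss = criterion(pred, target)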
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the Test engine."""
import collections
import itertools
import threading
from unittest import mock
from rally.common import objects
from rally import consts
from rally import exceptions
from rally.task import context
from rally.task import engine
from rally.task import scenario
from rally.task import task_cfg
from tests.unit import test
class MyException(exceptions.RallyException):
msg_fmt = "MyException"
class TaskEngineTestCase(test.TestCase):
@staticmethod
def _make_workload(name, args=None, description=None, contexts=None,
sla=None, runner=None, hooks=None, position=0):
return {"uuid": "foo",
"name": name,
"position": position,
"description": description,
"args": args,
"contexts": contexts or {},
"runner_type": runner[0] if runner else "serial",
"runner": runner[1] if runner else {},
"sla": sla or {},
"hooks": hooks or []}
def test_init(self):
config = mock.MagicMock()
task = mock.MagicMock()
eng = engine.TaskEngine(config, task, mock.Mock())
self.assertEqual(eng.config, config)
self.assertEqual(eng.task, task)
@mock.patch("jsonschema.validate")
def test_validate(self, mock_validate):
config = mock.MagicMock()
eng = engine.TaskEngine(config, mock.MagicMock(),
mock.Mock())
mock_validate = mock.MagicMock()
eng._validate_config_syntax = mock_validate.syntax
eng._validate_config_platforms = mock_validate.platforms
eng._validate_config_semantic = mock_validate.semantic
eng.validate()
mock_validate.syntax.assert_called_once_with(config)
mock_validate.platforms.assert_called_once_with(config)
mock_validate.semantic.assert_called_once_with(config)
def test_validate__wrong_syntax(self):
task = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), task, mock.Mock())
e = exceptions.InvalidTaskConfig(name="foo", pos=0, config="",
reason="foo")
eng._validate_config_syntax = mock.MagicMock(side_effect=e)
eng._validate_config_platforms = mock.Mock()
actual_e = self.assertRaises(exceptions.InvalidTaskException,
eng.validate)
self.assertEqual(e, actual_e)
self.assertTrue(task.set_failed.called)
# the next validation step should not be processed
self.assertFalse(eng._validate_config_platforms.called)
def test_validate__wrong_semantic(self):
task = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), task, mock.Mock())
e = exceptions.InvalidTaskConfig(name="foo", pos=0, config="",
reason="foo")
eng._validate_config_syntax = mock.MagicMock()
eng._validate_config_platforms = mock.MagicMock()
eng._validate_config_semantic = mock.MagicMock(side_effect=e)
actual_e = self.assertRaises(exceptions.InvalidTaskException,
eng.validate)
self.assertEqual(e, actual_e)
self.assertTrue(task.set_failed.called)
        # all steps of validation are called, which means that the last one
        # failed
self.assertTrue(eng._validate_config_syntax)
self.assertTrue(eng._validate_config_platforms)
self.assertTrue(eng._validate_config_semantic)
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.sla.SLA.validate")
@mock.patch("rally.task.hook.HookTrigger.validate")
@mock.patch("rally.task.hook.HookAction.validate")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
@mock.patch("rally.task.engine.context.Context.validate")
def test__validate_workload(
self, mock_context_validate,
mock_scenario_runner_validate,
mock_hook_action_validate,
mock_hook_trigger_validate,
mock_sla_validate,
mock_scenario_get):
mock_context_validate.return_value = []
mock_sla_validate.return_value = []
mock_hook_action_validate.return_value = []
mock_hook_trigger_validate.return_value = []
default_context = {"foo": "foo_conf"}
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_platform.return_value = "default"
scenario_cls.get_default_context.return_value = default_context
scenario_name = "Foo.bar"
runner_type = "MegaRunner"
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
workload = {"name": scenario_name,
"runner_type": runner_type,
"runner": {},
"contexts": {"a": "a_conf"},
"hooks": [hook_conf],
"sla": {"foo_sla": "sla_conf"},
"position": 2}
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
eng._validate_workload(workload)
mock_scenario_runner_validate.assert_called_once_with(
name=runner_type, context=None, config=None,
plugin_cfg={}, vtype=None)
self.assertEqual([mock.call(name="a",
context=None,
config=None,
plugin_cfg="a_conf",
vtype=None),
mock.call(name="foo",
context=None,
config=None,
plugin_cfg="foo_conf",
allow_hidden=True,
vtype=None)],
mock_context_validate.call_args_list)
mock_sla_validate.assert_called_once_with(
config=None, context=None,
name="foo_sla", plugin_cfg="sla_conf", vtype=None)
mock_hook_action_validate.assert_called_once_with(
config=None, context=None, name="c", plugin_cfg="c_args",
vtype=None)
mock_hook_trigger_validate.assert_called_once_with(
config=None, context=None, name="d", plugin_cfg="d_args",
vtype=None)
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
def test___validate_workload__wrong_runner(
self, mock_scenario_runner_validate,
mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_scenario_runner_validate.return_value = [
"There is no such runner"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
workload = self._make_workload(name="sca", runner=("b", {}))
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_workload, workload)
self.assertEqual("Input task is invalid!\n\nSubtask sca[0] has wrong "
"configuration\nSubtask configuration:\n"
"<JSON>\n\nReason(s):\n"
" There is no such runner", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.engine.context.Context.validate")
def test__validate_config_syntax__wrong_context(
self, mock_context_validate, mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_context_validate.return_value = ["context_error"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
contexts={"a": "a_conf"})
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\nSubtask sca[1] has wrong "
"configuration\nSubtask configuration:\n<JSON>\n\n"
"Reason(s):\n context_error", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.sla.SLA.validate")
def test__validate_config_syntax__wrong_sla(
self, mock_sla_validate, mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_sla_validate.return_value = ["sla_error"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
sla={"foo_sla": "sla_conf"})
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\n"
"Subtask sca[1] has wrong configuration\n"
"Subtask configuration:\n<JSON>\n\n"
"Reason(s):\n sla_error", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.hook.HookAction.validate")
@mock.patch("rally.task.hook.HookTrigger.validate")
def test__validate_config_syntax__wrong_hook(
self, mock_hook_trigger_validate,
mock_hook_action_validate,
mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_hook_trigger_validate.return_value = []
mock_hook_action_validate.return_value = ["hook_error"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
hooks=[hook_conf])
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\n"
"Subtask sca[1] has wrong configuration\n"
"Subtask configuration:\n<JSON>\n\n"
"Reason(s):\n hook_error", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.hook.HookTrigger.validate")
@mock.patch("rally.task.hook.HookAction.validate")
def test__validate_config_syntax__wrong_trigger(
self, mock_hook_action_validate,
mock_hook_trigger_validate,
mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_hook_trigger_validate.return_value = ["trigger_error"]
mock_hook_action_validate.return_value = []
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
hooks=[hook_conf])
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\n"
"Subtask sca[1] has wrong configuration\n"
"Subtask configuration:\n<JSON>\n\n"
"Reason(s):\n trigger_error", e.format_message())
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test__validate_config_semantic(self, mock_context_manager_setup,
mock_context_manager_cleanup):
env = mock.MagicMock(uuid="env_uuid")
env.check_health.return_value = {
"foo": {"available": True, "message": ""},
"bar": {"available": True, "message": ""}
        }

        @scenario.configure("SomeScen.scenario")
        class SomeScen(scenario.Scenario):
            def run(self):
                pass

        # unregister the ad-hoc scenario so it does not leak into other tests
        self.addCleanup(SomeScen.unregister)

        mock_task_instance = mock.MagicMock()
wconf1 = self._make_workload(name="SomeScen.scenario")
wconf2 = self._make_workload(name="SomeScen.scenario",
position=1)
subtask1 = {"workloads": [wconf1, wconf2]}
wconf3 = self._make_workload(name="SomeScen.scenario",
position=2)
subtask2 = {"workloads": [wconf3]}
mock_task_instance.subtasks = [subtask1, subtask2]
fake_task = mock.MagicMock()
eng = engine.TaskEngine(mock_task_instance, fake_task, env)
eng._validate_config_semantic(mock_task_instance)
env.check_health.return_value = {
"foo": {"available": True, "message": ""},
"bar": {"available": False, "message": "", "traceback": "AAAA"}
}
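        # with one platform reported unhealthy, semantic validation must fail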
self.assertRaises(exceptions.ValidationError,
eng._validate_config_semantic,
mock_task_instance)
@mock.patch("rally.task.engine.TaskEngine._validate_workload")
def test__validate_config_platforms(self, mock__validate_workload):
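        # each workload is validated with vtype="platform"; only the
        # platform_data of every platform reaches the validation context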
foo_cred = {"admin": "admin", "users": ["user1"]}
env = mock.MagicMock(data={
"platforms": {
"foo": {
"platform_name": "foo", "platform_data": foo_cred
}
}
})
workload1 = "workload1"
workload2 = "workload2"
subtasks = [{"workloads": [workload1]},
{"workloads": [workload2]}]
config = mock.Mock(subtasks=subtasks)
eng = engine.TaskEngine({}, mock.MagicMock(), env)
eng._validate_config_platforms(config)
self.assertEqual(
[mock.call(w, vtype="platform",
vcontext={"platforms": {"foo": foo_cred},
"task": eng.task})
for w in (workload1, workload2)],
mock__validate_workload.call_args_list)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__update_status(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTING
eng = engine.TaskEngine(mock.MagicMock(), task, mock.Mock())
eng.run()
task.update_status.assert_has_calls([
mock.call(consts.TaskStatus.RUNNING),
mock.call(consts.TaskStatus.FINISHED)
])
@mock.patch("rally.task.engine.objects.task.Task.get_status")
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.Context")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test_run_exception_is_logged(
self, mock_context_manager_setup, mock_context_manager_cleanup,
mock_scenario_runner, mock_scenario, mock_context,
mock_result_consumer, mock_log, mock_task_get_status):
scenario_cls = mock_scenario.get.return_value
scenario_cls.get_default_context.return_value = {}
context_cls = mock_context.get.return_value
context_cls.get_fullname.return_value = "context_a"
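        # context setup fails for both workloads, so two exceptions get logged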
mock_context_manager_setup.side_effect = Exception
mock_result_consumer.is_task_in_aborting_status.return_value = False
mock_task_instance = mock.MagicMock()
mock_task_instance.subtasks = [{
"title": "foo",
"description": "Do not launch it!!",
"contexts": {},
"workloads": [
self._make_workload(name="a.task", description="foo",
contexts={"context_a": {"a": 1}}),
self._make_workload(name="a.task", description="foo",
contexts={"context_a": {"b": 2}},
position=2)]}]
eng = engine.TaskEngine(mock_task_instance, mock.MagicMock(),
mock.MagicMock())
eng.run()
self.assertEqual(2, mock_log.exception.call_count)
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__task_soft_aborted(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer):
scenario_cls = mock_scenario.get.return_value
scenario_cls.get_platform.return_value = "openstack"
scenario_cls.get_info.return_value = {"title": ""}
task = mock.MagicMock()
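        # is_task_in_aborting_status returns False twice, so two workloads
        # run; the third check triggers the abort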
mock_result_consumer.is_task_in_aborting_status.side_effect = [False,
False,
True]
config = task_cfg.TaskConfig({
"a.task": [{"runner": {"type": "a", "b": 1},
"description": "foo"}],
"b.task": [{"runner": {"type": "a", "b": 1},
"description": "bar"}],
"c.task": [{"runner": {"type": "a", "b": 1},
"description": "xxx"}]
})
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task, mock.MagicMock())
eng.run()
self.assertEqual(2, fake_runner.run.call_count)
self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
task.update_status.mock_calls[-1])
subtask_obj = task.add_subtask.return_value
subtask_obj.update_status.assert_has_calls((
mock.call(consts.SubtaskStatus.FINISHED),
mock.call(consts.SubtaskStatus.FINISHED),
mock.call(consts.SubtaskStatus.ABORTED),
))
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__task_aborted(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
task = mock.MagicMock(spec=objects.Task)
config = task_cfg.TaskConfig({
"a.task": [{"runner": {"type": "a", "b": 1}}],
"b.task": [{"runner": {"type": "a", "b": 1}}],
"c.task": [{"runner": {"type": "a", "b": 1}}]
})
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
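        # the task is already SOFT_ABORTING, so both the task and the
        # subtask must end up marked as ABORTED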
mock_task_get_status.return_value = consts.TaskStatus.SOFT_ABORTING
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task, mock.Mock())
eng.run()
self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
task.update_status.mock_calls[-1])
subtask_obj = task.add_subtask.return_value
subtask_obj.update_status.assert_called_once_with(
consts.SubtaskStatus.ABORTED)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__subtask_crashed(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
task = mock.MagicMock(spec=objects.Task)
subtask_obj = task.add_subtask.return_value
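        # creating the first workload raises, so the subtask and the whole
        # task must be marked CRASHED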
subtask_obj.add_workload.side_effect = MyException()
mock_result_consumer.is_task_in_aborting_status.return_value = False
config = task_cfg.TaskConfig({
"a.task": [{"runner": {"type": "a", "b": 1}}],
"b.task": [{"runner": {"type": "a", "b": 1}}],
"c.task": [{"runner": {"type": "a", "b": 1}}]
})
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task, mock.Mock())
self.assertRaises(MyException, eng.run)
task.update_status.assert_has_calls((
mock.call(consts.TaskStatus.RUNNING),
mock.call(consts.TaskStatus.CRASHED),
))
subtask_obj.update_status.assert_called_once_with(
consts.SubtaskStatus.CRASHED)
    def test__prepare_context(self):
        @context.configure("test1", 1, platform="testing")
        class TestContext1(context.Context):
            pass

        self.addCleanup(TestContext1.unregister)

        @context.configure("test2", 2, platform="testing")
        class TestContext2(context.Context):
            pass

        self.addCleanup(TestContext2.unregister)

        @scenario.configure("test_ctx.test", platform="testing",
                            context={"test1@testing": {"a": 1}})
        class TestScenario(scenario.Scenario):
            pass

        self.addCleanup(TestScenario.unregister)

        task = mock.MagicMock()
name = "test_ctx.test"
context_config = {"test1": 1, "test2": 2}
env = mock.MagicMock()
eng = engine.TaskEngine({}, task, env)
result = eng._prepare_context(context_config, name, "foo_uuid")
expected_result = {
"task": task,
"owner_id": "foo_uuid",
"scenario_name": name,
"config": {"test1@testing": 1, "test2@testing": 2},
"env": env.data
}
        self.assertEqual(expected_result, result)


class ResultConsumerTestCase(test.TestCase):
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
results = [
[{"duration": 1, "timestamp": 3}],
[{"duration": 2, "timestamp": 2}]
]
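        # the consumer drains both result chunks and stores the iterations
        # sorted by timestamp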
runner.result_queue = collections.deque(results)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=False,
ctx_manager=ctx_manager) as consumer_obj:
pass
mock_sla_instance.add_iteration.assert_has_calls([
mock.call({"duration": 1, "timestamp": 3}),
mock.call({"duration": 2, "timestamp": 2})])
self.assertEqual([{"duration": 2, "timestamp": 2},
{"duration": 1, "timestamp": 3}],
consumer_obj.results)
@mock.patch("rally.task.hook.HookExecutor")
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.engine.time.time")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_no_iteration(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status, mock_time, mock_log, mock_hook_executor):
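        # time.time() returns 0 then 1, so full_duration is 1; with no
        # iterations consumed, load_duration stays 0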
mock_time.side_effect = [0, 1]
mock_sla_instance = mock.MagicMock()
mock_sla_results = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_sla_instance.results.return_value = mock_sla_results
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
results = []
runner.result_queue = collections.deque(results)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=False,
ctx_manager=ctx_manager):
pass
self.assertFalse(workload.add_workload_data.called)
workload.set_results.assert_called_once_with(
full_duration=1, sla_results=mock_sla_results, load_duration=0,
start_time=None,
contexts_results=ctx_manager.contexts_results())
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_sla_failure_abort(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status):
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
runner.result_queue = collections.deque(
[[{"duration": 1, "timestamp": 1},
{"duration": 2, "timestamp": 2}]] * 4)
iteration_count = len(list(
itertools.chain(*runner.result_queue)
))
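        # SLA checks pass for the first three iterations and fail afterwards,
        # which must abort the run since abort_on_sla_failure is True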
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_sla_instance.add_iteration.side_effect = [
i < 3 for i in range(iteration_count)
]
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager):
pass
self.assertTrue(runner.abort.called)
task.update_status.assert_called_once_with(
consts.TaskStatus.SOFT_ABORTING)
@mock.patch("rally.task.hook.HookExecutor")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_abort_manually(self, mock_sla_checker,
mock_event, mock_thread,
mock_task_get_status,
mock_hook_executor):
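        # an empty result queue plus an ABORTED task status: the SLA checker
        # is flagged as aborted manually and no hook iteration events fire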
runner = mock.MagicMock(result_queue=False)
is_done = mock.MagicMock()
is_done.isSet.side_effect = (False, True)
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTED
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
workload_cfg = {"fake": 2, "hooks": []}
mock_hook_executor_instance = mock_hook_executor.return_value
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager):
pass
mock_sla_checker.assert_called_once_with(workload_cfg)
mock_hook_executor.assert_called_once_with(workload_cfg, task)
self.assertFalse(mock_hook_executor_instance.on_iteration.called)
mocked_set_aborted = mock_sla_checker.return_value.set_aborted_manually
mocked_set_aborted.assert_called_once_with()
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_sla_failure_continue(self, mock_sla_checker,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.CRASHED
mock_sla_instance.add_iteration.side_effect = [True, True, False,
False]
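        # SLA failures are recorded, but with abort_on_sla_failure=False the
        # runner must not be aborted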
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
runner.result_queue = collections.deque(
[[{"duration": 1, "timestamp": 4}]] * 4)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
ctx_manager=ctx_manager,
abort_on_sla_failure=False):
pass
self.assertEqual(0, runner.abort.call_count)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_with_unexpected_failure(self, mock_sla_checker,
mock_event, mock_thread,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
runner.result_queue = collections.deque([1])
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
exc = MyException()
try:
with engine.ResultConsumer(workload_cfg, task=task,
subtask=subtask, workload=workload,
runner=runner, ctx_manager=ctx_manager,
abort_on_sla_failure=False):
raise exc
except MyException:
pass
else:
self.fail("ResultConsumer should re-raise the exception.")
mock_sla_instance.set_unexpected_failure.assert_has_calls(
[mock.call(exc)])
@mock.patch("rally.task.engine.CONF")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_chunked(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status, mock_conf):
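        # with raw_result_chunk_size=2, the seven iterations below are
        # flushed as raw chunks of 2, 2, 2 and 1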
mock_conf.raw_result_chunk_size = 2
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock(spec=objects.Task)
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
results = [
[{"duration": 1, "timestamp": 3},
{"duration": 2, "timestamp": 2},
{"duration": 3, "timestamp": 3}],
[{"duration": 4, "timestamp": 2},
{"duration": 5, "timestamp": 3}],
[{"duration": 6, "timestamp": 2}],
[{"duration": 7, "timestamp": 1}],
]
runner.result_queue = collections.deque(results)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=False,
ctx_manager=ctx_manager) as consumer_obj:
pass
mock_sla_instance.add_iteration.assert_has_calls([
mock.call({"duration": 1, "timestamp": 3}),
mock.call({"duration": 2, "timestamp": 2}),
mock.call({"duration": 3, "timestamp": 3}),
mock.call({"duration": 4, "timestamp": 2}),
mock.call({"duration": 5, "timestamp": 3}),
mock.call({"duration": 6, "timestamp": 2}),
mock.call({"duration": 7, "timestamp": 1})])
self.assertEqual([{"duration": 7, "timestamp": 1}],
consumer_obj.results)
workload.add_workload_data.assert_has_calls([
mock.call(0, {"raw": [{"duration": 2, "timestamp": 2},
{"duration": 1, "timestamp": 3}]}),
mock.call(1, {"raw": [{"duration": 4, "timestamp": 2},
{"duration": 3, "timestamp": 3}]}),
mock.call(2, {"raw": [{"duration": 6, "timestamp": 2},
{"duration": 5, "timestamp": 3}]}),
mock.call(3, {"raw": [{"duration": 7, "timestamp": 1}]})])
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.hook.HookExecutor")
@mock.patch("rally.task.engine.time.time")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_events(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status, mock_time, mock_hook_executor, mock_log):
mock_time.side_effect = [0, 1]
mock_sla_instance = mock_sla_checker.return_value
mock_sla_results = mock_sla_instance.results.return_value
mock_hook_executor_instance = mock_hook_executor.return_value
mock_hook_results = mock_hook_executor_instance.results.return_value
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": [{"config": True}]}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
events = [
{"type": "iteration", "value": 1},
{"type": "iteration", "value": 2},
{"type": "iteration", "value": 3}
]
runner.result_queue = collections.deque()
runner.event_queue = collections.deque(events)
ctx_manager = mock.MagicMock()
consumer_obj = engine.ResultConsumer(
workload_cfg, task=task, subtask=subtask, workload=workload,
runner=runner, abort_on_sla_failure=False, ctx_manager=ctx_manager)
        stop_event = threading.Event()

        # signal the test once the consumer thread has drained the event queue
        def set_stop_event(event_type, value):
            if not runner.event_queue:
                stop_event.set()

        mock_hook_executor_instance.on_event.side_effect = set_stop_event

        with consumer_obj:
stop_event.wait(1)
mock_hook_executor_instance.on_event.assert_has_calls([
mock.call(event_type="iteration", value=1),
mock.call(event_type="iteration", value=2),
mock.call(event_type="iteration", value=3)
])
self.assertFalse(workload.add_workload_data.called)
workload.set_results.assert_called_once_with(
full_duration=1,
load_duration=0,
sla_results=mock_sla_results,
hooks_results=mock_hook_results,
start_time=None,
contexts_results=ctx_manager.contexts_results())
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskEngine._prepare_context")
@mock.patch("rally.task.engine.time.sleep")
def test_wait_and_abort_on_abort(
self, mock_sleep, mock_task_engine__prepare_context,
mock_task_get_status, mock_event, mock_thread):
runner = mock.MagicMock()
workload_cfg = mock.MagicMock()
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
mock_task_get_status.side_effect = (consts.TaskStatus.RUNNING,
consts.TaskStatus.RUNNING,
consts.TaskStatus.ABORTING)
mock_is_done = mock.MagicMock()
mock_event.return_value = mock_is_done
mock_is_done.isSet.return_value = False
ctx_manager = mock.MagicMock()
res = engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager)
res.wait_and_abort()
runner.abort.assert_called_with()
        # task.get_status is polled until it reports ABORTING (three calls),
        # after which the runner is aborted
        self.assertEqual(3, mock_task_get_status.call_count)
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskEngine._prepare_context")
@mock.patch("rally.task.engine.time.sleep")
def test_wait_and_abort_on_no_abort(
self, mock_sleep, mock_task_engine__prepare_context,
mock_task_get_status, mock_event, mock_thread):
runner = mock.MagicMock()
workload_cfg = mock.MagicMock()
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
mock_is_done = mock.MagicMock()
mock_event.return_value = mock_is_done
ctx_manager = mock.MagicMock()
mock_is_done.isSet.side_effect = [False, False, False, False, True]
res = engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager)
res.wait_and_abort()
        # the runner must not be aborted while the task stays RUNNING
        self.assertFalse(runner.abort.called)
        # task.get_status is polled on every loop iteration while is_done
        # is not set (four times here)
        self.assertEqual(4, mock_task_get_status.call_count)