# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
    Default is 0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
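# Illustrative usage sketch (an addition, not part of the original module):
# with weights [1, 3] the correlation at position i is
# 1*input[i-1] + 3*input[i], and the default 'reflect' mode pads the array
# by mirroring it at its edges.
_corr_example = correlate1d([2, 8, 0, 4, 1, 9, 9, 0], [1, 3])
assert numpy.array_equal(_corr_example, [8, 26, 8, 12, 7, 28, 36, 9])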
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
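# Illustrative sketch (an addition): convolve1d flips the weights and
# delegates to correlate1d, so for an odd-length kernel the two functions
# agree once the kernel is reversed.
_x = numpy.array([0.0, 1.0, 4.0, 2.0, 7.0, 3.0])
assert numpy.allclose(convolve1d(_x, [1.0, 2.0, 3.0]),
                      correlate1d(_x, [3.0, 2.0, 1.0]))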
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1: # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
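# Illustrative sketch (an addition): the order-0 kernel built above is
# normalized to sum to 1, so a constant signal passes through unchanged,
# while order=1 responds to the local slope of the signal.
_const = numpy.full(50, 7.0)
assert numpy.allclose(gaussian_filter1d(_const, sigma=2), 7.0)
_ramp = numpy.arange(50, dtype=numpy.float64)
_slope = gaussian_filter1d(_ramp, sigma=2, order=1)  # ~1 away from the edges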
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
        raise ValueError('Order outside 0..3 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return return_value
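# Illustrative sketch (an addition): as the Notes above state, the N-D filter
# is one 1-D Gaussian pass per axis, so chaining gaussian_filter1d over both
# axes of a 2-D array reproduces gaussian_filter.
_img = numpy.arange(256, dtype=numpy.float64).reshape(16, 16) ** 0.5
_by_axis = gaussian_filter1d(gaussian_filter1d(_img, 2, axis=0), 2, axis=1)
assert numpy.allclose(gaussian_filter(_img, sigma=2), _by_axis)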
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,)
return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value
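# Illustrative sketch (an addition): as in the body above, sobel applies the
# derivative kernel [-1, 0, 1] along the requested axis and the smoothing
# kernel [1, 2, 1] along every other axis.
_sq = numpy.arange(25, dtype=numpy.float64).reshape(5, 5) ** 2
_manual = correlate1d(correlate1d(_sq, [-1, 0, 1], axis=0), [1, 2, 1], axis=1)
assert numpy.allclose(sobel(_sq, axis=0), _manual)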
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords = None):
"""N-dimensional Laplace filter using a provided second derivative function
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative2(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode)s
%(cval)s
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
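# Illustrative sketch (an addition): laplace is the sum over all axes of the
# 1-D second-difference kernel [1, -2, 1] plugged into generic_laplace.
_sq = numpy.arange(36, dtype=numpy.float64).reshape(6, 6) ** 2
_manual = (correlate1d(_sq, [1, -2, 1], axis=0) +
           correlate1d(_sq, [1, -2, 1], axis=1))
assert numpy.allclose(laplace(_sq), _manual)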
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords = None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
if NumpyVersion(numpy.__version__) > '1.6.1':
numpy.sqrt(output, output, casting='unsafe')
else:
numpy.sqrt(output, output)
else:
output[...] = input[...]
return return_value
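# Illustrative sketch (an addition): a typical derivative to pass in is
# sobel (or prewitt), which gives sqrt(sum_i sobel(input, axis=i)**2).
_sq = numpy.arange(49, dtype=numpy.float64).reshape(7, 7) ** 1.5
_mag = generic_gradient_magnitude(_sq, sobel)
_manual = numpy.sqrt(sobel(_sq, axis=0) ** 2 + sobel(_sq, axis=1) ** 2)
assert numpy.allclose(_mag, _manual)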
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter.
Default is 0.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e., where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single nearest value at an edge of `input`
    is repeated as many times as needed to match the overlapping `weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
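# Illustrative sketch (an addition): for an odd-sized kernel, convolution is
# correlation with the kernel flipped along every axis.
_a = numpy.arange(30, dtype=numpy.float64).reshape(5, 6)
_k = numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
assert numpy.allclose(convolve(_a, _k), correlate(_a, _k[::-1, ::-1]))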
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
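# Illustrative sketch (an addition): a size-3 sliding minimum with the
# default 'reflect' handling of the borders.
assert numpy.array_equal(minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], 3),
                         [2, 0, 0, 0, 1, 1, 0, 0])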
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
if numpy.alltrue(numpy.ravel(footprint), axis=0):
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
        indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculates a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
median_filter : ndarray
Return of same shape as `input`.
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
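# Illustrative sketch (an addition): percentile=50 reproduces the median
# filter, and a negative percentile counts down from 100, so -20 equals 80.
_a = numpy.arange(36, dtype=numpy.float64).reshape(6, 6) % 7
assert numpy.allclose(percentile_filter(_a, 50, size=3),
                      median_filter(_a, size=3))
assert numpy.allclose(percentile_filter(_a, -20, size=3),
                      percentile_filter(_a, 80, size=3))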
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords = None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
given function at each line. The arguments of the line are the
input line, and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : callable
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments, extra_keywords)
return return_value
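# Illustrative sketch (an addition): the callback receives the padded input
# line and must fill the output line in place. With filter_size=3 the input
# line is two samples longer than the output line, so a centred moving
# average is just three shifted slices.
def _moving_average(in_line, out_line):
    out_line[...] = (in_line[:-2] + in_line[1:-1] + in_line[2:]) / 3.0

_avg_in = numpy.arange(12, dtype=numpy.float64).reshape(3, 4)
assert numpy.allclose(generic_filter1d(_avg_in, _moving_average, 3),
                      uniform_filter1d(_avg_in, 3))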
@docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords = None):
"""Calculates a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : callable
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
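# Illustrative sketch (an addition): the callback receives the values under
# the footprint as a flat 1-D double array, so a local peak-to-peak filter
# matches the difference of the maximum and minimum filters.
_a = numpy.arange(36, dtype=numpy.float64).reshape(6, 6) % 5
_ptp = generic_filter(_a, lambda buf: buf.max() - buf.min(), size=3)
assert numpy.allclose(_ptp,
                      maximum_filter(_a, size=3) - minimum_filter(_a, size=3))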
# This is the VGG benchmark for CIFAR-10
import torch
from logical.vggpremade import vgg11
import torchvision.transforms as transforms
import torchvision.datasets
from torch.utils.data.dataloader import DataLoader
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import time
import math
import shutil
# class AndorVGG(nn.Module):
# def __init__(self,num_classes=1000,img_dim=32):
# super(AndorVGG,self).__init__()
#
# # # input is 224*224*3
# # self.z0=Parameter(torch.Tensor(64,3,32,32))
# # self.z1=Parameter(torch.Tensor(1).fill_(1))
#
# # a
# # thinking about a way to facilitate the calculation and utilize my video card
# # a for-loop based implementation is not fast
# # for now I think adding more channels to the convolution should be a good idea,
# # then I will collapse the channels by summing them up
# # e.g. a 64-channel output will be added up into 8 outputs
# # (a sketch of this collapse is given after this commented-out class)
# # cv_1(x)+cv_2(x)+...+cv_n(x) will be forced to have the same meaning
# # this idea is similar to the residual network too.
#
# # double the parameters of config A
# # cross validate for performance measures
#
# self.a=nn.Conv2d(3,128,kernel_size=3,padding=1)
# self.mp1=nn.MaxPool2d(kernel_size=2,stride=2)
# self.b=nn.Conv2d(32,256,kernel_size=3,padding=1)
# self.mp2=nn.MaxPool2d(kernel_size=2,stride=2)
# self.c=nn.Conv2d(64,512,kernel_size=3,padding=1)
# self.d=nn.Conv2d(128,512,kernel_size=3,padding=1)
# self.mp3=nn.MaxPool2d(kernel_size=2,stride=2)
# self.e=nn.Conv2d(128,1024,kernel_size=3,padding=1)
# self.f=nn.Conv2d(256,1024,kernel_size=3,padding=1)
# self.mp4=nn.MaxPool2d(kernel_size=2,stride=2)
# # self.g=nn.Conv2d(256,1024,kernel_size=2,padding=1)
# # self.h=nn.Conv2d(256,1024,kernel_size=2,padding=1)
# # self.mp5=nn.MaxPool2d(kernel_size=2,stride=2)
#
# self.classifier = nn.Sequential(
# nn.Linear(256 * 2 * 2, 4096),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(4096, 4096),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(4096, num_classes),
# )
# self._initialize_weights()
#
#
# def forward(self,inputs):
# # I need to make sure that gradients flow through these two
# # x=self.z1*inputs+self.z0
#
# # collapse the dimensions. Every 4 of them will be forced to add up,
# # activation will happen after the sum
# # or who knows, maybe I should activate on both. I don't know
# # TODO experiment this
# # to collapse, reshape the tensor, sum respective dimensions, then squeeze
# # rinse and repeat.
#
# x=self.a(inputs) #(-1,128,32,32)
# x=F.relu(x)
# x=x.view(-1,4,32,32,32)
# x=x.sum(1)
# x=self.mp1(x) #(-1,32,16,16)
#
# # no relu yet? TODO
# x=self.b(x)#(-1,256,16,16)
# x=F.relu(x)
# x=x.view(-1,4,64,16,16)
# x=x.sum(1)
# x=self.mp2(x)#(-1,64,8,8)
#
# x=self.c(x) #(-1,512,8,8)
# x=F.relu(x)
# x=x.view(-1,4,128,8,8)
# x=x.sum(1)
# x=self.d(x)
# x=F.relu(x)
# x=x.view(-1,4,128,8,8)
# x=x.sum(1)
# x=self.mp3(x) #(-1,128,8,8)
#
# x=self.e(x)
# x=F.relu(x)
# x=x.view(-1,4,256,4,4)
# x=x.sum(1)
# x=self.f(x)
# x=F.relu(x)
# x=x.view(-1,4,256,4,4)
# x=x.sum(1)
# x=self.mp4(x) #(-1,256,2,2)
#
# # x=self.g(x)
# # x=x.view(?)
# # x=x.sum(1)
# # x=self.h(x)
# # x=x.view(?)
# # x=x.sum(1)
# # x=self.mp5(x)
#
# # for the moment classifier is the only thing that does not have andor structure
# # I always get an intuition
# # the lower modules should be saturated, but higher modules should be discretized
# # from full measurements and intuitions to the conditions and booleans
# # expansion of dimensions is full
# # collapse of dimensions should be logical
# # this means I may retain all the architectures
# # but, if any, the final classifier should be in andor structure
#
# x=x.view(x.size(0),-1)
# x = self.classifier(x)
#
# return x
#
#
# def _initialize_weights(self):
# # init z0
# # stdv=1./math.sqrt(32*32*3)
# # self.z0.data.uniform_(-stdv,stdv)
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# if m.bias is not None:
# m.bias.data.zero_()
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
# elif isinstance(m, nn.Linear):
# n = m.weight.size(1)
# m.weight.data.normal_(0, 0.01)
# m.bias.data.zero_()
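# A minimal sketch (an addition, not part of the original script) of the
# channel-collapse trick used in the commented-out forward pass above: the
# view splits the 128 channels into a (4, 32) block and sum(1) merges it, so
# each of the 32 output channels is the sum of the four input channels whose
# index agrees with it modulo 32. Uses the `torch` imported at the top.
_x = torch.randn(2, 128, 32, 32)                # stand-in for self.a's output
_collapsed = _x.view(-1, 4, 32, 32, 32).sum(1)  # collapses to (-1, 32, 32, 32)
assert _collapsed.shape == (2, 32, 32, 32)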
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
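# A minimal sketch (an addition): two samples over three classes. Sample 0 is
# right at top-1; sample 1 is only recovered at top-2, so precision@1 is 50%
# and precision@2 is 100%.
_logits = torch.tensor([[0.1, 0.7, 0.2],
                        [0.5, 0.3, 0.2]])
_labels = torch.tensor([1, 1])
_top1, _top2 = accuracy(_logits, _labels, topk=(1, 2))
assert float(_top1) == 50.0 and float(_top2) == 100.0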
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.cuda()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print_freq=10
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input, volatile=True).cuda()
target_var = torch.autograd.Variable(target, volatile=True).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print_freq=10
if i % print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
best_prec1 = 0
def main():
global best_prec1
model=vgg11(num_classes=10,pretrained=False)
cifar_train=torchvision.datasets.CIFAR10(root='~/datasets/',train=True,download=True,transform=transforms.ToTensor())
cifar_test=torchvision.datasets.CIFAR10(root='~/datasets/',train=False,download=True,transform=transforms.ToTensor())
cifar_train=DataLoader(cifar_train,batch_size=64,shuffle=True,num_workers=1)
cifar_test=DataLoader(cifar_test,batch_size=64,shuffle=True,num_workers=1)
lr=0.0001
optimizer=torch.optim.Adam(model.parameters(),lr=lr)
criterion = nn.CrossEntropyLoss().cuda()
epochs=20
for epoch in range(epochs):
train(cifar_train,model,criterion,optimizer,epoch)
prec1 = validate(cifar_test, model, criterion)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best)
validate(cifar_test, model, criterion)
# today I will be able to run this code.
# I can plan ahead now. What if the performance is extremely bad? What if it's equal?
# What would be my next step?
# I expect that the performance will be good.
# I did not go fully on the interpretability constraint. I will go more if this stage
# is successful.
# z0 is like memory
# why don't you try to train and backprop the PIL transforms?
# reinforcement learning right?
# there are many techniques other than deep learning and backprop.
# test performance no longer improves,
# slightly overfitted with too many parameters
# instead of regulating the parameters, I plan to place a stronger
# logical architecture.
# * Prec@1 72.740 Prec@5 97.240
# original: * Prec@1 73.640 Prec@5 96.910
# so the architecture does not change the performance, as expected.
if __name__ == "__main__":
main()
"""Test the Risco config flow."""
import pytest
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.risco.config_flow import (
CannotConnectError,
UnauthorizedError,
)
from homeassistant.components.risco.const import DOMAIN
from tests.async_mock import PropertyMock, patch
from tests.common import MockConfigEntry
TEST_SITE_NAME = "test-site-name"
TEST_DATA = {
"username": "test-username",
"password": "test-password",
"pin": "1234",
}
TEST_RISCO_TO_HA = {
"arm": "armed_away",
"partial_arm": "armed_home",
"A": "armed_home",
"B": "armed_home",
"C": "armed_night",
"D": "armed_night",
}
TEST_HA_TO_RISCO = {
"armed_away": "arm",
"armed_home": "partial_arm",
"armed_night": "C",
}
TEST_OPTIONS = {
"scan_interval": 10,
"code_arm_required": True,
"code_disarm_required": True,
}
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
return_value=True,
), patch(
"homeassistant.components.risco.config_flow.RiscoAPI.site_name",
new_callable=PropertyMock(return_value=TEST_SITE_NAME),
), patch(
"homeassistant.components.risco.config_flow.RiscoAPI.close"
) as mock_close, patch(
"homeassistant.components.risco.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.risco.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_SITE_NAME
assert result2["data"] == TEST_DATA
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
mock_close.assert_awaited_once()
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
side_effect=UnauthorizedError,
), patch("homeassistant.components.risco.config_flow.RiscoAPI.close") as mock_close:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
mock_close.assert_awaited_once()
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
side_effect=CannotConnectError,
), patch("homeassistant.components.risco.config_flow.RiscoAPI.close") as mock_close:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
mock_close.assert_awaited_once()
async def test_form_exception(hass):
"""Test we handle unknown exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.risco.config_flow.RiscoAPI.login",
side_effect=Exception,
), patch("homeassistant.components.risco.config_flow.RiscoAPI.close") as mock_close:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
mock_close.assert_awaited_once()
async def test_form_already_exists(hass):
"""Test that a flow with an existing username aborts."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DATA["username"],
data=TEST_DATA,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_DATA
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_options_flow(hass):
"""Test options flow."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DATA["username"],
data=TEST_DATA,
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_OPTIONS,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "risco_to_ha"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_RISCO_TO_HA,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "ha_to_risco"
with patch("homeassistant.components.risco.async_setup_entry", return_value=True):
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_HA_TO_RISCO,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert entry.options == {
**TEST_OPTIONS,
"risco_states_to_ha": TEST_RISCO_TO_HA,
"ha_states_to_risco": TEST_HA_TO_RISCO,
}
async def test_ha_to_risco_schema(hass):
"""Test that the schema for the ha-to-risco mapping step is generated properly."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DATA["username"],
data=TEST_DATA,
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_OPTIONS,
)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=TEST_RISCO_TO_HA,
)
# Test an HA state that isn't used
with pytest.raises(vol.error.MultipleInvalid):
await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**TEST_HA_TO_RISCO, "armed_custom_bypass": "D"},
)
# Test a combo that can't be selected
with pytest.raises(vol.error.MultipleInvalid):
await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**TEST_HA_TO_RISCO, "armed_night": "A"},
)
"""
Core visualization operations based on PyVista.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
from distutils.version import LooseVersion
import os
import sys
import warnings
import numpy as np
import vtk
from .base_renderer import _BaseRenderer
from ._utils import _get_colormap_from_array
from ...utils import copy_base_doc_to_subclass_doc
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
from pyvista import Plotter, PolyData, Line, close_all, UnstructuredGrid
try:
from pyvistaqt import BackgroundPlotter # noqa
except ImportError:
from pyvista import BackgroundPlotter
from pyvista.utilities import try_callback
from pyvista.plotting.plotting import _ALL_PLOTTERS
VTK9 = LooseVersion(vtk.VTK_VERSION) >= LooseVersion('9.0')
_FIGURES = dict()
class _Figure(object):
def __init__(self, plotter=None,
plotter_class=None,
display=None,
show=False,
title='PyVista Scene',
size=(600, 600),
shape=(1, 1),
background_color='black',
smooth_shading=True,
off_screen=False,
notebook=False):
self.plotter = plotter
self.plotter_class = plotter_class
self.display = display
self.background_color = background_color
self.smooth_shading = smooth_shading
self.notebook = notebook
self.store = dict()
self.store['show'] = show
self.store['title'] = title
self.store['window_size'] = size
self.store['shape'] = shape
self.store['off_screen'] = off_screen
self.store['border'] = False
self.store['auto_update'] = False
def build(self):
if self.plotter_class is None:
self.plotter_class = BackgroundPlotter
if self.notebook:
self.plotter_class = Plotter
if self.plotter_class == Plotter:
self.store.pop('show', None)
self.store.pop('title', None)
self.store.pop('auto_update', None)
if self.plotter is None:
plotter = self.plotter_class(**self.store)
plotter.background_color = self.background_color
self.plotter = plotter
_process_events(self.plotter)
_process_events(self.plotter)
return self.plotter
def is_active(self):
if self.plotter is None:
return False
return hasattr(self.plotter, 'ren_win')
class _Projection(object):
"""Class storing projection information.
Attributes
----------
xy : array
Result of 2d projection of 3d data.
pts : None
Scene sensors handle.
"""
def __init__(self, xy=None, pts=None):
"""Store input projection information into attributes."""
self.xy = xy
self.pts = pts
def visible(self, state):
"""Modify visibility attribute of the sensors."""
self.pts.SetVisibility(state)
def _enable_aa(figure, plotter):
"""Enable it everywhere except Azure."""
# XXX for some reason doing this on Azure causes access violations:
# ##[error]Cmd.exe exited with code '-1073741819'
# So for now don't use it there. Maybe has to do with setting these
# before the window has actually been made "active"...?
# For Mayavi we have an "on activated" event or so, we should look into
# using this for Azure at some point, too.
if os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true':
return
if figure.is_active():
if sys.platform != 'darwin':
plotter.enable_anti_aliasing()
plotter.ren_win.LineSmoothingOn()
@copy_base_doc_to_subclass_doc
class _Renderer(_BaseRenderer):
"""Class managing rendering scene.
Attributes
----------
plotter: Plotter
Main PyVista access point.
name: str
Name of the window.
"""
def __init__(self, fig=None, size=(600, 600), bgcolor='black',
name="PyVista Scene", show=False, shape=(1, 1),
notebook=None, smooth_shading=True):
from .renderer import MNE_3D_BACKEND_TESTING
from .._3d import _get_3d_option
figure = _Figure(show=show, title=name, size=size, shape=shape,
background_color=bgcolor, notebook=notebook,
smooth_shading=smooth_shading)
self.font_family = "arial"
self.tube_n_sides = 20
self.shape = shape
antialias = _get_3d_option('antialias')
self.antialias = antialias and not MNE_3D_BACKEND_TESTING
if isinstance(fig, int):
saved_fig = _FIGURES.get(fig)
# Restore only active plotter
if saved_fig is not None and saved_fig.is_active():
self.figure = saved_fig
else:
self.figure = figure
_FIGURES[fig] = self.figure
elif fig is None:
self.figure = figure
else:
self.figure = fig
# Enable off_screen if sphinx-gallery or testing
if pyvista.OFF_SCREEN:
self.figure.store['off_screen'] = True
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
if MNE_3D_BACKEND_TESTING:
self.tube_n_sides = 3
with _disabled_depth_peeling():
self.plotter = self.figure.build()
self.plotter.hide_axes()
if hasattr(self.plotter, "default_camera_tool_bar"):
self.plotter.default_camera_tool_bar.close()
if hasattr(self.plotter, "saved_cameras_tool_bar"):
self.plotter.saved_cameras_tool_bar.close()
if self.antialias:
_enable_aa(self.figure, self.plotter)
@contextmanager
def ensure_minimum_sizes(self):
sz = self.figure.store['window_size']
# plotter: pyvista.plotting.qt_plotting.BackgroundPlotter
# plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget # noqa
# plotter.app_window: pyvista.plotting.qt_plotting.MainWindow -> QMainWindow # noqa
# plotter.frame: QFrame with QVBoxLayout with plotter.interactor as centralWidget # noqa
# plotter.ren_win: vtkXOpenGLRenderWindow
self.plotter.interactor.setMinimumSize(*sz)
try:
yield
finally:
for _ in range(2):
self.plotter.app.processEvents()
self.plotter.interactor.setMinimumSize(0, 0)
def subplot(self, x, y):
x = np.max([0, np.min([x, self.shape[0] - 1])])
y = np.max([0, np.min([y, self.shape[1] - 1])])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
self.plotter.subplot(x, y)
if self.antialias:
_enable_aa(self.figure, self.plotter)
def scene(self):
return self.figure
def set_interactive(self):
self.plotter.enable_terrain_style()
def polydata(self, mesh, color=None, opacity=1.0, normals=None,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
rgba = False
if color is not None and len(color) == mesh.n_points:
if color.shape[1] == 3:
scalars = np.c_[color, np.ones(mesh.n_points)]
else:
scalars = color
scalars = (scalars * 255).astype('ubyte')
color = None
rgba = True
if isinstance(colormap, np.ndarray):
if colormap.dtype == np.uint8:
colormap = colormap.astype(np.float64) / 255.
from matplotlib.colors import ListedColormap
colormap = ListedColormap(colormap)
if normals is not None:
mesh.point_arrays["Normals"] = normals
mesh.GetPointData().SetActiveNormals("Normals")
else:
_compute_normals(mesh)
actor = _add_mesh(
plotter=self.plotter,
mesh=mesh, color=color, scalars=scalars,
rgba=rgba, opacity=opacity, cmap=colormap,
backface_culling=backface_culling,
rng=[vmin, vmax], show_scalar_bar=False,
smooth_shading=self.figure.smooth_shading,
interpolate_before_map=interpolate_before_map,
style=representation, line_width=line_width, **kwargs,
)
return actor, mesh
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
vertices = np.c_[x, y, z]
triangles = np.c_[np.full(len(triangles), 3), triangles]
mesh = PolyData(vertices, triangles)
return self.polydata(
mesh=mesh,
color=color,
opacity=opacity,
normals=normals,
backface_culling=backface_culling,
scalars=scalars,
colormap=colormap,
vmin=vmin,
vmax=vmax,
interpolate_before_map=interpolate_before_map,
representation=representation,
line_width=line_width,
**kwargs,
)
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
if colormap is not None:
colormap = _get_colormap_from_array(colormap,
normalized_colormap)
vertices = np.array(surface['rr'])
triangles = np.array(surface['tris'])
n_triangles = len(triangles)
triangles = np.c_[np.full(n_triangles, 3), triangles]
mesh = PolyData(vertices, triangles)
mesh.point_arrays['scalars'] = scalars
contour = mesh.contour(isosurfaces=contours, rng=(vmin, vmax))
line_width = width
if kind == 'tube':
contour = contour.tube(radius=width, n_sides=self.tube_n_sides)
line_width = 1.0
actor = _add_mesh(
plotter=self.plotter,
mesh=contour,
show_scalar_bar=False,
line_width=line_width,
color=color,
cmap=colormap,
opacity=opacity,
smooth_shading=self.figure.smooth_shading
)
return actor, contour
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, scalars=None,
backface_culling=False):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
normals = surface.get('nn', None)
vertices = np.array(surface['rr'])
triangles = np.array(surface['tris'])
triangles = np.c_[np.full(len(triangles), 3), triangles]
mesh = PolyData(vertices, triangles)
colormap = _get_colormap_from_array(colormap, normalized_colormap)
if scalars is not None:
mesh.point_arrays['scalars'] = scalars
return self.polydata(
mesh=mesh,
color=color,
opacity=opacity,
normals=normals,
backface_culling=backface_culling,
scalars=scalars,
colormap=colormap,
vmin=vmin,
vmax=vmax,
)
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
factor = 1.0 if radius is not None else scale
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(resolution)
sphere.SetPhiResolution(resolution)
if radius is not None:
sphere.SetRadius(radius)
sphere.Update()
geom = sphere.GetOutput()
mesh = PolyData(np.array(center))
glyph = mesh.glyph(orient=False, scale=False,
factor=factor, geom=geom)
actor = _add_mesh(
self.plotter,
mesh=glyph, color=color, opacity=opacity,
backface_culling=backface_culling,
smooth_shading=self.figure.smooth_shading
)
return actor, glyph
def tube(self, origin, destination, radius=0.001, color='white',
scalars=None, vmin=None, vmax=None, colormap='RdBu',
normalized_colormap=False, reverse_lut=False):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
cmap = _get_colormap_from_array(colormap, normalized_colormap)
for (pointa, pointb) in zip(origin, destination):
line = Line(pointa, pointb)
if scalars is not None:
line.point_arrays['scalars'] = scalars[0, :]
scalars = 'scalars'
color = None
else:
scalars = None
tube = line.tube(radius, n_sides=self.tube_n_sides)
_add_mesh(
plotter=self.plotter,
mesh=tube,
scalars=scalars,
flip_scalars=reverse_lut,
rng=[vmin, vmax],
color=color,
show_scalar_bar=False,
cmap=cmap,
smooth_shading=self.figure.smooth_shading,
)
return tube
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False, line_width=2., name=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
factor = scale
vectors = np.c_[u, v, w]
points = np.vstack(np.c_[x, y, z])
n_points = len(points)
cell_type = np.full(n_points, vtk.VTK_VERTEX)
cells = np.c_[np.full(n_points, 1), range(n_points)]
args = (cells, cell_type, points)
if not VTK9:
args = (np.arange(n_points) * 3,) + args
grid = UnstructuredGrid(*args)
grid.point_arrays['vec'] = vectors
if scale_mode == 'scalar':
grid.point_arrays['mag'] = np.array(scalars)
scale = 'mag'
else:
scale = False
if mode == '2darrow':
return _arrow_glyph(grid, factor)
elif mode == 'arrow' or mode == '3darrow':
_add_mesh(
self.plotter,
mesh=grid.glyph(orient='vec',
scale=scale,
factor=factor),
color=color,
opacity=opacity,
backface_culling=backface_culling
)
elif mode == 'cone':
cone = vtk.vtkConeSource()
if glyph_height is not None:
cone.SetHeight(glyph_height)
if glyph_center is not None:
cone.SetCenter(glyph_center)
if glyph_resolution is not None:
cone.SetResolution(glyph_resolution)
cone.Update()
geom = cone.GetOutput()
_add_mesh(
self.plotter,
mesh=grid.glyph(orient='vec',
scale=scale,
factor=factor,
geom=geom),
color=color,
opacity=opacity,
backface_culling=backface_culling
)
elif mode == 'cylinder':
cylinder = vtk.vtkCylinderSource()
cylinder.SetHeight(glyph_height)
cylinder.SetRadius(0.15)
cylinder.SetCenter(glyph_center)
cylinder.SetResolution(glyph_resolution)
cylinder.Update()
# fix orientation
tr = vtk.vtkTransform()
tr.RotateWXYZ(90, 0, 0, 1)
trp = vtk.vtkTransformPolyDataFilter()
trp.SetInputData(cylinder.GetOutput())
trp.SetTransform(tr)
trp.Update()
geom = trp.GetOutput()
_add_mesh(
self.plotter,
mesh=grid.glyph(orient='vec',
scale=scale,
factor=factor,
geom=geom),
color=color,
opacity=opacity,
backface_culling=backface_culling
)
def text2d(self, x_window, y_window, text, size=14, color='white',
justification=None):
size = 14 if size is None else size
position = (x_window, y_window)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
actor = self.plotter.add_text(text, position=position,
font_size=size,
font=self.font_family,
color=color,
viewport=True)
if isinstance(justification, str):
if justification == 'left':
actor.GetTextProperty().SetJustificationToLeft()
elif justification == 'center':
actor.GetTextProperty().SetJustificationToCentered()
elif justification == 'right':
actor.GetTextProperty().SetJustificationToRight()
else:
                raise ValueError('Expected values for `justification` '
                                 'are `left`, `center` or `right` but '
                                 'got {} instead.'.format(justification))
return actor
def text3d(self, x, y, z, text, scale, color='white'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
self.plotter.add_point_labels(points=[x, y, z],
labels=[text],
point_size=scale,
text_color=color,
font_family=self.font_family,
name=text,
shape_opacity=0)
def scalarbar(self, source, color="white", title=None, n_labels=4,
bgcolor=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
self.plotter.add_scalar_bar(color=color, title=title,
n_labels=n_labels,
use_opacity=False, n_colors=256,
position_x=0.15,
position_y=0.05, width=0.7,
shadow=False, bold=True,
label_font_size=22,
font_family=self.font_family,
background_color=bgcolor)
def show(self):
self.figure.display = self.plotter.show()
if hasattr(self.plotter, "app_window"):
with self.ensure_minimum_sizes():
self.plotter.app_window.show()
_process_events(self.plotter, show=True)
return self.scene()
def close(self):
_close_3d_figure(figure=self.figure)
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None):
_set_3d_view(self.figure, azimuth=azimuth, elevation=elevation,
distance=distance, focalpoint=focalpoint)
def reset_camera(self):
self.plotter.reset_camera()
def screenshot(self, mode='rgb', filename=None):
return _take_3d_screenshot(figure=self.figure, mode=mode,
filename=filename)
def project(self, xyz, ch_names):
xy = _3d_to_2d(self.plotter, xyz)
xy = dict(zip(ch_names, xy))
# pts = self.fig.children[-1]
pts = self.plotter.renderer.GetActors().GetLastItem()
return _Projection(xy=xy, pts=pts)
def enable_depth_peeling(self):
if not self.figure.store['off_screen']:
for renderer in self.plotter.renderers:
renderer.enable_depth_peeling()
def remove_mesh(self, mesh_data):
actor, _ = mesh_data
self.plotter.renderer.remove_actor(actor)
def _compute_normals(mesh):
"""Patch PyVista compute_normals."""
if 'Normals' not in mesh.point_arrays:
mesh.compute_normals(
cell_normals=False,
consistent_normals=False,
non_manifold_traversal=False,
inplace=True,
)
def _add_mesh(plotter, *args, **kwargs):
"""Patch PyVista add_mesh."""
_process_events(plotter)
mesh = kwargs.get('mesh')
    smooth_shading = kwargs.pop('smooth_shading', True)
actor = plotter.add_mesh(*args, **kwargs)
if smooth_shading and 'Normals' in mesh.point_arrays:
prop = actor.GetProperty()
prop.SetInterpolationToPhong()
return actor
def _deg2rad(deg):
return deg * np.pi / 180.
def _rad2deg(rad):
return rad * 180. / np.pi
def _mat_to_array(vtk_mat):
e = [vtk_mat.GetElement(i, j) for i in range(4) for j in range(4)]
arr = np.array(e, dtype=float)
arr.shape = (4, 4)
return arr
def _3d_to_2d(plotter, xyz):
    """Project 3D world coordinates to 2D display (pixel) coordinates."""
size = plotter.window_size
xyz = np.column_stack([xyz, np.ones(xyz.shape[0])])
# Transform points into 'unnormalized' view coordinates
comb_trans_mat = _get_world_to_view_matrix(plotter)
view_coords = np.dot(comb_trans_mat, xyz.T).T
# Divide through by the fourth element for normalized view coords
norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1))
# Transform from normalized view coordinates to display coordinates.
view_to_disp_mat = _get_view_to_display_matrix(size)
xy = np.dot(view_to_disp_mat, norm_view_coords.T).T
# Pull the first two columns since they're meaningful for 2d plotting
xy = xy[:, :2]
return xy
def _get_world_to_view_matrix(plotter):
cam = plotter.renderer.camera
scene_size = plotter.window_size
clip_range = cam.GetClippingRange()
aspect_ratio = float(scene_size[0]) / scene_size[1]
vtk_comb_trans_mat = cam.GetCompositeProjectionTransformMatrix(
aspect_ratio, clip_range[0], clip_range[1])
vtk_comb_trans_mat = _mat_to_array(vtk_comb_trans_mat)
return vtk_comb_trans_mat
def _get_view_to_display_matrix(size):
x, y = size
view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0],
[0., -y / 2.0, 0., y / 2.0],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return view_to_disp_mat
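# Illustrative sketch (not part of the original module, never called): the
# normalized view coordinate (0, 0, 0) maps to the center of the window
# through the matrix built above; an 800x600 window size is assumed here.
def _example_view_to_display():
    mat = _get_view_to_display_matrix((800, 600))
    center = np.dot(mat, np.array([0., 0., 0., 1.]))
    # center[:2] == [400., 300.], i.e. the middle of the window
    return center[:2]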
def _close_all():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
close_all()
def _get_camera_direction(focalpoint, position):
    """Return the camera offset from ``focalpoint`` in spherical coordinates."""
x, y, z = position - focalpoint
r = np.sqrt(x * x + y * y + z * z)
theta = np.arccos(z / r)
phi = np.arctan2(y, x)
return r, theta, phi, focalpoint
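# Illustrative sketch (not part of the original module, never called): the
# spherical coordinates (r, theta, phi) returned above invert back to the
# cartesian offset ``position - focalpoint``.
def _example_camera_direction_roundtrip():
    focalpoint = np.zeros(3)
    position = np.array([1., 1., 1.])
    r, theta, phi, _ = _get_camera_direction(focalpoint, position)
    offset = np.array([r * np.cos(phi) * np.sin(theta),
                       r * np.sin(phi) * np.sin(theta),
                       r * np.cos(theta)])
    np.testing.assert_allclose(focalpoint + offset, position)
    return offset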
def _set_3d_view(figure, azimuth, elevation, focalpoint, distance):
    position = np.array(figure.plotter.camera_position[0])
    # use the camera's current focal point only to compute the viewing
    # direction, without clobbering the user-supplied ``focalpoint`` argument
    _focalpoint = np.array(figure.plotter.camera_position[1])
    r, theta, phi, fp = _get_camera_direction(_focalpoint, position)
if azimuth is not None:
phi = _deg2rad(azimuth)
if elevation is not None:
theta = _deg2rad(elevation)
renderer = figure.plotter.renderer
bounds = np.array(renderer.ComputeVisiblePropBounds())
if distance is not None:
r = distance
else:
r = max(bounds[1::2] - bounds[::2]) * 2.0
distance = r
if focalpoint is not None:
cen = np.asarray(focalpoint)
else:
cen = (bounds[1::2] + bounds[::2]) * 0.5
focalpoint = cen
# Now calculate the view_up vector of the camera. If the view up is
# close to the 'z' axis, the view plane normal is parallel to the
# camera which is unacceptable, so we use a different view up.
if elevation is None or 5. <= abs(elevation) <= 175.:
view_up = [0, 0, 1]
else:
view_up = [np.sin(phi), np.cos(phi), 0]
position = [
r * np.cos(phi) * np.sin(theta),
r * np.sin(phi) * np.sin(theta),
r * np.cos(theta)]
figure.plotter.camera_position = [
position, cen, view_up]
figure.plotter.renderer._azimuth = azimuth
figure.plotter.renderer._elevation = elevation
figure.plotter.renderer._distance = distance
def _set_3d_title(figure, title, size=16):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
figure.plotter.add_text(title, font_size=size, color='white')
def _check_3d_figure(figure):
if not isinstance(figure, _Figure):
raise TypeError('figure must be an instance of _Figure.')
def _close_3d_figure(figure):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
# close the window
figure.plotter.close()
_process_events(figure.plotter)
# free memory and deregister from the scraper
figure.plotter.deep_clean()
del _ALL_PLOTTERS[figure.plotter._id_name]
_process_events(figure.plotter)
def _take_3d_screenshot(figure, mode='rgb', filename=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
_process_events(figure.plotter)
return figure.plotter.screenshot(
transparent_background=(mode == 'rgba'),
filename=filename)
def _process_events(plotter, show=False):
if hasattr(plotter, 'app'):
plotter.app.processEvents()
if show:
plotter.app_window.show()
def _set_colormap_range(actor, ctable, scalar_bar, rng=None):
from vtk.util.numpy_support import numpy_to_vtk
mapper = actor.GetMapper()
lut = mapper.GetLookupTable()
# Catch: FutureWarning: Conversion of the second argument of
# issubdtype from `complex` to `np.complexfloating` is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
lut.SetTable(numpy_to_vtk(ctable))
if rng is not None:
mapper.SetScalarRange(rng[0], rng[1])
lut.SetRange(rng[0], rng[1])
if scalar_bar is not None:
scalar_bar.SetLookupTable(actor.GetMapper().GetLookupTable())
def _set_mesh_scalars(mesh, scalars, name):
# Catch: FutureWarning: Conversion of the second argument of
# issubdtype from `complex` to `np.complexfloating` is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
mesh.point_arrays[name] = scalars
def _update_slider_callback(slider, callback, event_type):
def _the_callback(widget, event):
value = widget.GetRepresentation().GetValue()
if hasattr(callback, '__call__'):
try_callback(callback, value)
return
if event_type == 'start':
event = vtk.vtkCommand.StartInteractionEvent
elif event_type == 'end':
event = vtk.vtkCommand.EndInteractionEvent
    elif event_type == 'always':
        event = vtk.vtkCommand.InteractionEvent
    else:
        raise ValueError('Expected `event_type` to be "start", "end" or '
                         '"always", got {} instead.'.format(event_type))
    slider.RemoveObserver(event)
    slider.AddObserver(event, _the_callback)
def _add_camera_callback(camera, callback):
camera.AddObserver(vtk.vtkCommand.ModifiedEvent, callback)
def _update_picking_callback(plotter,
on_mouse_move,
on_button_press,
on_button_release,
on_pick):
interactor = plotter.iren
interactor.AddObserver(
vtk.vtkCommand.RenderEvent,
on_mouse_move
)
interactor.AddObserver(
vtk.vtkCommand.LeftButtonPressEvent,
on_button_press
)
interactor.AddObserver(
vtk.vtkCommand.EndInteractionEvent,
on_button_release
)
picker = vtk.vtkCellPicker()
picker.AddObserver(
vtk.vtkCommand.EndPickEvent,
on_pick
)
plotter.picker = picker
def _arrow_glyph(grid, factor):
glyph = vtk.vtkGlyphSource2D()
glyph.SetGlyphTypeToArrow()
glyph.FilledOff()
glyph.Update()
geom = glyph.GetOutput()
# fix position
tr = vtk.vtkTransform()
tr.Translate(0.5, 0., 0.)
trp = vtk.vtkTransformPolyDataFilter()
trp.SetInputData(geom)
trp.SetTransform(tr)
trp.Update()
geom = trp.GetOutput()
polydata = _glyph(
grid,
scale_mode='vector',
scalars=False,
orient='vec',
factor=factor,
geom=geom,
)
return pyvista.wrap(polydata)
def _glyph(dataset, scale_mode='scalar', orient=True, scalars=True, factor=1.0,
geom=None, tolerance=0.0, absolute=False, clamping=False, rng=None):
if geom is None:
arrow = vtk.vtkArrowSource()
arrow.Update()
geom = arrow.GetOutput()
alg = vtk.vtkGlyph3D()
alg.SetSourceData(geom)
if isinstance(scalars, str):
dataset.active_scalars_name = scalars
if isinstance(orient, str):
dataset.active_vectors_name = orient
orient = True
if scale_mode == 'scalar':
alg.SetScaleModeToScaleByScalar()
elif scale_mode == 'vector':
alg.SetScaleModeToScaleByVector()
else:
alg.SetScaleModeToDataScalingOff()
if rng is not None:
alg.SetRange(rng)
alg.SetOrient(orient)
alg.SetInputData(dataset)
alg.SetScaleFactor(factor)
alg.SetClamping(clamping)
alg.Update()
return alg.GetOutput()
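# Illustrative sketch (not part of the original module, never called): using
# the _glyph helper above to put small arrows on a random point cloud. The
# array name 'vec' and the factor value are arbitrary choices for this demo.
def _example_glyph():
    cloud = pyvista.PolyData(np.random.rand(10, 3))
    cloud['vec'] = np.tile([0., 0., 1.], (10, 1))
    arrows = _glyph(cloud, scale_mode='vector', orient='vec', factor=0.1)
    return pyvista.wrap(arrows)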
def _sphere(plotter, center, color, radius):
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(8)
sphere.SetPhiResolution(8)
sphere.SetRadius(radius)
sphere.SetCenter(center)
sphere.Update()
mesh = pyvista.wrap(sphere.GetOutput())
actor = _add_mesh(
plotter,
mesh=mesh,
color=color
)
return actor, mesh
def _require_minimum_version(version_required):
from distutils.version import LooseVersion
version = LooseVersion(pyvista.__version__)
if version < version_required:
raise ImportError('pyvista>={} is required for this module but the '
'version found is {}'.format(version_required,
version))
@contextmanager
def _testing_context(interactive):
from . import renderer
orig_offscreen = pyvista.OFF_SCREEN
orig_testing = renderer.MNE_3D_BACKEND_TESTING
if interactive:
pyvista.OFF_SCREEN = False
renderer.MNE_3D_BACKEND_TESTING = False
else:
pyvista.OFF_SCREEN = True
try:
yield
finally:
pyvista.OFF_SCREEN = orig_offscreen
renderer.MNE_3D_BACKEND_TESTING = orig_testing
@contextmanager
def _disabled_depth_peeling():
from pyvista import rcParams
depth_peeling_enabled = rcParams["depth_peeling"]["enabled"]
rcParams["depth_peeling"]["enabled"] = False
try:
yield
finally:
rcParams["depth_peeling"]["enabled"] = depth_peeling_enabled
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = tf.contrib.learn.datasets.load_boston()
features = tf.train.limit_epochs(
tf.reshape(tf.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
target = tf.reshape(tf.constant(boston.target), [-1, 1])
return features, target
def iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
target = tf.reshape(tf.constant(iris.target), [-1])
return features, target
def iris_input_fn_target_dict():
iris = tf.contrib.learn.datasets.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
target = {
'target': tf.reshape(tf.constant(iris.target), [-1])
}
return features, target
def boston_eval_fn():
boston = tf.contrib.learn.datasets.load_boston()
n_examples = len(boston.target)
features = tf.reshape(
tf.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
target = tf.reshape(tf.constant(boston.target), [n_examples, 1])
return tf.concat(0, [features, features]), tf.concat(0, [target, target])
def linear_model_params_fn(features, target, mode, params):
assert mode in (
tf.contrib.learn.ModeKeys.TRAIN,
tf.contrib.learn.ModeKeys.EVAL,
tf.contrib.learn.ModeKeys.INFER)
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, target, mode):
assert mode in (
tf.contrib.learn.ModeKeys.TRAIN,
tf.contrib.learn.ModeKeys.EVAL,
tf.contrib.learn.ModeKeys.INFER)
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_no_mode_fn(features, target):
if isinstance(target, dict):
target = target['target']
target = tf.one_hot(target, 3, 1, 0)
prediction, loss = (
tf.contrib.learn.models.logistic_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
class CheckCallsMonitor(tf.contrib.learn.monitors.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(tf.test.TestCase):
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, target):
# pylint: disable=unused-argument
tf.Variable(42.0, 'weight')
return None, None, None
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, target, mode):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
if mode == tf.contrib.learn.ModeKeys.EVAL:
loss = None
return None, loss, train_op
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, target):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(boston_input_fn, num_epochs=1),
as_iterable=True)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = tf.get_default_graph().seed
return tf.constant([[1.]]), tf.constant([1.])
config = tf.contrib.learn.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testCheckInputs(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_targets = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_targets(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
    wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
    wrong_size_features = np.ones(shape=[7, 10])
    wrong_type_targets = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_targets = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_targets, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_targets, steps=1)
def testBadInput(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(ValueError,
'Either x or input_fn must be provided.',
est.fit, x=None, input_fn=None)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, x='X', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, y='Y', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and batch_size',
est.fit, input_fn=iris_input_fn, batch_size=100)
self.assertRaisesRegexp(
ValueError, 'Inputs cannot be tensors. Please provide input_fn.',
est.fit, x=tf.constant(1.))
def testUntrained(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
with self.assertRaises(tf.contrib.learn.NotFittedError):
_ = est.evaluate(
x=boston.data,
y=boston.target.astype(np.float64))
with self.assertRaises(tf.contrib.learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = tf.contrib.learn.datasets.load_boston()
output_dir = tempfile.mkdtemp()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
float64_target = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_target, steps=50)
scores = est.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'],
scores['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_target)
self.assertAllClose(other_score, scores['MSE'])
# Check we can keep training.
est2.fit(x=boston.data, y=float64_target, steps=100)
scores3 = est2.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_params_fn,
params={'learning_rate': 0.01})
est.fit(x=boston.data, y=boston.target, steps=100)
def testBostonAll(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
float64_target = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_target, steps=100)
scores = est.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): tf.contrib.metrics.streaming_accuracy})
predictions = list(est.predict(x=iris.data))
predictions_class = list(est.predict(x=iris.data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(
classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnTargetIsDict(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_target_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_target_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='class',
label_key='target')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIterator(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testTrainStepsIsIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {'other': tf.constant([0, 0, 0])}, tf.constant([0, 0, 0])
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitors(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testSummaryWriting(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = tf.contrib.testing.simple_values_from_events(
tf.contrib.testing.latest_events(est.model_dir), ['loss'])
self.assertEqual(len(loss_summary), 1)
def testLossInGraphCollection(self):
class _LossCheckerHook(tf.train.SessionRunHook):
def begin(self):
self.loss_collection = tf.get_collection(tf.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with tf.test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(actual, expected)
class InferRealValuedColumnsTest(tf.test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
tf.contrib.learn.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
tf.contrib.learn.infer_real_valued_columns_from_input(tf.constant(1.0))
def _assert_single_feature_column(
self, expected_shape, expected_dtype, feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'': tf.FixedLenFeature(shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int32), None))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int64), None))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testFloat32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float32), None))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float64), None))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.constant(False, shape=[7, 8], dtype=tf.bool), None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (
tf.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column(
[_BOSTON_INPUT_DIM], tf.float64, feature_columns)
def testIrisInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column(
[_IRIS_INPUT_DIM], tf.float64, feature_columns)
class ReplicaDeviceSetterTest(tf.test.TestCase):
def testVariablesAreOnPs(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=1))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=0))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=1))):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=0))):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
with tf.device(
estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(
num_ps_replicas=1, job_name='worker', task=3))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
tf.test.main()
|
|
"""Functions for parsing the parameter data files"""
import yaml
import pkgutil
import numpy as np
from flavio.classes import *
from flavio.statistics.probability import *
from flavio._parse_errors import errors_from_string
import flavio
import re
from flavio.measurements import _fix_correlation_matrix
from math import sqrt
from particle import Particle, data as p_data
def _read_yaml_object_metadata(obj, constraints):
parameters = yaml.safe_load(obj)
for parameter_name, info in parameters.items():
p = Parameter(parameter_name)
if 'description' in info and info['description'] is not None:
p.description = info['description']
if 'tex' in info and info['tex'] is not None:
p.tex = info['tex']
def read_file_metadata(filename, constraints):
"""Read parameter values from a YAML file."""
with open(filename, 'r') as f:
_read_yaml_object_metadata(f, constraints)
def _read_yaml_object_values(obj, constraints):
parameters = yaml.safe_load(obj)
for parameter_name, value in parameters.items():
p = Parameter[parameter_name] # this will raise an error if the parameter doesn't exist!
constraints.set_constraint(parameter_name, value)
def _read_yaml_object_new(obj):
"""Read parameter constraints from a YAML stream or file that are compatible
with the format generated by the `get_yaml` method of
`flavio.classes.ParameterConstraints`."""
parameters = yaml.safe_load(obj)
return ParameterConstraints.from_yaml_dict(parameters)
def _read_yaml_object_values_correlated(obj, constraints):
list_ = yaml.safe_load(obj)
for parameter_group in list_:
parameter_names = []
central_values = []
errors = []
for dict_list in parameter_group['values']:
parameter_name, value = list(dict_list.items())[0]
Parameter[parameter_name] # this will raise an error if the parameter doesn't exist!
parameter_names.append(parameter_name)
error_dict = errors_from_string(value)
central_values.append(error_dict['central_value'])
squared_error = 0.
for sym_err in error_dict['symmetric_errors']:
squared_error += sym_err**2
for asym_err in error_dict['asymmetric_errors']:
squared_error += asym_err[0]*asym_err[1]
errors.append(sqrt(squared_error))
correlation = _fix_correlation_matrix(parameter_group['correlation'], len(parameter_names))
covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
if not np.all(np.linalg.eigvals(covariance) > 0):
# if the covariance matrix is not positive definite, try a dirty trick:
# multiply all the correlations by 0.99.
n_dim = len(correlation)
correlation = (correlation - np.eye(n_dim))*0.99 + np.eye(n_dim)
covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
# if it still isn't positive definite, give up.
assert np.all(np.linalg.eigvals(covariance) > 0), "The covariance matrix is not positive definite!" + str(covariance)
constraints.add_constraint(parameter_names, MultivariateNormalDistribution(central_values, covariance))
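# Illustrative sketch (not part of the original module, never called): the
# covariance matrix built above is the outer product of the per-parameter
# errors scaled element-wise by the correlation matrix. Numbers are made up.
def _example_correlated_covariance():
    errors = np.array([0.1, 0.2])
    correlation = np.array([[1.0, 0.5],
                            [0.5, 1.0]])
    covariance = np.outer(errors, errors) * correlation
    # covariance == [[0.01, 0.01], [0.01, 0.04]]
    return covariance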
def read_file(filename):
"""Read parameter values from a YAML file in the format generated by the
`get_yaml` method of the `ParameterConstraints` class, returning a
`ParameterConstraints` instance."""
with open(filename, 'r') as f:
return _read_yaml_object_new(f)
def read_file_values(filename, constraints):
"""Read parameter values from a YAML file."""
with open(filename, 'r') as f:
_read_yaml_object_values(f, constraints)
def read_file_values_correlated(filename, constraints):
"""Read parameter values from a YAML file."""
with open(filename, 'r') as f:
_read_yaml_object_values_correlated(f, constraints)
def write_file(filename, constraints):
"""Write parameter constraints to a YAML file."""
with open(filename, 'w') as f:
yaml.dump(constraints.get_yaml_dict(), f)
class FlavioParticle(Particle):
"""This class extends the `particle.Particle` class.
Additional class methods
------------------------
- from_flavio_name(flavio_name)
returns a class instance for a given `flavio_name`
- flavio_all()
returns a set of all class instances used in flavio
Additional properties
---------------------
- flavio_name
the particle name as used in flavio if defined, otherwise `None`
- latex_name_simplified
a simplified version of the latex name returned by `latex_name`
- flavio_m
a tuple with data on the particle mass as used in flavio, containing
entries `name`, `tex`, `description`, `central`, `right`, `left`
- flavio_tau
a tuple with data on the particle lifetime as used in flavio, containing
entries `name`, `tex`, `description`, `central`, `right`, `left`
"""
PDG_PARTICLES = {
'Bs': 531,
'Bc': 541,
'Bs*': 533,
'B*+': 523,
'B*0': 513,
'B+': 521,
'B0': 511,
'Ds': 431,
'Ds*': 433,
'D+': 411,
'D0': 421,
'h': 25,
'J/psi': 443,
'KL': 130,
'KS': 310,
'K*+': 323,
'K*0': 313,
'K+': 321,
'K0': 311,
'Lambda': 3122,
'Lambdab': 5122,
'Lambdac': 4122,
'omega': 223,
'D*0': 423,
'D*+': 413,
'W': 24,
'Z': 23,
'e': 11,
'eta': 221,
'f0': 9010221,
'mu': 13,
'phi': 333,
'pi+': 211,
'pi0': 111,
'psi(2S)': 100443,
'rho+': 213,
'rho0': 113,
't': 6,
'tau': 15,
'u': 2,
'p': 2212,
'n': 2112,
}
_pdg_particles_inv = {v:k for k,v in PDG_PARTICLES.items()}
_pdg_tex_regex = re.compile(
r"^([A-Za-z\\/]+)" # latin or greek letters or slash
r"(?:_\{(.*?)\})*" # _{...}
r"(?:\^\{(.*?)\})*" # ^{...}
r"(?:\((.*?)\))*" # (...)
r"(?:\^\{(.*?)\})*" # ^{...}
)
@classmethod
def from_flavio_name(cls, flavio_name):
return cls.from_pdgid(cls.PDG_PARTICLES[flavio_name])
@classmethod
def flavio_all(cls):
return {particle for particle in cls.all() if particle.flavio_name}
@property
def flavio_name(self):
return self._pdg_particles_inv.get(self.pdgid, None)
@property
def latex_name_simplified(self):
m = self._pdg_tex_regex.match(self.latex_name)
if m is None:
return self.latex_name
name = m.group(1)
sub = m.group(2)
sup = (m.group(3) or '') + (m.group(5) or '')
par = m.group(4)
if sub or name in ('W', 'Z', 'H', 'e', '\\mu', '\\tau'):
# remove superscripts +-0 and keep only *
sup = '*' if '*' in sup else ''
if not sub and par and not par.isdigit() and name != 'J/\\psi':
            # subscript absent and parentheses contain a letter, but not for 'J/\\psi'
sub = par
sub_tex = r'_{' + sub + r'}' if sub else ''
sup_tex = r'^{' + sup + r'}' if sup else ''
return name + sub_tex + sup_tex
@property
def flavio_m(self):
name = 'm_' + self.flavio_name
tex = r'$m_{' + self.latex_name_simplified + '}$'
pole_mass = ' quark pole' if self.name == 't' else ''
description = r'${}${} mass'.format(
self.latex_name_simplified, pole_mass
)
central = self.mass*1e-3
right = self.mass_upper*1e-3
left = self.mass_lower*1e-3
return name, tex, description, central, right, left
@property
def flavio_tau(self):
if {self.width, self.width_upper, self.width_lower} & {None, 0}:
return None
name = 'tau_' + self.flavio_name
tex = r'$\tau_{' + self.latex_name_simplified + '}$'
description = r'${}$ lifetime'.format(self.latex_name_simplified)
G_central = self.width*1e-3
G_right = self.width_upper*1e-3
G_left = self.width_lower*1e-3
central = 1/G_central # life time = 1/width
right = G_right/G_central**2
left = G_left/G_central**2
return name, tex, description, central, right, left
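# Illustrative usage sketch (not part of the original module, never called):
# looking up a particle by its flavio name via the class methods documented
# in the FlavioParticle docstring above.
def _example_flavio_particle():
    p = FlavioParticle.from_flavio_name('B0')
    # p.flavio_name == 'B0'; p.flavio_m and p.flavio_tau give the mass and
    # lifetime data tuples consumed by read_pdg below.
    return p.flavio_name, p.latex_name_simplified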
def read_pdg(year, constraints):
"""Read particle masses and widths from the PDG data file of a given year."""
FlavioParticle.load_table(p_data.basepath / f"particle{year}.csv")
for particle in FlavioParticle.flavio_all():
for data in (particle.flavio_m, particle.flavio_tau):
if data is None:
continue
name, tex, description, central, right, left = data
try:
# if parameter already exists, remove existing constraints on it
p = Parameter[name]
constraints.remove_constraint(name)
except KeyError:
# otherwise, create it
p = Parameter(name)
p.tex = tex
p.description = description
if right == left:
constraints.add_constraint([name],
NormalDistribution(central, right))
else:
constraints.add_constraint([name],
AsymmetricNormalDistribution(central,
right_deviation=right, left_deviation=left))
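# Illustrative usage sketch (not part of the original module, never called):
# filling a fresh ParameterConstraints instance with PDG masses and lifetimes.
# The year is an example value and must match a data file shipped with the
# `particle` package.
def _example_read_pdg(year=2018):
    constraints = ParameterConstraints()
    read_pdg(year, constraints)
    return constraints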
############### Read default parameters ###################
# Create the object
default_parameters = ParameterConstraints()
# read default parameters
default_parameters.read_default()
|
|
# member/views.py
import json
import requests
import rethinkdb as r
from rethinkdb.errors import RqlDriverError
from flask import g, Blueprint, render_template, request, \
url_for, redirect, abort, flash
import stathat
from monitors import Monitor
from users import User
from forms import ChangePassForm
member_blueprint = Blueprint('member', __name__,)
from web import app, verifyLogin, startData
@member_blueprint.before_app_request
def before_request():
'''
    This function establishes a connection
    to RethinkDB before each request
'''
try:
g.rdb_conn = r.connect(
host=app.config['DBHOST'], port=app.config['DBPORT'],
auth_key=app.config['DBAUTHKEY'], db=app.config['DATABASE'])
except RqlDriverError: # pragma: no cover
# If no connection possible throw 503 error
abort(503, "No Database Connection \
Could be Established.") # pragma: no cover
@member_blueprint.teardown_app_request
def teardown_request(exception):
''' This function closes the database connection when done '''
try:
g.rdb_conn.close()
except AttributeError: # pragma: no cover
# Who cares?
pass # pragma: no cover
#############################
### Member View Functions ###
#############################
# Dashboard Home
@member_blueprint.route('/dashboard')
def dashboard_page():
''' Dashboard: Generate the Welcome/Status page for the dashboard '''
verify = verifyLogin(
app.config['SECRET_KEY'], app.config['COOKIE_TIMEOUT'], request.cookies)
if verify:
user = User()
user.config = app.config
user.get('uid', verify, g.rdb_conn)
data = startData(user)
data['active'] = 'dashboard'
data['url'] = '/dashboard'
data['js_bottom'].append("member/screen-o-death.js")
data['js_bottom'].append("member/screen-o-death-chart.js")
data['js_bottom'].append("member/monitors.js")
if user.status != "active":
data['url'] = '/dashboard/mod-subscription'
page = render_template('member/mod-subscription.html', data=data)
else:
data['monitors'] = user.getMonitors(g.rdb_conn)
data['reactions'] = user.getReactions(g.rdb_conn)
data['monevents'] = user.getEvents(g.rdb_conn)
data['moneventsnum'] = len(data['monevents'])
data['monstats'] = {'true': 0,
'unknown': 0,
'false': 0}
            for key in data['monitors'].keys():
                if "true" in data['monitors'][key]['status']:
                    data['monstats']['true'] += 1
                elif "false" in data['monitors'][key]['status']:
                    data['monstats']['false'] += 1
                else:
                    data['monstats']['unknown'] += 1
# If there are no monitors print a welcome message
if len(data['monitors']) < 1:
data['welcome'] = True
data['mons'] = False
else:
data['welcome'] = False
data['mons'] = True
if len(data['reactions']) < 1:
data['reacts'] = False
else:
data['reacts'] = True
from generalforms import subscribe
form = subscribe.AddPackForm(request.form)
page = render_template(
'member/screen-o-death.html', data=data, form=form)
return page
else:
flash('Please Login.', 'warning')
return redirect(url_for('user.login_page'))
# Mod-Subscription
@member_blueprint.route('/dashboard/mod-subscription', methods=['GET', 'POST'])
def modsub_page():
'''Dashboard Modify Subscription:
This will allow a user to modify their subscription and account plan
'''
verify = verifyLogin(
app.config['SECRET_KEY'], app.config['COOKIE_TIMEOUT'], request.cookies)
if verify:
user = User()
user.config = app.config
user.get('uid', verify, g.rdb_conn)
data = startData(user)
data['active'] = 'dashboard'
data['url'] = '/dashboard/mod-subscription'
data['uid'] = user.uid
tmpl = 'member/mod-subscription.html'
data['js_bottom'].append('forms/subscribe.js')
form = []
from generalforms import subscribe
payment = __import__("payments." + user.payments, globals(),
locals(), ['Payments'], -1)
subscription = payment.Payments(user=user, config=app.config, rdb=g.rdb_conn)
if request.method == "POST":
if data['upgraded'] is True:
result = subscription.adjust(request)
else:
result = subscription.create(request)
if result is True:
if data['upgraded'] is True:
flash('Adjustment to subscription successful', 'success')
else:
data['upgraded'] = True
flash('Successfully Subscribed!', 'success')
newdata = startData(user)
data['limit'] = newdata['limit']
data['rlimit'] = newdata['rlimit']
data['acttype'] = newdata['acttype']
data['subplans'] = newdata['subplans']
data['cost'] = newdata['cost']
data['subscription'] = newdata['subscription']
else:
flash('Unable to adjust subscription please notify support', 'danger')
if data['upgraded'] is True:
form = subscribe.AddPackForm(request.form)
page = render_template(tmpl, data=data, form=form)
return page
else:
flash('Please Login.', 'warning')
return redirect(url_for('user.login_page'))
# User-Preferences
@member_blueprint.route('/dashboard/user-preferences', methods=['GET', 'POST'])
def userpref_page():
'''
    Dashboard User Preferences:
This will allow a user to change user preferences, i.e. Password
'''
verify = verifyLogin(
app.config['SECRET_KEY'], app.config['COOKIE_TIMEOUT'], request.cookies)
if verify:
user = User()
user.config = app.config
user.get('uid', verify, g.rdb_conn)
data = startData(user)
data['active'] = 'dashboard'
if user.status != "active":
data['url'] = '/dashboard/mod-subscription'
tmpl = 'member/mod-subscription.html'
else:
# Start processing the change password form
form = ChangePassForm(request.form)
if request.method == 'POST':
if form.validate():
result = user.checkPass(form.old_password.data, g.rdb_conn)
if result:
update = user.setPass(form.password.data, g.rdb_conn)
if update:
print("/dashboard/user-preferences - Password changed")
flash('Password successfully changed.', 'success')
else:
print("/dashboard/user-preferences - \
Password change failed")
flash('Password change was unsuccessful.', 'danger')
else:
print("/login - User change password error: wrong old password")
flash('Old password does not seem valid.', 'danger')
data['url'] = '/dashboard/user-preferences'
tmpl = 'member/user-preferences.html'
page = render_template(tmpl, data=data, form=form)
return page
else:
flash('Please Login.', 'warning')
return redirect(url_for('user.login_page'))
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal_nulp
from astropy.stats.biweight import (biweight_location, biweight_scale,
biweight_midvariance, biweight_midcovariance,
biweight_midcorrelation)
from astropy.tests.helper import catch_warnings
from astropy.utils.misc import NumpyRNGContext
def test_biweight_location():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
cbl = biweight_location(randvar)
assert abs(cbl - 0) < 1e-2
def test_biweight_location_constant():
cbl = biweight_location(np.ones((10, 5)))
assert cbl == 1.
def test_biweight_location_constant_axis_2d():
shape = (10, 5)
data = np.ones(shape)
cbl = biweight_location(data, axis=0)
assert_allclose(cbl, np.ones(shape[1]))
cbl = biweight_location(data, axis=1)
assert_allclose(cbl, np.ones(shape[0]))
val1 = 100.
val2 = 2.
data = np.arange(50).reshape(10, 5)
data[2] = val1
data[7] = val2
cbl = biweight_location(data, axis=1)
assert_allclose(cbl[2], val1)
assert_allclose(cbl[7], val2)
def test_biweight_location_constant_axis_3d():
shape = (10, 5, 2)
data = np.ones(shape)
cbl = biweight_location(data, axis=0)
assert_allclose(cbl, np.ones((shape[1], shape[2])))
cbl = biweight_location(data, axis=1)
assert_allclose(cbl, np.ones((shape[0], shape[2])))
cbl = biweight_location(data, axis=2)
assert_allclose(cbl, np.ones((shape[0], shape[1])))
def test_biweight_location_small():
cbl = biweight_location([1, 3, 5, 500, 2])
assert abs(cbl - 2.745) < 1e-3
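# Illustrative sketch (not an astropy test, not collected by pytest): the
# biweight location written out directly as M + sum(d_i*w_i)/sum(w_i), with
# d_i = x_i - M, u_i = d_i / (c * MAD) and w_i = (1 - u_i**2)**2 for
# |u_i| < 1 (0 otherwise), using astropy's default tuning constant c = 6.0.
# It reproduces the 2.745 value checked in test_biweight_location_small.
def _example_biweight_location_formula():
    x = np.array([1., 3., 5., 500., 2.])
    med = np.median(x)
    mad = np.median(np.abs(x - med))
    u = (x - med) / (6.0 * mad)
    w = np.where(np.abs(u) < 1, (1 - u ** 2) ** 2, 0.0)
    expected = med + np.sum((x - med) * w) / np.sum(w)
    assert_allclose(biweight_location(x), expected)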
def test_biweight_location_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_location(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_location(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_location(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_location_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_location(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_scale():
# NOTE: biweight_scale is covered by biweight_midvariance tests
data = [1, 3, 5, 500, 2]
scl = biweight_scale(data)
var = biweight_midvariance(data)
assert_allclose(scl, np.sqrt(var))
def test_biweight_midvariance():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
var = biweight_midvariance(randvar)
assert_allclose(var, 1.0, rtol=0.02)
def test_biweight_midvariance_small():
data = [1, 3, 5, 500, 2]
var = biweight_midvariance(data)
assert_allclose(var, 2.9238456) # verified with R
var = biweight_midvariance(data, modify_sample_size=True)
assert_allclose(var, 2.3390765)
def test_biweight_midvariance_5127():
# test a regression introduced in #5127
rand = np.random.RandomState(12345)
data = rand.normal(loc=0., scale=20., size=(100, 100))
var = biweight_midvariance(data)
assert_allclose(var, 406.86938710817344) # verified with R
def test_biweight_midvariance_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_midvariance(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_midvariance(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_midvariance(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_midvariance_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_midvariance(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_midvariance_constant_axis():
bw = biweight_midvariance(np.ones((10, 5)))
assert bw == 0.0
def test_biweight_midvariance_constant_axis_2d():
shape = (10, 5)
data = np.ones(shape)
cbl = biweight_midvariance(data, axis=0)
assert_allclose(cbl, np.zeros(shape[1]))
cbl = biweight_midvariance(data, axis=1)
assert_allclose(cbl, np.zeros(shape[0]))
data = np.arange(50).reshape(10, 5)
data[2] = 100.
data[7] = 2.
bw = biweight_midvariance(data, axis=1)
assert_allclose(bw[2], 0.)
assert_allclose(bw[7], 0.)
def test_biweight_midvariance_constant_axis_3d():
shape = (10, 5, 2)
data = np.ones(shape)
cbl = biweight_midvariance(data, axis=0)
assert_allclose(cbl, np.zeros((shape[1], shape[2])))
cbl = biweight_midvariance(data, axis=1)
assert_allclose(cbl, np.zeros((shape[0], shape[2])))
cbl = biweight_midvariance(data, axis=2)
assert_allclose(cbl, np.zeros((shape[0], shape[1])))
def test_biweight_midcovariance_1d():
d = [0, 1, 2]
cov = biweight_midcovariance(d)
var = biweight_midvariance(d)
assert_allclose(cov, [[var]])
def test_biweight_midcovariance_2d():
d = [[0, 1, 2], [2, 1, 0]]
cov = biweight_midcovariance(d)
val = 0.70121809
assert_allclose(cov, [[val, -val], [-val, val]]) # verified with R
d = [[5, 1, 10], [500, 5, 2]]
cov = biweight_midcovariance(d)
assert_allclose(cov, [[14.54159077, -7.79026256], # verified with R
[-7.79026256, 6.92087252]])
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_allclose(cov, [[14.54159077, -5.19350838],
[-5.19350838, 4.61391501]])
def test_biweight_midcovariance_constant():
data = np.ones((3, 10))
cov = biweight_midcovariance(data)
assert_allclose(cov, np.zeros((3, 3)))
def test_biweight_midcovariance_midvariance():
"""
Test that biweight_midcovariance diagonal elements agree with
biweight_midvariance.
"""
rng = np.random.RandomState(1)
d = rng.normal(0, 2, size=(100, 3))
cov = biweight_midcovariance(d)
var = [biweight_midvariance(a) for a in d]
assert_allclose(cov.diagonal(), var)
cov2 = biweight_midcovariance(d, modify_sample_size=True)
var2 = [biweight_midvariance(a, modify_sample_size=True)
for a in d]
assert_allclose(cov2.diagonal(), var2)
def test_midcovariance_shape():
"""
Test that biweight_midcovariance raises error with a 3D array.
"""
d = np.ones(27).reshape(3, 3, 3)
with pytest.raises(ValueError) as e:
biweight_midcovariance(d)
assert 'The input array must be 2D or 1D.' in str(e.value)
def test_midcovariance_M_shape():
"""
Test that biweight_midcovariance raises error when M is not a scalar
or 1D array.
"""
d = [0, 1, 2]
M = [[0, 1], [2, 3]]
with pytest.raises(ValueError) as e:
biweight_midcovariance(d, M=M)
assert 'M must be a scalar or 1D array.' in str(e.value)
def test_biweight_midcovariance_symmetric():
"""
Regression test to ensure that midcovariance matrix is symmetric
when ``modify_sample_size=True`` (see #5972).
"""
rng = np.random.RandomState(1)
d = rng.gamma(2, 2, size=(3, 500))
cov = biweight_midcovariance(d)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
def test_biweight_midcorrelation():
x = [0, 1, 2]
y = [2, 1, 0]
assert_allclose(biweight_midcorrelation(x, x), 1.0)
assert_allclose(biweight_midcorrelation(x, y), -1.0)
x = [5, 1, 10, 12.4, 13.2]
y = [500, 5, 2, 7.1, 0.9]
# verified with R
assert_allclose(biweight_midcorrelation(x, y), -0.14411038976763313)
def test_biweight_midcorrelation_inputs():
a1 = np.ones((3, 3))
a2 = np.ones(5)
a3 = np.ones(7)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a1, a2)
assert 'x must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a1)
assert 'y must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a3)
assert 'x and y must have the same shape.' in str(e.value)
def test_biweight_32bit_runtime_warnings():
"""Regression test for #6905."""
with NumpyRNGContext(12345):
data = np.random.random(100).astype(np.float32)
data[50] = 30000.
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_scale(data)
assert len(warning_lines) == 0
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_midvariance(data)
assert len(warning_lines) == 0
|
|
import pytest, py, sys, os
from _pytest import runner
from py._code.code import ReprExceptionInfo
class TestSetupState:
def test_setup(self, testdir):
ss = runner.SetupState()
item = testdir.getitem("def test_func(): pass")
l = [1]
ss.prepare(item)
ss.addfinalizer(l.pop, colitem=item)
assert l
ss._pop_and_teardown()
assert not l
def test_setup_scope_None(self, testdir):
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
l = [1]
ss.prepare(item)
ss.addfinalizer(l.pop, colitem=None)
assert l
ss._pop_and_teardown()
assert l
ss._pop_and_teardown()
assert l
ss.teardown_all()
assert not l
def test_teardown_exact_stack_empty(self, testdir):
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
ss.teardown_exact(item, None)
ss.teardown_exact(item, None)
ss.teardown_exact(item, None)
def test_setup_fails_and_failure_is_cached(self, testdir):
item = testdir.getitem("""
def setup_module(mod):
raise ValueError(42)
def test_func(): pass
""")
ss = runner.SetupState()
pytest.raises(ValueError, "ss.prepare(item)")
pytest.raises(ValueError, "ss.prepare(item)")
class BaseFunctionalTests:
def test_passfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
pass
""")
rep = reports[1]
assert rep.passed
assert not rep.failed
assert rep.outcome == "passed"
assert not rep.longrepr
def test_failfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.passed
assert not rep.skipped
assert rep.failed
assert rep.when == "call"
assert rep.outcome == "failed"
#assert isinstance(rep.longrepr, ReprExceptionInfo)
def test_skipfunction(self, testdir):
reports = testdir.runitem("""
import pytest
def test_func():
pytest.skip("hello")
""")
rep = reports[1]
assert not rep.failed
assert not rep.passed
assert rep.skipped
assert rep.outcome == "skipped"
#assert rep.skipped.when == "call"
#assert rep.skipped.when == "call"
#assert rep.skipped == "%sreason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.path
#assert not rep.skipped.failurerepr
def test_skip_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
pytest.skip("hello")
def test_func():
pass
""")
print(reports)
rep = reports[0]
assert not rep.failed
assert not rep.passed
assert rep.skipped
#assert rep.skipped.reason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.lineno == 3
assert len(reports) == 2
assert reports[1].passed # teardown
def test_failure_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
raise ValueError(42)
def test_func():
pass
""")
rep = reports[0]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "setup"
assert len(reports) == 2
def test_failure_in_teardown_function(self, testdir):
reports = testdir.runitem("""
import pytest
def teardown_function(func):
raise ValueError(42)
def test_func():
pass
""")
print(reports)
assert len(reports) == 3
rep = reports[2]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "teardown"
#assert rep.longrepr.reprcrash.lineno == 3
#assert rep.longrepr.reprtraceback.reprentries
def test_custom_failure_repr(self, testdir):
testdir.makepyfile(conftest="""
import pytest
class Function(pytest.Function):
def repr_failure(self, excinfo):
return "hello"
""")
reports = testdir.runitem("""
import pytest
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.skipped
assert not rep.passed
assert rep.failed
#assert rep.outcome.when == "call"
#assert rep.failed.where.lineno == 3
#assert rep.failed.where.path.basename == "test_func.py"
#assert rep.failed.failurerepr == "hello"
def test_teardown_final_returncode(self, testdir):
rec = testdir.inline_runsource("""
def test_func():
pass
def teardown_function(func):
raise ValueError(42)
""")
assert rec.ret == 1
def test_exact_teardown_issue90(self, testdir):
rec = testdir.inline_runsource("""
import pytest
class TestClass:
def test_method(self):
pass
def teardown_class(cls):
raise Exception()
def test_func():
import sys
                # on python2 exc_info is kept till a function exits
# so we would end up calling test functions while
# sys.exc_info would return the indexerror
# from guessing the lastitem
assert sys.exc_info()[0] is None
def teardown_function(func):
raise ValueError(42)
""")
reps = rec.getreports("pytest_runtest_logreport")
print (reps)
for i in range(2):
assert reps[i].nodeid.endswith("test_method")
assert reps[i].passed
assert reps[2].when == "teardown"
assert reps[2].failed
assert len(reps) == 6
for i in range(3,5):
assert reps[i].nodeid.endswith("test_func")
assert reps[i].passed
assert reps[5].when == "teardown"
assert reps[5].nodeid.endswith("test_func")
assert reps[5].failed
def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
testdir.makepyfile(conftest="""
import pytest
class Function(pytest.Function):
def repr_failure(self, excinfo):
assert 0
""")
reports = testdir.runitem("""
def setup_function(func):
raise ValueError(42)
def test_func():
pass
""")
assert len(reports) == 2
rep = reports[0]
print(rep)
assert not rep.skipped
assert not rep.passed
assert rep.failed
#assert rep.outcome.when == "setup"
#assert rep.outcome.where.lineno == 3
#assert rep.outcome.where.path.basename == "test_func.py"
        #assert isinstance(rep.failed.failurerepr, PythonFailureRepr)
def test_systemexit_does_not_bail_out(self, testdir):
try:
reports = testdir.runitem("""
def test_func():
raise SystemExit(42)
""")
except SystemExit:
py.test.fail("runner did not catch SystemExit")
rep = reports[1]
assert rep.failed
assert rep.when == "call"
def test_exit_propagates(self, testdir):
try:
testdir.runitem("""
import pytest
def test_func():
raise pytest.exit.Exception()
""")
except py.test.exit.Exception:
pass
else:
py.test.fail("did not raise")
class TestExecutionNonForked(BaseFunctionalTests):
def getrunner(self):
def f(item):
return runner.runtestprotocol(item, log=False)
return f
def test_keyboardinterrupt_propagates(self, testdir):
try:
testdir.runitem("""
def test_func():
raise KeyboardInterrupt("fake")
""")
except KeyboardInterrupt:
pass
else:
py.test.fail("did not raise")
class TestExecutionForked(BaseFunctionalTests):
pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")
def getrunner(self):
# XXX re-arrange this test to live in pytest-xdist
xplugin = py.test.importorskip("xdist.plugin")
return xplugin.forked_run_report
def test_suicide(self, testdir):
reports = testdir.runitem("""
def test_func():
import os
os.kill(os.getpid(), 15)
""")
rep = reports[0]
assert rep.failed
assert rep.when == "???"
class TestSessionReports:
def test_collect_result(self, testdir):
col = testdir.getmodulecol("""
def test_func1():
pass
class TestClass:
pass
""")
rep = runner.pytest_make_collect_report(col)
assert not rep.failed
assert not rep.skipped
assert rep.passed
locinfo = rep.location
assert locinfo[0] == col.fspath.basename
assert not locinfo[1]
assert locinfo[2] == col.fspath.basename
res = rep.result
assert len(res) == 2
assert res[0].name == "test_func1"
assert res[1].name == "TestClass"
def test_skip_at_module_scope(self, testdir):
col = testdir.getmodulecol("""
import pytest
pytest.skip("hello")
def test_func():
pass
""")
rep = runner.pytest_make_collect_report(col)
assert not rep.failed
assert not rep.passed
assert rep.skipped
reporttypes = [
runner.BaseReport,
runner.TestReport,
runner.TeardownErrorReport,
runner.CollectReport,
]
@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
def test_report_extra_parameters(reporttype):
args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
basekw = dict.fromkeys(args, [])
report = reporttype(newthing=1, **basekw)
assert report.newthing == 1
def test_callinfo():
ci = runner.CallInfo(lambda: 0, '123')
assert ci.when == "123"
assert ci.result == 0
assert "result" in repr(ci)
ci = runner.CallInfo(lambda: 0/0, '123')
assert ci.when == "123"
assert not hasattr(ci, 'result')
assert ci.excinfo
assert "exc" in repr(ci)
# design question: do we want general hooks in python files?
# then something like the following functional tests makes sense
@pytest.mark.xfail
def test_runtest_in_module_ordering(testdir):
p1 = testdir.makepyfile("""
def pytest_runtest_setup(item): # runs after class-level!
item.function.mylist.append("module")
class TestClass:
def pytest_runtest_setup(self, item):
assert not hasattr(item.function, 'mylist')
item.function.mylist = ['class']
def pytest_funcarg__mylist(self, request):
return request.function.mylist
def pytest_runtest_call(self, item, __multicall__):
try:
__multicall__.execute()
except ValueError:
pass
def test_hello1(self, mylist):
assert mylist == ['class', 'module'], mylist
raise ValueError()
def test_hello2(self, mylist):
assert mylist == ['class', 'module'], mylist
def pytest_runtest_teardown(item):
del item.function.mylist
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*2 passed*"
])
def test_outcomeexception_exceptionattributes():
outcome = runner.OutcomeException('test')
assert outcome.args[0] == outcome.msg
def test_pytest_exit():
try:
py.test.exit("hello")
except py.test.exit.Exception:
excinfo = py.code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
def test_pytest_fail():
try:
py.test.fail("hello")
except py.test.fail.Exception:
excinfo = py.code.ExceptionInfo()
s = excinfo.exconly(tryshort=True)
assert s.startswith("Failed")
def test_pytest_fail_notrace(testdir):
testdir.makepyfile("""
import pytest
def test_hello():
pytest.fail("hello", pytrace=False)
def teardown_function(function):
pytest.fail("world", pytrace=False)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"world",
"hello",
])
assert 'def teardown_function' not in result.stdout.str()
def test_exception_printing_skip():
try:
pytest.skip("hello")
except pytest.skip.Exception:
excinfo = py.code.ExceptionInfo()
s = excinfo.exconly(tryshort=True)
assert s.startswith("Skipped")
def test_importorskip():
importorskip = py.test.importorskip
def f():
importorskip("asdlkj")
try:
sys = importorskip("sys")
assert sys == py.std.sys
#path = py.test.importorskip("os.path")
#assert path == py.std.os.path
excinfo = pytest.raises(pytest.skip.Exception, f)
path = py.path.local(excinfo.getrepr().reprcrash.path)
# check that importorskip reports the actual call
# in this test the test_runner.py file
assert path.purebasename == "test_runner"
pytest.raises(SyntaxError, "py.test.importorskip('x y z')")
pytest.raises(SyntaxError, "py.test.importorskip('x=y')")
path = importorskip("py", minversion=".".join(py.__version__))
mod = py.std.types.ModuleType("hello123")
mod.__version__ = "1.3"
pytest.raises(pytest.skip.Exception, """
py.test.importorskip("hello123", minversion="5.0")
""")
except pytest.skip.Exception:
print(py.code.ExceptionInfo())
py.test.fail("spurious skip")
def test_importorskip_imports_last_module_part():
ospath = py.test.importorskip("os.path")
assert os.path == ospath
def test_pytest_cmdline_main(testdir):
p = testdir.makepyfile("""
import py
def test_hello():
assert 1
if __name__ == '__main__':
py.test.cmdline.main([__file__])
""")
import subprocess
popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
s = popen.stdout.read()
ret = popen.wait()
assert ret == 0
def test_unicode_in_longrepr(testdir):
testdir.makeconftest("""
import py
def pytest_runtest_makereport(__multicall__):
rep = __multicall__.execute()
if rep.when == "call":
rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8")
return rep
""")
testdir.makepyfile("""
def test_out():
assert 0
""")
result = testdir.runpytest()
assert result.ret == 1
assert "UnicodeEncodeError" not in result.stderr.str()
|
|
# -*- test-case-name: vumi.transports.twitter.tests.test_twitter -*-
from twisted.python import log
from twisted.internet.defer import inlineCallbacks
from txtwitter.twitter import TwitterClient
from txtwitter import messagetools
from vumi.transports.base import Transport
from vumi.config import ConfigBool, ConfigText, ConfigList, ConfigDict
class ConfigTwitterEndpoints(ConfigDict):
field_type = 'twitter_endpoints'
def clean(self, value):
endpoints_dict = super(ConfigTwitterEndpoints, self).clean(value)
if 'dms' not in endpoints_dict and 'tweets' not in endpoints_dict:
self.raise_config_error(
"needs configuration for either dms, tweets or both")
if endpoints_dict.get('dms') == endpoints_dict.get('tweets'):
self.raise_config_error(
"has the same endpoint for dms and tweets: '%s'"
% endpoints_dict['dms'])
return endpoints_dict
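    # Illustrative (hypothetical) values for this field, based on the checks
    # in clean() above:
    #   {'tweets': 'default'}                       -> accepted
    #   {'dms': 'dm_default', 'tweets': 'default'}  -> accepted
    #   {}                                          -> rejected (needs dms, tweets or both)
    #   {'dms': 'default', 'tweets': 'default'}     -> rejected (same endpoint for both)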
class TwitterTransportConfig(Transport.CONFIG_CLASS):
screen_name = ConfigText(
"The screen name for the twitter account",
required=True, static=True)
consumer_key = ConfigText(
"The OAuth consumer key for the twitter account",
required=True, static=True)
consumer_secret = ConfigText(
"The OAuth consumer secret for the twitter account",
required=True, static=True)
access_token = ConfigText(
"The OAuth access token for the twitter account",
required=True, static=True)
access_token_secret = ConfigText(
"The OAuth access token secret for the twitter account",
required=True, static=True)
endpoints = ConfigTwitterEndpoints(
"Which endpoints to use for dms and tweets",
default={'tweets': 'default'}, static=True)
terms = ConfigList(
"A list of terms to be tracked by the transport",
default=[], static=True)
autofollow = ConfigBool(
"Determines whether the transport will follow users that follow the "
"transport's user",
default=False, static=True)
class TwitterTransport(Transport):
"""Twitter transport."""
transport_type = 'twitter'
CONFIG_CLASS = TwitterTransportConfig
NO_USER_ADDR = 'NO_USER'
OUTBOUND_HANDLERS = {
'tweets': 'handle_outbound_tweet',
'dms': 'handle_outbound_dm',
}
def get_client(self, *a, **kw):
return TwitterClient(*a, **kw)
def setup_transport(self):
config = self.get_static_config()
self.screen_name = config.screen_name
self.autofollow = config.autofollow
self.client = self.get_client(
config.access_token,
config.access_token_secret,
config.consumer_key,
config.consumer_secret)
self.endpoints = config.endpoints
for msg_type, endpoint in self.endpoints.iteritems():
handler = getattr(self, self.OUTBOUND_HANDLERS[msg_type])
handler = self.make_outbound_handler(handler)
self.add_outbound_handler(handler, endpoint_name=endpoint)
if config.terms:
self.track_stream = self.client.stream_filter(
self.handle_track_stream, track=config.terms)
self.track_stream.startService()
else:
self.track_stream = None
self.user_stream = self.client.userstream_user(
self.handle_user_stream, with_='user')
self.user_stream.startService()
@inlineCallbacks
def teardown_transport(self):
if self.track_stream is not None:
yield self.track_stream.stopService()
yield self.user_stream.stopService()
def make_outbound_handler(self, twitter_handler):
@inlineCallbacks
def handler(message):
try:
twitter_message = yield twitter_handler(message)
yield self.publish_ack(
user_message_id=message['message_id'],
sent_message_id=twitter_message['id_str'])
except Exception, e:
reason = '%s' % (e,)
log.err('Outbound twitter message failed: %s' % (reason,))
yield self.publish_nack(
user_message_id=message['message_id'],
sent_message_id=message['message_id'],
reason=reason)
return handler
@classmethod
def screen_name_as_addr(cls, screen_name):
return u'@%s' % (screen_name,)
@classmethod
def addr_as_screen_name(cls, addr):
return addr[1:] if addr.startswith('@') else addr
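    # For example (illustrative values): screen_name_as_addr(u'someuser')
    # returns u'@someuser', and addr_as_screen_name(u'@someuser') returns
    # u'someuser'; an address without a leading '@' is passed through unchanged.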
def is_own_tweet(self, message):
user = messagetools.tweet_user(message)
return self.screen_name == messagetools.user_screen_name(user)
def is_own_dm(self, message):
sender = messagetools.dm_sender(message)
return self.screen_name == messagetools.user_screen_name(sender)
def is_own_follow(self, message):
source_screen_name = messagetools.user_screen_name(message['source'])
return source_screen_name == self.screen_name
@classmethod
def tweet_to_addr(cls, tweet):
mentions = messagetools.tweet_user_mentions(tweet)
to_addr = cls.NO_USER_ADDR
if mentions:
mention = mentions[0]
[start_index, end_index] = mention['indices']
if start_index == 0:
to_addr = cls.screen_name_as_addr(mention['screen_name'])
return to_addr
@classmethod
def tweet_from_addr(cls, tweet):
user = messagetools.tweet_user(tweet)
return cls.screen_name_as_addr(messagetools.user_screen_name(user))
@classmethod
def tweet_content(cls, tweet):
to_addr = cls.tweet_to_addr(tweet)
content = messagetools.tweet_text(tweet)
if to_addr != cls.NO_USER_ADDR and content.startswith(to_addr):
content = content[len(to_addr):].lstrip()
return content
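    # Illustrative example (hypothetical tweet): for a tweet whose text is
    # u'@transport_user hello' and whose first user mention has indices [0, 15],
    # tweet_to_addr() returns u'@transport_user' and tweet_content() returns
    # u'hello' (the leading mention is stripped); with no mention at index 0,
    # to_addr falls back to NO_USER_ADDR and the text is left untouched.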
def publish_tweet(self, tweet):
return self.publish_message(
content=self.tweet_content(tweet),
to_addr=self.tweet_to_addr(tweet),
from_addr=self.tweet_from_addr(tweet),
transport_type=self.transport_type,
routing_metadata={
'endpoint_name': self.endpoints['tweets']
},
transport_metadata={
'twitter': {
'status_id': messagetools.tweet_id(tweet)
}
},
helper_metadata={
'twitter': {
'in_reply_to_status_id': (
messagetools.tweet_in_reply_to_id(tweet)),
'in_reply_to_screen_name': (
messagetools.tweet_in_reply_to_screen_name(tweet)),
'user_mentions': messagetools.tweet_user_mentions(tweet),
}
})
def publish_dm(self, dm):
sender = messagetools.dm_sender(dm)
recipient = messagetools.dm_recipient(dm)
return self.publish_message(
content=messagetools.dm_text(dm),
to_addr=self.screen_name_as_addr(recipient['screen_name']),
from_addr=self.screen_name_as_addr(sender['screen_name']),
transport_type=self.transport_type,
routing_metadata={
'endpoint_name': self.endpoints['dms']
},
helper_metadata={
'dm_twitter': {
'id': messagetools.dm_id(dm),
'user_mentions': messagetools.dm_user_mentions(dm),
}
})
def handle_track_stream(self, message):
if messagetools.is_tweet(message):
if self.is_own_tweet(message):
log.msg("Tracked own tweet: %r" % (message,))
else:
log.msg("Tracked a tweet: %r" % (message,))
self.publish_tweet(message)
else:
log.msg("Received non-tweet from tracking stream: %r" % message)
def handle_user_stream(self, message):
if messagetools.is_tweet(message):
return self.handle_inbound_tweet(message)
elif messagetools.is_dm(message.get('direct_message', {})):
return self.handle_inbound_dm(message['direct_message'])
elif message.get('event') == 'follow':
return self.handle_follow(message)
log.msg(
"Received a user stream message that we do not handle: %r" %
message)
def handle_follow(self, follow):
if self.is_own_follow(follow):
log.msg("Received own follow on user stream: %r" % (follow,))
return
log.msg("Received follow on user stream: %r" % (follow,))
if self.autofollow:
screen_name = messagetools.user_screen_name(follow['source'])
log.msg("Auto-following '%s'" %
(self.screen_name_as_addr(screen_name,)))
return self.client.friendships_create(screen_name=screen_name)
def handle_inbound_dm(self, dm):
if self.is_own_dm(dm):
log.msg("Received own DM on user stream: %r" % (dm,))
elif 'dms' not in self.endpoints:
log.msg(
"Discarding DM received on user stream, no endpoint "
"configured for DMs: %r" % (dm,))
else:
log.msg("Received DM on user stream: %r" % (dm,))
self.publish_dm(dm)
def handle_inbound_tweet(self, tweet):
if self.is_own_tweet(tweet):
log.msg("Received own tweet on user stream: %r" % (tweet,))
elif 'tweets' not in self.endpoints:
log.msg(
"Discarding tweet received on user stream, no endpoint "
"configured for tweets: %r" % (tweet,))
else:
log.msg("Received tweet on user stream: %r" % (tweet,))
self.publish_tweet(tweet)
def handle_outbound_dm(self, message):
return self.client.direct_messages_new(
screen_name=self.addr_as_screen_name(message['to_addr']),
text=message['content'])
def handle_outbound_tweet(self, message):
log.msg("Twitter transport sending tweet %r" % (message,))
metadata = message['transport_metadata'].get(self.transport_type, {})
in_reply_to_status_id = metadata.get('status_id')
content = message['content']
if message['to_addr'] != self.NO_USER_ADDR:
content = "%s %s" % (message['to_addr'], content)
return self.client.statuses_update(
content, in_reply_to_status_id=in_reply_to_status_id)
|
|
# filemerge.py - file-level merge handling for Mercurial
#
# Copyright 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import contextlib
import os
import re
import shutil
from .i18n import _
from .node import (
hex,
short,
)
from .pycompat import (
getattr,
open,
)
from . import (
encoding,
error,
formatter,
match,
pycompat,
registrar,
scmutil,
simplemerge,
tagmerge,
templatekw,
templater,
templateutil,
util,
)
from .utils import (
procutil,
stringutil,
)
def _toolstr(ui, tool, part, *args):
return ui.config(b"merge-tools", tool + b"." + part, *args)
def _toolbool(ui, tool, part, *args):
return ui.configbool(b"merge-tools", tool + b"." + part, *args)
def _toollist(ui, tool, part):
return ui.configlist(b"merge-tools", tool + b"." + part)
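# The three helpers above read per-tool settings from the [merge-tools]
# config section; e.g. _toolstr(ui, b'kdiff3', b'executable') looks up
# "merge-tools.kdiff3.executable" ('kdiff3' is only an illustrative tool name).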
internals = {}
# Merge tools to document.
internalsdoc = {}
internaltool = registrar.internalmerge()
# internal tool merge types
nomerge = internaltool.nomerge
mergeonly = internaltool.mergeonly # just the full merge, no premerge
fullmerge = internaltool.fullmerge # both premerge and merge
# IMPORTANT: keep the last line of this prompt very short ("What do you want to
# do?") because of issue6158, ideally to <40 English characters (to allow other
# languages that may take more columns to still have a chance to fit in an
# 80-column screen).
_localchangedotherdeletedmsg = _(
b"file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
b"You can use (c)hanged version, (d)elete, or leave (u)nresolved.\n"
b"What do you want to do?"
b"$$ &Changed $$ &Delete $$ &Unresolved"
)
_otherchangedlocaldeletedmsg = _(
b"file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
b"You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n"
b"What do you want to do?"
b"$$ &Changed $$ &Deleted $$ &Unresolved"
)
class absentfilectx(object):
"""Represents a file that's ostensibly in a context but is actually not
present in it.
This is here because it's very specific to the filemerge code for now --
other code is likely going to break with the values this returns."""
def __init__(self, ctx, f):
self._ctx = ctx
self._f = f
def __bytes__(self):
return b'absent file %s@%s' % (self._f, self._ctx)
def path(self):
return self._f
def size(self):
return None
def data(self):
return None
def filenode(self):
return self._ctx.repo().nullid
_customcmp = True
def cmp(self, fctx):
"""compare with other file context
returns True if different from fctx.
"""
return not (
fctx.isabsent()
and fctx.changectx() == self.changectx()
and fctx.path() == self.path()
)
def flags(self):
return b''
def changectx(self):
return self._ctx
def isbinary(self):
return False
def isabsent(self):
return True
def _findtool(ui, tool):
if tool in internals:
return tool
cmd = _toolstr(ui, tool, b"executable", tool)
if cmd.startswith(b'python:'):
return cmd
return findexternaltool(ui, tool)
def _quotetoolpath(cmd):
if cmd.startswith(b'python:'):
return cmd
return procutil.shellquote(cmd)
def findexternaltool(ui, tool):
for kn in (b"regkey", b"regkeyalt"):
k = _toolstr(ui, tool, kn)
if not k:
continue
p = util.lookupreg(k, _toolstr(ui, tool, b"regname"))
if p:
p = procutil.findexe(p + _toolstr(ui, tool, b"regappend", b""))
if p:
return p
exe = _toolstr(ui, tool, b"executable", tool)
return procutil.findexe(util.expandpath(exe))
def _picktool(repo, ui, path, binary, symlink, changedelete):
strictcheck = ui.configbool(b'merge', b'strict-capability-check')
def hascapability(tool, capability, strict=False):
if tool in internals:
return strict and internals[tool].capabilities.get(capability)
return _toolbool(ui, tool, capability)
def supportscd(tool):
return tool in internals and internals[tool].mergetype == nomerge
def check(tool, pat, symlink, binary, changedelete):
tmsg = tool
if pat:
tmsg = _(b"%s (for pattern %s)") % (tool, pat)
if not _findtool(ui, tool):
if pat: # explicitly requested tool deserves a warning
ui.warn(_(b"couldn't find merge tool %s\n") % tmsg)
else: # configured but non-existing tools are more silent
ui.note(_(b"couldn't find merge tool %s\n") % tmsg)
elif symlink and not hascapability(tool, b"symlink", strictcheck):
ui.warn(_(b"tool %s can't handle symlinks\n") % tmsg)
elif binary and not hascapability(tool, b"binary", strictcheck):
ui.warn(_(b"tool %s can't handle binary\n") % tmsg)
elif changedelete and not supportscd(tool):
# the nomerge tools are the only tools that support change/delete
# conflicts
pass
elif not procutil.gui() and _toolbool(ui, tool, b"gui"):
ui.warn(_(b"tool %s requires a GUI\n") % tmsg)
else:
return True
return False
# internal config: ui.forcemerge
# forcemerge comes from command line arguments, highest priority
force = ui.config(b'ui', b'forcemerge')
if force:
toolpath = _findtool(ui, force)
if changedelete and not supportscd(toolpath):
return b":prompt", None
else:
if toolpath:
return (force, _quotetoolpath(toolpath))
else:
# mimic HGMERGE if given tool not found
return (force, force)
# HGMERGE takes next precedence
hgmerge = encoding.environ.get(b"HGMERGE")
if hgmerge:
if changedelete and not supportscd(hgmerge):
return b":prompt", None
else:
return (hgmerge, hgmerge)
# then patterns
# whether binary capability should be checked strictly
binarycap = binary and strictcheck
for pat, tool in ui.configitems(b"merge-patterns"):
mf = match.match(repo.root, b'', [pat])
if mf(path) and check(tool, pat, symlink, binarycap, changedelete):
if binary and not hascapability(tool, b"binary", strict=True):
ui.warn(
_(
b"warning: check merge-patterns configurations,"
b" if %r for binary file %r is unintentional\n"
b"(see 'hg help merge-tools'"
b" for binary files capability)\n"
)
% (pycompat.bytestr(tool), pycompat.bytestr(path))
)
toolpath = _findtool(ui, tool)
return (tool, _quotetoolpath(toolpath))
# then merge tools
tools = {}
disabled = set()
for k, v in ui.configitems(b"merge-tools"):
t = k.split(b'.')[0]
if t not in tools:
tools[t] = int(_toolstr(ui, t, b"priority"))
if _toolbool(ui, t, b"disabled"):
disabled.add(t)
names = tools.keys()
tools = sorted(
[(-p, tool) for tool, p in tools.items() if tool not in disabled]
)
uimerge = ui.config(b"ui", b"merge")
if uimerge:
# external tools defined in uimerge won't be able to handle
# change/delete conflicts
if check(uimerge, path, symlink, binary, changedelete):
if uimerge not in names and not changedelete:
return (uimerge, uimerge)
tools.insert(0, (None, uimerge)) # highest priority
tools.append((None, b"hgmerge")) # the old default, if found
for p, t in tools:
if check(t, None, symlink, binary, changedelete):
toolpath = _findtool(ui, t)
return (t, _quotetoolpath(toolpath))
# internal merge or prompt as last resort
if symlink or binary or changedelete:
if not changedelete and len(tools):
# any tool is rejected by capability for symlink or binary
ui.warn(_(b"no tool found to merge %s\n") % path)
return b":prompt", None
return b":merge", None
def _eoltype(data):
"""Guess the EOL type of a file"""
if b'\0' in data: # binary
return None
if b'\r\n' in data: # Windows
return b'\r\n'
if b'\r' in data: # Old Mac
return b'\r'
if b'\n' in data: # UNIX
return b'\n'
return None # unknown
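# Illustrative inputs (assumed data): _eoltype(b'a\r\nb\r\n') -> b'\r\n',
# _eoltype(b'a\nb\n') -> b'\n', and any data containing a NUL byte -> None,
# since it is treated as binary.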
def _matcheol(file, back):
"""Convert EOL markers in a file to match origfile"""
tostyle = _eoltype(back.data()) # No repo.wread filters?
if tostyle:
data = util.readfile(file)
style = _eoltype(data)
if style:
newdata = data.replace(style, tostyle)
if newdata != data:
util.writefile(file, newdata)
@internaltool(b'prompt', nomerge)
def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Asks the user which of the local `p1()` or the other `p2()` version to
keep as the merged version."""
ui = repo.ui
fd = fcd.path()
uipathfn = scmutil.getuipathfn(repo)
# Avoid prompting during an in-memory merge since it doesn't support merge
# conflicts.
if fcd.changectx().isinmemory():
raise error.InMemoryMergeConflictsError(
b'in-memory merge does not support file conflicts'
)
prompts = partextras(labels)
prompts[b'fd'] = uipathfn(fd)
try:
if fco.isabsent():
index = ui.promptchoice(_localchangedotherdeletedmsg % prompts, 2)
choice = [b'local', b'other', b'unresolved'][index]
elif fcd.isabsent():
index = ui.promptchoice(_otherchangedlocaldeletedmsg % prompts, 2)
choice = [b'other', b'local', b'unresolved'][index]
else:
# IMPORTANT: keep the last line of this prompt ("What do you want to
# do?") very short, see comment next to _localchangedotherdeletedmsg
# at the top of the file for details.
index = ui.promptchoice(
_(
b"file '%(fd)s' needs to be resolved.\n"
b"You can keep (l)ocal%(l)s, take (o)ther%(o)s, or leave "
b"(u)nresolved.\n"
b"What do you want to do?"
b"$$ &Local $$ &Other $$ &Unresolved"
)
% prompts,
2,
)
choice = [b'local', b'other', b'unresolved'][index]
if choice == b'other':
return _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
elif choice == b'local':
return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
elif choice == b'unresolved':
return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
except error.ResponseExpected:
ui.write(b"\n")
return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
@internaltool(b'local', nomerge)
def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Uses the local `p1()` version of files as the merged version."""
return 0, fcd.isabsent()
@internaltool(b'other', nomerge)
def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Uses the other `p2()` version of files as the merged version."""
if fco.isabsent():
# local changed, remote deleted -- 'deleted' picked
_underlyingfctxifabsent(fcd).remove()
deleted = True
else:
_underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
deleted = False
return 0, deleted
@internaltool(b'fail', nomerge)
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""
Rather than attempting to merge files that were modified on both
branches, it marks them as unresolved. The resolve command must be
used to resolve these conflicts."""
# for change/delete conflicts write out the changed version, then fail
if fcd.isabsent():
_underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
return 1, False
def _underlyingfctxifabsent(filectx):
"""Sometimes when resolving, our fcd is actually an absentfilectx, but
we want to write to it (to do the resolve). This helper returns the
    underlying workingfilectx in that case.
"""
if filectx.isabsent():
return filectx.changectx()[filectx.path()]
else:
return filectx
def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None):
tool, toolpath, binary, symlink, scriptfn = toolconf
if symlink or fcd.isabsent() or fco.isabsent():
return 1
unused, unused, unused, back = files
ui = repo.ui
validkeep = [b'keep', b'keep-merge3', b'keep-mergediff']
# do we attempt to simplemerge first?
try:
premerge = _toolbool(ui, tool, b"premerge", not binary)
except error.ConfigError:
premerge = _toolstr(ui, tool, b"premerge", b"").lower()
if premerge not in validkeep:
_valid = b', '.join([b"'" + v + b"'" for v in validkeep])
raise error.ConfigError(
_(b"%s.premerge not valid ('%s' is neither boolean nor %s)")
% (tool, premerge, _valid)
)
if premerge:
mode = b'merge'
if premerge in {b'keep-merge3', b'keep-mergediff'}:
if not labels:
labels = _defaultconflictlabels
if len(labels) < 3:
labels.append(b'base')
if premerge == b'keep-mergediff':
mode = b'mergediff'
r = simplemerge.simplemerge(
ui, fcd, fca, fco, quiet=True, label=labels, mode=mode
)
if not r:
ui.debug(b" premerge successful\n")
return 0
if premerge not in validkeep:
# restore from backup and try again
_restorebackup(fcd, back)
return 1 # continue merging
def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
tool, toolpath, binary, symlink, scriptfn = toolconf
uipathfn = scmutil.getuipathfn(repo)
if symlink:
repo.ui.warn(
_(b'warning: internal %s cannot merge symlinks for %s\n')
% (tool, uipathfn(fcd.path()))
)
return False
if fcd.isabsent() or fco.isabsent():
repo.ui.warn(
_(
b'warning: internal %s cannot merge change/delete '
b'conflict for %s\n'
)
% (tool, uipathfn(fcd.path()))
)
return False
return True
def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Markers will have two sections, one for each side
of merge, unless mode equals 'union' which suppresses the markers."""
ui = repo.ui
r = simplemerge.simplemerge(ui, fcd, fca, fco, label=labels, mode=mode)
return True, r, False
@internaltool(
b'union',
fullmerge,
_(
b"warning: conflicts while merging %s! "
b"(edit, then use 'hg resolve --mark')\n"
),
precheck=_mergecheck,
)
def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will use both left and right sides for conflict regions.
No markers are inserted."""
return _merge(
repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'union'
)
@internaltool(
b'merge',
fullmerge,
_(
b"warning: conflicts while merging %s! "
b"(edit, then use 'hg resolve --mark')\n"
),
precheck=_mergecheck,
)
def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Markers will have two sections, one for each side
of merge."""
return _merge(
repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'merge'
)
@internaltool(
b'merge3',
fullmerge,
_(
b"warning: conflicts while merging %s! "
b"(edit, then use 'hg resolve --mark')\n"
),
precheck=_mergecheck,
)
def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Marker will have three sections, one from each
side of the merge and one for the base content."""
if not labels:
labels = _defaultconflictlabels
if len(labels) < 3:
labels.append(b'base')
return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
@internaltool(
b'merge3-lie-about-conflicts',
fullmerge,
b'',
precheck=_mergecheck,
)
def _imerge3alwaysgood(*args, **kwargs):
# Like merge3, but record conflicts as resolved with markers in place.
#
# This is used for `diff.merge` to show the differences between
# the auto-merge state and the committed merge state. It may be
# useful for other things.
b1, junk, b2 = _imerge3(*args, **kwargs)
# TODO is this right? I'm not sure what these return values mean,
    # but as far as I can tell this will indicate to callers that the
# merge succeeded.
return b1, False, b2
@internaltool(
b'mergediff',
fullmerge,
_(
b"warning: conflicts while merging %s! "
b"(edit, then use 'hg resolve --mark')\n"
),
precheck=_mergecheck,
)
def _imerge_diff(
repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None
):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. The marker will have two sections, one with the
content from one side of the merge, and one with a diff from the base
content to the content on the other side. (experimental)"""
if not labels:
labels = _defaultconflictlabels
if len(labels) < 3:
labels.append(b'base')
return _merge(
repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'mergediff'
)
def _imergeauto(
repo,
mynode,
orig,
fcd,
fco,
fca,
toolconf,
files,
labels=None,
localorother=None,
):
"""
Generic driver for _imergelocal and _imergeother
"""
assert localorother is not None
r = simplemerge.simplemerge(
repo.ui, fcd, fca, fco, label=labels, localorother=localorother
)
return True, r
@internaltool(b'merge-local', mergeonly, precheck=_mergecheck)
def _imergelocal(*args, **kwargs):
"""
Like :merge, but resolve all conflicts non-interactively in favor
of the local `p1()` changes."""
success, status = _imergeauto(localorother=b'local', *args, **kwargs)
return success, status, False
@internaltool(b'merge-other', mergeonly, precheck=_mergecheck)
def _imergeother(*args, **kwargs):
"""
Like :merge, but resolve all conflicts non-interactively in favor
of the other `p2()` changes."""
success, status = _imergeauto(localorother=b'other', *args, **kwargs)
return success, status, False
@internaltool(
b'tagmerge',
mergeonly,
_(
b"automatic tag merging of %s failed! "
b"(use 'hg resolve --tool :merge' or another merge "
b"tool of your choice)\n"
),
)
def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal tag merge algorithm (experimental).
"""
success, status = tagmerge.merge(repo, fcd, fco, fca)
return success, status, False
@internaltool(b'dump', fullmerge, binary=True, symlink=True)
def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Creates three versions of the files to merge, containing the
contents of local, other and base. These files can then be used to
perform a merge manually. If the file to be merged is named
``a.txt``, these files will accordingly be named ``a.txt.local``,
``a.txt.other`` and ``a.txt.base`` and they will be placed in the
same directory as ``a.txt``.
    This implies premerge. Therefore, files aren't dumped if premerge
runs successfully. Use :forcedump to forcibly write files out.
"""
a = _workingpath(repo, fcd)
fd = fcd.path()
from . import context
if isinstance(fcd, context.overlayworkingfilectx):
raise error.InMemoryMergeConflictsError(
b'in-memory merge does not support the :dump tool.'
)
util.writefile(a + b".local", fcd.decodeddata())
repo.wwrite(fd + b".other", fco.data(), fco.flags())
repo.wwrite(fd + b".base", fca.data(), fca.flags())
return False, 1, False
@internaltool(b'forcedump', mergeonly, binary=True, symlink=True)
def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
    Creates three versions of the files, the same as :dump, but omits premerge.
"""
return _idump(
repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=labels
)
def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
# In-memory merge simply raises an exception on all external merge tools,
# for now.
#
# It would be possible to run most tools with temporary files, but this
# raises the question of what to do if the user only partially resolves the
# file -- we can't leave a merge state. (Copy to somewhere in the .hg/
# directory and tell the user how to get it is my best idea, but it's
# clunky.)
raise error.InMemoryMergeConflictsError(
b'in-memory merge does not support external merge tools'
)
def _describemerge(ui, repo, mynode, fcl, fcb, fco, env, toolpath, args):
tmpl = ui.config(b'command-templates', b'pre-merge-tool-output')
if not tmpl:
return
mappingdict = templateutil.mappingdict
props = {
b'ctx': fcl.changectx(),
b'node': hex(mynode),
b'path': fcl.path(),
b'local': mappingdict(
{
b'ctx': fcl.changectx(),
b'fctx': fcl,
b'node': hex(mynode),
b'name': _(b'local'),
b'islink': b'l' in fcl.flags(),
b'label': env[b'HG_MY_LABEL'],
}
),
b'base': mappingdict(
{
b'ctx': fcb.changectx(),
b'fctx': fcb,
b'name': _(b'base'),
b'islink': b'l' in fcb.flags(),
b'label': env[b'HG_BASE_LABEL'],
}
),
b'other': mappingdict(
{
b'ctx': fco.changectx(),
b'fctx': fco,
b'name': _(b'other'),
b'islink': b'l' in fco.flags(),
b'label': env[b'HG_OTHER_LABEL'],
}
),
b'toolpath': toolpath,
b'toolargs': args,
}
# TODO: make all of this something that can be specified on a per-tool basis
tmpl = templater.unquotestring(tmpl)
# Not using cmdutil.rendertemplate here since it causes errors importing
# things for us to import cmdutil.
tres = formatter.templateresources(ui, repo)
t = formatter.maketemplater(
ui, tmpl, defaults=templatekw.keywords, resources=tres
)
ui.status(t.renderdefault(props))
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels):
tool, toolpath, binary, symlink, scriptfn = toolconf
uipathfn = scmutil.getuipathfn(repo)
if fcd.isabsent() or fco.isabsent():
repo.ui.warn(
_(b'warning: %s cannot merge change/delete conflict for %s\n')
% (tool, uipathfn(fcd.path()))
)
return False, 1, None
unused, unused, unused, back = files
localpath = _workingpath(repo, fcd)
args = _toolstr(repo.ui, tool, b"args")
with _maketempfiles(
repo, fco, fca, repo.wvfs.join(back.path()), b"$output" in args
) as temppaths:
basepath, otherpath, localoutputpath = temppaths
outpath = b""
mylabel, otherlabel = labels[:2]
if len(labels) >= 3:
baselabel = labels[2]
else:
baselabel = b'base'
env = {
b'HG_FILE': fcd.path(),
b'HG_MY_NODE': short(mynode),
b'HG_OTHER_NODE': short(fco.changectx().node()),
b'HG_BASE_NODE': short(fca.changectx().node()),
b'HG_MY_ISLINK': b'l' in fcd.flags(),
b'HG_OTHER_ISLINK': b'l' in fco.flags(),
b'HG_BASE_ISLINK': b'l' in fca.flags(),
b'HG_MY_LABEL': mylabel,
b'HG_OTHER_LABEL': otherlabel,
b'HG_BASE_LABEL': baselabel,
}
ui = repo.ui
if b"$output" in args:
# read input from backup, write to original
outpath = localpath
localpath = localoutputpath
replace = {
b'local': localpath,
b'base': basepath,
b'other': otherpath,
b'output': outpath,
b'labellocal': mylabel,
b'labelother': otherlabel,
b'labelbase': baselabel,
}
args = util.interpolate(
br'\$',
replace,
args,
lambda s: procutil.shellquote(util.localpath(s)),
)
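        # Illustrative config (hypothetical tool name): with
        #   merge-tools.mymerge.args = $local $base $other -o $output
        # each $-placeholder in `args` is replaced here with the corresponding
        # shell-quoted value from `replace`.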
if _toolbool(ui, tool, b"gui"):
repo.ui.status(
_(b'running merge tool %s for file %s\n')
% (tool, uipathfn(fcd.path()))
)
if scriptfn is None:
cmd = toolpath + b' ' + args
repo.ui.debug(b'launching merge tool: %s\n' % cmd)
_describemerge(ui, repo, mynode, fcd, fca, fco, env, toolpath, args)
r = ui.system(
cmd, cwd=repo.root, environ=env, blockedtag=b'mergetool'
)
else:
repo.ui.debug(
b'launching python merge script: %s:%s\n' % (toolpath, scriptfn)
)
r = 0
try:
# avoid cycle cmdutil->merge->filemerge->extensions->cmdutil
from . import extensions
mod = extensions.loadpath(toolpath, b'hgmerge.%s' % tool)
except Exception:
raise error.Abort(
_(b"loading python merge script failed: %s") % toolpath
)
mergefn = getattr(mod, scriptfn, None)
if mergefn is None:
raise error.Abort(
_(b"%s does not have function: %s") % (toolpath, scriptfn)
)
argslist = procutil.shellsplit(args)
# avoid cycle cmdutil->merge->filemerge->hook->extensions->cmdutil
from . import hook
ret, raised = hook.pythonhook(
ui, repo, b"merge", toolpath, mergefn, {b'args': argslist}, True
)
if raised:
r = 1
repo.ui.debug(b'merge tool returned: %d\n' % r)
return True, r, False
def _formatconflictmarker(ctx, template, label, pad):
"""Applies the given template to the ctx, prefixed by the label.
Pad is the minimum width of the label prefix, so that multiple markers
can have aligned templated parts.
"""
if ctx.node() is None:
ctx = ctx.p1()
props = {b'ctx': ctx}
templateresult = template.renderdefault(props)
label = (b'%s:' % label).ljust(pad + 1)
mark = b'%s %s' % (label, templateresult)
if mark:
mark = mark.splitlines()[0] # split for safety
# 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
return stringutil.ellipsis(mark, 80 - 8)
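# For example (illustrative): with label b'local' the marker body becomes
# b'local: <rendered template>', with the label column padded so several
# markers line up, and the result truncated to 72 columns so the full
# conflict-marker line (including its 8-character '<<<<<<< ' prefix) fits
# in 80 columns.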
_defaultconflictlabels = [b'local', b'other']
def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
"""Formats the given labels using the conflict marker template.
Returns a list of formatted labels.
"""
cd = fcd.changectx()
co = fco.changectx()
ca = fca.changectx()
ui = repo.ui
template = ui.config(b'command-templates', b'mergemarker')
if tool is not None:
template = _toolstr(ui, tool, b'mergemarkertemplate', template)
template = templater.unquotestring(template)
tres = formatter.templateresources(ui, repo)
tmpl = formatter.maketemplater(
ui, template, defaults=templatekw.keywords, resources=tres
)
pad = max(len(l) for l in labels)
newlabels = [
_formatconflictmarker(cd, tmpl, labels[0], pad),
_formatconflictmarker(co, tmpl, labels[1], pad),
]
if len(labels) > 2:
newlabels.append(_formatconflictmarker(ca, tmpl, labels[2], pad))
return newlabels
def partextras(labels):
"""Return a dictionary of extra labels for use in prompts to the user
Intended use is in strings of the form "(l)ocal%(l)s".
"""
if labels is None:
return {
b"l": b"",
b"o": b"",
}
return {
b"l": b" [%s]" % labels[0],
b"o": b" [%s]" % labels[1],
}
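# For example (illustrative): partextras([b'working copy', b'merge rev'])
# returns {b'l': b' [working copy]', b'o': b' [merge rev]'}, while
# partextras(None) maps both keys to empty strings.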
def _restorebackup(fcd, back):
# TODO: Add a workingfilectx.write(otherfilectx) path so we can use
# util.copy here instead.
fcd.write(back.data(), fcd.flags())
def _makebackup(repo, ui, wctx, fcd, premerge):
"""Makes and returns a filectx-like object for ``fcd``'s backup file.
In addition to preserving the user's pre-existing modifications to `fcd`
(if any), the backup is used to undo certain premerges, confirm whether a
merge changed anything, and determine what line endings the new file should
have.
Backups only need to be written once (right before the premerge) since their
content doesn't change afterwards.
"""
if fcd.isabsent():
return None
# TODO: Break this import cycle somehow. (filectx -> ctx -> fileset ->
# merge -> filemerge). (I suspect the fileset import is the weakest link)
from . import context
back = scmutil.backuppath(ui, repo, fcd.path())
inworkingdir = back.startswith(repo.wvfs.base) and not back.startswith(
repo.vfs.base
)
if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
# If the backup file is to be in the working directory, and we're
# merging in-memory, we must redirect the backup to the memory context
# so we don't disturb the working directory.
relpath = back[len(repo.wvfs.base) + 1 :]
if premerge:
wctx[relpath].write(fcd.data(), fcd.flags())
return wctx[relpath]
else:
if premerge:
# Otherwise, write to wherever path the user specified the backups
# should go. We still need to switch based on whether the source is
# in-memory so we can use the fast path of ``util.copy`` if both are
# on disk.
if isinstance(fcd, context.overlayworkingfilectx):
util.writefile(back, fcd.data())
else:
a = _workingpath(repo, fcd)
util.copyfile(a, back)
        # An arbitraryfilectx is returned, so we can run the same functions on
# the backup context regardless of where it lives.
return context.arbitraryfilectx(back, repo=repo)
@contextlib.contextmanager
def _maketempfiles(repo, fco, fca, localpath, uselocalpath):
"""Writes out `fco` and `fca` as temporary files, and (if uselocalpath)
copies `localpath` to another temporary file, so an external merge tool may
use them.
"""
tmproot = None
tmprootprefix = repo.ui.config(b'experimental', b'mergetempdirprefix')
if tmprootprefix:
tmproot = pycompat.mkdtemp(prefix=tmprootprefix)
def maketempfrompath(prefix, path):
fullbase, ext = os.path.splitext(path)
pre = b"%s~%s" % (os.path.basename(fullbase), prefix)
if tmproot:
name = os.path.join(tmproot, pre)
if ext:
name += ext
f = open(name, "wb")
else:
fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
f = os.fdopen(fd, "wb")
return f, name
def tempfromcontext(prefix, ctx):
f, name = maketempfrompath(prefix, ctx.path())
data = repo.wwritedata(ctx.path(), ctx.data())
f.write(data)
f.close()
return name
b = tempfromcontext(b"base", fca)
c = tempfromcontext(b"other", fco)
d = localpath
if uselocalpath:
# We start off with this being the backup filename, so remove the .orig
# to make syntax-highlighting more likely.
if d.endswith(b'.orig'):
d, _ = os.path.splitext(d)
f, d = maketempfrompath(b"local", d)
with open(localpath, b'rb') as src:
f.write(src.read())
f.close()
try:
yield b, c, d
finally:
if tmproot:
shutil.rmtree(tmproot)
else:
util.unlink(b)
util.unlink(c)
# if not uselocalpath, d is the 'orig'/backup file which we
# shouldn't delete.
if d and uselocalpath:
util.unlink(d)
def _filemerge(premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
"""perform a 3-way merge in the working directory
premerge = whether this is a premerge
mynode = parent node before merge
orig = original local filename before merge
fco = other file context
fca = ancestor file context
fcd = local file context for current/destination file
Returns whether the merge is complete, the return value of the merge, and
a boolean indicating whether the file was deleted from disk."""
if not fco.cmp(fcd): # files identical?
return True, None, False
ui = repo.ui
fd = fcd.path()
uipathfn = scmutil.getuipathfn(repo)
fduipath = uipathfn(fd)
binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
symlink = b'l' in fcd.flags() + fco.flags()
changedelete = fcd.isabsent() or fco.isabsent()
tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
scriptfn = None
if tool in internals and tool.startswith(b'internal:'):
# normalize to new-style names (':merge' etc)
tool = tool[len(b'internal') :]
if toolpath and toolpath.startswith(b'python:'):
invalidsyntax = False
if toolpath.count(b':') >= 2:
script, scriptfn = toolpath[7:].rsplit(b':', 1)
if not scriptfn:
invalidsyntax = True
            # missing :callable can lead to splitting on a Windows drive letter
if b'\\' in scriptfn or b'/' in scriptfn:
invalidsyntax = True
else:
invalidsyntax = True
if invalidsyntax:
raise error.Abort(_(b"invalid 'python:' syntax: %s") % toolpath)
toolpath = script
ui.debug(
b"picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
% (
tool,
fduipath,
pycompat.bytestr(binary),
pycompat.bytestr(symlink),
pycompat.bytestr(changedelete),
)
)
if tool in internals:
func = internals[tool]
mergetype = func.mergetype
onfailure = func.onfailure
precheck = func.precheck
isexternal = False
else:
if wctx.isinmemory():
func = _xmergeimm
else:
func = _xmerge
mergetype = fullmerge
onfailure = _(b"merging %s failed!\n")
precheck = None
isexternal = True
toolconf = tool, toolpath, binary, symlink, scriptfn
if mergetype == nomerge:
r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
return True, r, deleted
if premerge:
if orig != fco.path():
ui.status(
_(b"merging %s and %s to %s\n")
% (uipathfn(orig), uipathfn(fco.path()), fduipath)
)
else:
ui.status(_(b"merging %s\n") % fduipath)
ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
if precheck and not precheck(repo, mynode, orig, fcd, fco, fca, toolconf):
if onfailure:
if wctx.isinmemory():
raise error.InMemoryMergeConflictsError(
b'in-memory merge does not support merge conflicts'
)
ui.warn(onfailure % fduipath)
return True, 1, False
back = _makebackup(repo, ui, wctx, fcd, premerge)
files = (None, None, None, back)
r = 1
try:
internalmarkerstyle = ui.config(b'ui', b'mergemarkers')
if isexternal:
markerstyle = _toolstr(ui, tool, b'mergemarkers')
else:
markerstyle = internalmarkerstyle
if not labels:
labels = _defaultconflictlabels
formattedlabels = labels
if markerstyle != b'basic':
formattedlabels = _formatlabels(
repo, fcd, fco, fca, labels, tool=tool
)
if premerge and mergetype == fullmerge:
# conflict markers generated by premerge will use 'detailed'
# settings if either ui.mergemarkers or the tool's mergemarkers
# setting is 'detailed'. This way tools can have basic labels in
# space-constrained areas of the UI, but still get full information
# in conflict markers if premerge is 'keep' or 'keep-merge3'.
premergelabels = labels
labeltool = None
if markerstyle != b'basic':
                # respect the tool's mergemarkertemplate (which defaults to
# command-templates.mergemarker)
labeltool = tool
if internalmarkerstyle != b'basic' or markerstyle != b'basic':
premergelabels = _formatlabels(
repo, fcd, fco, fca, premergelabels, tool=labeltool
)
r = _premerge(
repo, fcd, fco, fca, toolconf, files, labels=premergelabels
)
# complete if premerge successful (r is 0)
return not r, r, False
needcheck, r, deleted = func(
repo,
mynode,
orig,
fcd,
fco,
fca,
toolconf,
files,
labels=formattedlabels,
)
if needcheck:
r = _check(repo, r, ui, tool, fcd, files)
if r:
if onfailure:
if wctx.isinmemory():
raise error.InMemoryMergeConflictsError(
b'in-memory merge '
b'does not support '
b'merge conflicts'
)
ui.warn(onfailure % fduipath)
_onfilemergefailure(ui)
return True, r, deleted
finally:
if not r and back is not None:
back.remove()
def _haltmerge():
msg = _(b'merge halted after failed merge (see hg resolve)')
raise error.InterventionRequired(msg)
def _onfilemergefailure(ui):
action = ui.config(b'merge', b'on-failure')
if action == b'prompt':
msg = _(b'continue merge operation (yn)?$$ &Yes $$ &No')
if ui.promptchoice(msg, 0) == 1:
_haltmerge()
if action == b'halt':
_haltmerge()
# default action is 'continue', in which case we neither prompt nor halt
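# Illustrative sketch (not part of the original module): the behaviour above is
# driven by the 'merge.on-failure' setting, which accepts 'continue' (the
# default), 'prompt', or 'halt'. For example, an hgrc along these lines makes
# hg stop after the first failed file merge:
#
#   [merge]
#   on-failure = halt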
def hasconflictmarkers(data):
return bool(
re.search(
br"^(<<<<<<<.*|=======.*|------- .*|\+\+\+\+\+\+\+ .*|>>>>>>>.*)$",
data,
re.MULTILINE,
)
)
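# Sketch of what the pattern above catches (added for clarity, not in the
# original source): any line that starts with one of the standard conflict
# marker prefixes. For example,
#
#   hasconflictmarkers(b"<<<<<<< local\nours\n=======\ntheirs\n>>>>>>> other\n")
#
# is truthy, while ordinary file content without such line-leading markers is not.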
def _check(repo, r, ui, tool, fcd, files):
fd = fcd.path()
uipathfn = scmutil.getuipathfn(repo)
unused, unused, unused, back = files
if not r and (
_toolbool(ui, tool, b"checkconflicts")
or b'conflicts' in _toollist(ui, tool, b"check")
):
if hasconflictmarkers(fcd.data()):
r = 1
checked = False
if b'prompt' in _toollist(ui, tool, b"check"):
checked = True
if ui.promptchoice(
_(b"was merge of '%s' successful (yn)?$$ &Yes $$ &No")
% uipathfn(fd),
1,
):
r = 1
if (
not r
and not checked
and (
_toolbool(ui, tool, b"checkchanged")
or b'changed' in _toollist(ui, tool, b"check")
)
):
if back is not None and not fcd.cmp(back):
if ui.promptchoice(
_(
b" output file %s appears unchanged\n"
b"was merge successful (yn)?"
b"$$ &Yes $$ &No"
)
% uipathfn(fd),
1,
):
r = 1
if back is not None and _toolbool(ui, tool, b"fixeol"):
_matcheol(_workingpath(repo, fcd), back)
return r
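# Hedged note (not in the original file): the per-tool knobs consulted by
# _check() -- 'checkconflicts', 'check', 'checkchanged' and 'fixeol' -- are the
# usual merge-tools suboptions, e.g. something like:
#
#   [merge-tools]
#   mytool.check = conflicts, changed
#   mytool.fixeol = True
#
# where 'mytool' is a hypothetical tool name used only for illustration.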
def _workingpath(repo, ctx):
return repo.wjoin(ctx.path())
def premerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
return _filemerge(
True, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
)
def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
return _filemerge(
False, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
)
def loadinternalmerge(ui, extname, registrarobj):
"""Load internal merge tool from specified registrarobj"""
for name, func in pycompat.iteritems(registrarobj._table):
fullname = b':' + name
internals[fullname] = func
internals[b'internal:' + name] = func
internalsdoc[fullname] = func
capabilities = sorted([k for k, v in func.capabilities.items() if v])
if capabilities:
capdesc = b" (actual capabilities: %s)" % b', '.join(
capabilities
)
func.__doc__ = func.__doc__ + pycompat.sysstr(b"\n\n%s" % capdesc)
# to put i18n comments into hg.pot for automatically generated texts
# i18n: "binary" and "symlink" are keywords
# i18n: this text is added automatically
_(b" (actual capabilities: binary, symlink)")
# i18n: "binary" is keyword
# i18n: this text is added automatically
_(b" (actual capabilities: binary)")
# i18n: "symlink" is keyword
# i18n: this text is added automatically
_(b" (actual capabilities: symlink)")
# load built-in merge tools explicitly to setup internalsdoc
loadinternalmerge(None, None, internaltool)
# tell hggettext to extract docstrings from these functions:
i18nfunctions = internals.values()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gan.python.train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python import train
from tensorflow.contrib.gan.python.features.python import random_tensor_pool
from tensorflow.contrib.slim.python.slim import learning as slim_learning
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def generator_model(inputs):
return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs
class Generator(object):
def __call__(self, inputs):
return generator_model(inputs)
def infogan_generator_model(inputs):
return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs[0]
class InfoGANGenerator(object):
def __call__(self, inputs):
return infogan_generator_model(inputs)
def discriminator_model(inputs, _):
return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
class Discriminator(object):
def __call__(self, inputs, _):
return discriminator_model(inputs, _)
def infogan_discriminator_model(inputs, _):
return (variable_scope.get_variable('dummy_d', initializer=2.0) * inputs,
[categorical.Categorical([1.0])])
class InfoGANDiscriminator(object):
def __call__(self, inputs, _):
return infogan_discriminator_model(inputs, _)
def acgan_discriminator_model(inputs, _, num_classes=10):
return (discriminator_model(inputs, _), array_ops.one_hot(
# TODO(haeusser): infer batch size from input
random_ops.random_uniform([3], maxval=num_classes, dtype=dtypes.int32),
num_classes))
class ACGANDiscriminator(object):
def __call__(self, inputs, _, num_classes=10):
return (discriminator_model(inputs, _), array_ops.one_hot(
# TODO(haeusser): infer batch size from input
random_ops.random_uniform([3], maxval=num_classes, dtype=dtypes.int32),
num_classes))
def get_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
pass
with variable_scope.variable_scope('discriminator') as dis_scope:
pass
return namedtuples.GANModel(
generator_inputs=None,
generated_data=None,
generator_variables=None,
generator_scope=gen_scope,
generator_fn=generator_model,
real_data=array_ops.ones([1, 2, 3]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]),
discriminator_gen_outputs=array_ops.ones([1, 2, 3]),
discriminator_variables=None,
discriminator_scope=dis_scope,
discriminator_fn=discriminator_model)
def get_callable_gan_model():
ganmodel = get_gan_model()
return ganmodel._replace(
generator_fn=Generator(),
discriminator_fn=Discriminator())
def create_gan_model():
return train.gan_model(
generator_model,
discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]))
def create_callable_gan_model():
return train.gan_model(
Generator(),
Discriminator(),
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]))
def get_infogan_model():
return namedtuples.InfoGANModel(
*get_gan_model(),
structured_generator_inputs=[constant_op.constant(0)],
predicted_distributions=[categorical.Categorical([1.0])],
discriminator_and_aux_fn=infogan_discriminator_model)
def get_callable_infogan_model():
return namedtuples.InfoGANModel(
*get_callable_gan_model(),
structured_generator_inputs=[constant_op.constant(0)],
predicted_distributions=[categorical.Categorical([1.0])],
discriminator_and_aux_fn=infogan_discriminator_model)
def create_infogan_model():
return train.infogan_model(
infogan_generator_model,
infogan_discriminator_model,
real_data=array_ops.zeros([1, 2]),
unstructured_generator_inputs=[],
structured_generator_inputs=[random_ops.random_normal([1, 2])])
def create_callable_infogan_model():
return train.infogan_model(
InfoGANGenerator(),
InfoGANDiscriminator(),
real_data=array_ops.zeros([1, 2]),
unstructured_generator_inputs=[],
structured_generator_inputs=[random_ops.random_normal([1, 2])])
def get_acgan_model():
return namedtuples.ACGANModel(
*get_gan_model(),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10),
discriminator_real_classification_logits=array_ops.one_hot([0, 1, 3], 10),
discriminator_gen_classification_logits=array_ops.one_hot([0, 1, 4], 10))
def get_callable_acgan_model():
return namedtuples.ACGANModel(
*get_callable_gan_model(),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10),
discriminator_real_classification_logits=array_ops.one_hot([0, 1, 3], 10),
discriminator_gen_classification_logits=array_ops.one_hot([0, 1, 4], 10))
def create_acgan_model():
return train.acgan_model(
generator_model,
acgan_discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10))
def create_callable_acgan_model():
return train.acgan_model(
Generator(),
ACGANDiscriminator(),
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10))
def get_sync_optimizer():
return sync_replicas_optimizer.SyncReplicasOptimizer(
gradient_descent.GradientDescentOptimizer(learning_rate=1.0),
replicas_to_aggregate=1)
def get_tensor_pool_fn(pool_size):
def tensor_pool_fn_impl(input_values):
return random_tensor_pool.tensor_pool(input_values, pool_size=pool_size)
return tensor_pool_fn_impl
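# Illustrative usage (an assumption, mirroring the tensor-pool tests further
# below): the factory above produces a callable suitable for
# `train.gan_loss(model, tensor_pool_fn=...)`, which buffers discriminator
# inputs so the discriminator also sees samples from earlier generator steps.
#
#   tensor_pool_fn = get_tensor_pool_fn(pool_size=5)
#   loss = train.gan_loss(create_gan_model(), tensor_pool_fn=tensor_pool_fn)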
def get_tensor_pool_fn_for_infogan(pool_size):
def tensor_pool_fn_impl(input_values):
generated_data, generator_inputs = input_values
output_values = random_tensor_pool.tensor_pool(
[generated_data] + generator_inputs, pool_size=pool_size)
return output_values[0], output_values[1:]
return tensor_pool_fn_impl
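# Note (added for clarity, not in the original test): the InfoGAN variant is
# needed because an InfoGAN model's pooled values are a (generated_data,
# generator_inputs) pair where generator_inputs is itself a list, so the pooled
# output has to be re-split into `output_values[0]` and `output_values[1:]`.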
class GANModelTest(test.TestCase):
"""Tests for `gan_model`."""
def _test_output_type_helper(self, create_fn, tuple_type):
self.assertTrue(isinstance(create_fn(), tuple_type))
def test_output_type_gan(self):
self._test_output_type_helper(get_gan_model, namedtuples.GANModel)
def test_output_type_callable_gan(self):
self._test_output_type_helper(get_callable_gan_model, namedtuples.GANModel)
def test_output_type_infogan(self):
self._test_output_type_helper(get_infogan_model, namedtuples.InfoGANModel)
def test_output_type_callable_infogan(self):
self._test_output_type_helper(
get_callable_infogan_model, namedtuples.InfoGANModel)
def test_output_type_acgan(self):
self._test_output_type_helper(get_acgan_model, namedtuples.ACGANModel)
def test_output_type_callable_acgan(self):
self._test_output_type_helper(
get_callable_acgan_model, namedtuples.ACGANModel)
def test_no_shape_check(self):
def dummy_generator_model(_):
return (None, None)
def dummy_discriminator_model(data, conditioning): # pylint: disable=unused-argument
return 1
with self.assertRaisesRegexp(AttributeError, 'object has no attribute'):
train.gan_model(
dummy_generator_model,
dummy_discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=array_ops.zeros([1]),
check_shapes=True)
train.gan_model(
dummy_generator_model,
dummy_discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=array_ops.zeros([1]),
check_shapes=False)
class GANLossTest(test.TestCase):
"""Tests for `gan_loss`."""
# Test output type.
def _test_output_type_helper(self, get_gan_model_fn):
loss = train.gan_loss(get_gan_model_fn(), add_summaries=True)
self.assertTrue(isinstance(loss, namedtuples.GANLoss))
self.assertGreater(len(ops.get_collection(ops.GraphKeys.SUMMARIES)), 0)
def test_output_type_gan(self):
self._test_output_type_helper(get_gan_model)
def test_output_type_callable_gan(self):
self._test_output_type_helper(get_callable_gan_model)
def test_output_type_infogan(self):
self._test_output_type_helper(get_infogan_model)
def test_output_type_callable_infogan(self):
self._test_output_type_helper(get_callable_infogan_model)
def test_output_type_acgan(self):
self._test_output_type_helper(get_acgan_model)
def test_output_type_callable_acgan(self):
self._test_output_type_helper(get_callable_acgan_model)
# Test gradient penalty option.
def _test_grad_penalty_helper(self, create_gan_model_fn):
model = create_gan_model_fn()
loss = train.gan_loss(model)
loss_gp = train.gan_loss(model, gradient_penalty_weight=1.0)
self.assertTrue(isinstance(loss_gp, namedtuples.GANLoss))
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
loss_gen_np, loss_gen_gp_np = sess.run(
[loss.generator_loss, loss_gp.generator_loss])
loss_dis_np, loss_dis_gp_np = sess.run(
[loss.discriminator_loss, loss_gp.discriminator_loss])
self.assertEqual(loss_gen_np, loss_gen_gp_np)
self.assertTrue(loss_dis_np < loss_dis_gp_np)
def test_grad_penalty_gan(self):
self._test_grad_penalty_helper(create_gan_model)
def test_grad_penalty_callable_gan(self):
self._test_grad_penalty_helper(create_callable_gan_model)
def test_grad_penalty_infogan(self):
self._test_grad_penalty_helper(create_infogan_model)
def test_grad_penalty_callable_infogan(self):
self._test_grad_penalty_helper(create_callable_infogan_model)
def test_grad_penalty_acgan(self):
self._test_grad_penalty_helper(create_acgan_model)
def test_grad_penalty_callable_acgan(self):
self._test_grad_penalty_helper(create_callable_acgan_model)
# Test mutual information penalty option.
def _test_mutual_info_penalty_helper(self, create_gan_model_fn):
train.gan_loss(create_gan_model_fn(),
mutual_information_penalty_weight=constant_op.constant(1.0))
def test_mutual_info_penalty_infogan(self):
self._test_mutual_info_penalty_helper(get_infogan_model)
def test_mutual_info_penalty_callable_infogan(self):
self._test_mutual_info_penalty_helper(get_callable_infogan_model)
# Test regularization loss.
def _test_regularization_helper(self, get_gan_model_fn):
# Evaluate losses without regularization.
no_reg_loss = train.gan_loss(get_gan_model_fn())
with self.test_session(use_gpu=True):
no_reg_loss_gen_np = no_reg_loss.generator_loss.eval()
no_reg_loss_dis_np = no_reg_loss.discriminator_loss.eval()
with ops.name_scope(get_gan_model_fn().generator_scope.name):
ops.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(3.0))
with ops.name_scope(get_gan_model_fn().discriminator_scope.name):
ops.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(2.0))
# Check that losses now include the correct regularization values.
reg_loss = train.gan_loss(get_gan_model_fn())
with self.test_session(use_gpu=True):
reg_loss_gen_np = reg_loss.generator_loss.eval()
reg_loss_dis_np = reg_loss.discriminator_loss.eval()
    self.assertAlmostEqual(3.0, reg_loss_gen_np - no_reg_loss_gen_np)
    self.assertAlmostEqual(2.0, reg_loss_dis_np - no_reg_loss_dis_np)
def test_regularization_gan(self):
self._test_regularization_helper(get_gan_model)
def test_regularization_callable_gan(self):
self._test_regularization_helper(get_callable_gan_model)
def test_regularization_infogan(self):
self._test_regularization_helper(get_infogan_model)
def test_regularization_callable_infogan(self):
self._test_regularization_helper(get_callable_infogan_model)
def test_regularization_acgan(self):
self._test_regularization_helper(get_acgan_model)
def test_regularization_callable_acgan(self):
self._test_regularization_helper(get_callable_acgan_model)
# Test that ACGan models work.
def _test_acgan_helper(self, create_gan_model_fn):
model = create_gan_model_fn()
loss = train.gan_loss(model)
loss_ac_gen = train.gan_loss(model, aux_cond_generator_weight=1.0)
loss_ac_dis = train.gan_loss(model, aux_cond_discriminator_weight=1.0)
self.assertTrue(isinstance(loss, namedtuples.GANLoss))
self.assertTrue(isinstance(loss_ac_gen, namedtuples.GANLoss))
self.assertTrue(isinstance(loss_ac_dis, namedtuples.GANLoss))
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
loss_gen_np, loss_ac_gen_gen_np, loss_ac_dis_gen_np = sess.run(
[loss.generator_loss,
loss_ac_gen.generator_loss,
loss_ac_dis.generator_loss])
loss_dis_np, loss_ac_gen_dis_np, loss_ac_dis_dis_np = sess.run(
[loss.discriminator_loss,
loss_ac_gen.discriminator_loss,
loss_ac_dis.discriminator_loss])
self.assertTrue(loss_gen_np < loss_dis_np)
self.assertTrue(np.isscalar(loss_ac_gen_gen_np))
self.assertTrue(np.isscalar(loss_ac_dis_gen_np))
self.assertTrue(np.isscalar(loss_ac_gen_dis_np))
self.assertTrue(np.isscalar(loss_ac_dis_dis_np))
def test_acgan(self):
self._test_acgan_helper(create_acgan_model)
def test_callable_acgan(self):
self._test_acgan_helper(create_callable_acgan_model)
def _check_tensor_pool_adjusted_model_outputs(self, tensor1, tensor2,
pool_size):
history_values = []
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
for i in range(2 * pool_size):
t1, t2 = sess.run([tensor1, tensor2])
history_values.append(t1)
if i < pool_size:
          # For [0, pool_size), the pool is not yet full, so tensor1 should be
          # equal to tensor2: the pool simply passes the values through.
self.assertAllEqual(t1, t2)
else:
          # For [pool_size, 2 * pool_size), the pool is full, so tensor2 must be
          # equal to one of the historical values of tensor1 that were
          # previously stored in the pool.
self.assertTrue(any([(v == t2).all() for v in history_values]))
# Test `_tensor_pool_adjusted_model` for gan model.
def test_tensor_pool_adjusted_model_gan(self):
model = create_gan_model()
new_model = train._tensor_pool_adjusted_model(model, None)
# 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
self.assertIs(new_model.discriminator_gen_outputs,
model.discriminator_gen_outputs)
pool_size = 5
new_model = train._tensor_pool_adjusted_model(
model, get_tensor_pool_fn(pool_size=pool_size))
self.assertIsNot(new_model.discriminator_gen_outputs,
model.discriminator_gen_outputs)
# Check values.
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
# Test _tensor_pool_adjusted_model for infogan model.
def test_tensor_pool_adjusted_model_infogan(self):
model = create_infogan_model()
pool_size = 5
new_model = train._tensor_pool_adjusted_model(
model, get_tensor_pool_fn_for_infogan(pool_size=pool_size))
# 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
self.assertIsNot(new_model.discriminator_gen_outputs,
model.discriminator_gen_outputs)
self.assertIsNot(new_model.predicted_distributions,
model.predicted_distributions)
# Check values.
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
# Test _tensor_pool_adjusted_model for acgan model.
def test_tensor_pool_adjusted_model_acgan(self):
model = create_acgan_model()
pool_size = 5
new_model = train._tensor_pool_adjusted_model(
model, get_tensor_pool_fn(pool_size=pool_size))
# 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
self.assertIsNot(new_model.discriminator_gen_outputs,
model.discriminator_gen_outputs)
self.assertIsNot(new_model.discriminator_gen_classification_logits,
model.discriminator_gen_classification_logits)
# Check values.
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
# Test tensor pool.
def _test_tensor_pool_helper(self, create_gan_model_fn):
model = create_gan_model_fn()
if isinstance(model, namedtuples.InfoGANModel):
tensor_pool_fn = get_tensor_pool_fn_for_infogan(pool_size=5)
else:
tensor_pool_fn = get_tensor_pool_fn(pool_size=5)
loss = train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
self.assertTrue(isinstance(loss, namedtuples.GANLoss))
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
for _ in range(10):
sess.run([loss.generator_loss, loss.discriminator_loss])
def test_tensor_pool_gan(self):
self._test_tensor_pool_helper(create_gan_model)
def test_tensor_pool_callable_gan(self):
self._test_tensor_pool_helper(create_callable_gan_model)
def test_tensor_pool_infogan(self):
self._test_tensor_pool_helper(create_infogan_model)
def test_tensor_pool_callable_infogan(self):
self._test_tensor_pool_helper(create_callable_infogan_model)
def test_tensor_pool_acgan(self):
self._test_tensor_pool_helper(create_acgan_model)
def test_tensor_pool_callable_acgan(self):
self._test_tensor_pool_helper(create_callable_acgan_model)
def test_doesnt_crash_when_in_nested_scope(self):
with variable_scope.variable_scope('outer_scope'):
gan_model = train.gan_model(
generator_model,
discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]))
# This should work inside a scope.
train.gan_loss(gan_model, gradient_penalty_weight=1.0)
# This should also work outside a scope.
train.gan_loss(gan_model, gradient_penalty_weight=1.0)
class GANTrainOpsTest(test.TestCase):
"""Tests for `gan_train_ops`."""
def _test_output_type_helper(self, create_gan_model_fn):
model = create_gan_model_fn()
loss = train.gan_loss(model)
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
train_ops = train.gan_train_ops(
model,
loss,
g_opt,
d_opt,
summarize_gradients=True,
colocate_gradients_with_ops=True)
self.assertTrue(isinstance(train_ops, namedtuples.GANTrainOps))
def test_output_type_gan(self):
self._test_output_type_helper(create_gan_model)
def test_output_type_callable_gan(self):
self._test_output_type_helper(create_callable_gan_model)
def test_output_type_infogan(self):
self._test_output_type_helper(create_infogan_model)
def test_output_type_callable_infogan(self):
self._test_output_type_helper(create_callable_infogan_model)
def test_output_type_acgan(self):
self._test_output_type_helper(create_acgan_model)
def test_output_type_callable_acgan(self):
self._test_output_type_helper(create_callable_acgan_model)
# TODO(joelshor): Add a test to check that custom update op is run.
def _test_unused_update_ops(self, create_gan_model_fn, provide_update_ops):
model = create_gan_model_fn()
loss = train.gan_loss(model)
# Add generator and discriminator update ops.
with variable_scope.variable_scope(model.generator_scope):
gen_update_count = variable_scope.get_variable('gen_count', initializer=0)
gen_update_op = gen_update_count.assign_add(1)
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, gen_update_op)
with variable_scope.variable_scope(model.discriminator_scope):
dis_update_count = variable_scope.get_variable('dis_count', initializer=0)
dis_update_op = dis_update_count.assign_add(1)
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, dis_update_op)
# Add an update op outside the generator and discriminator scopes.
if provide_update_ops:
kwargs = {'update_ops':
[constant_op.constant(1.0), gen_update_op, dis_update_op]}
else:
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, constant_op.constant(1.0))
kwargs = {}
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
with self.assertRaisesRegexp(ValueError, 'There are unused update ops:'):
train.gan_train_ops(model, loss, g_opt, d_opt,
check_for_unused_update_ops=True, **kwargs)
train_ops = train.gan_train_ops(
model, loss, g_opt, d_opt, check_for_unused_update_ops=False, **kwargs)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(0, gen_update_count.eval())
self.assertEqual(0, dis_update_count.eval())
train_ops.generator_train_op.eval()
self.assertEqual(1, gen_update_count.eval())
self.assertEqual(0, dis_update_count.eval())
train_ops.discriminator_train_op.eval()
self.assertEqual(1, gen_update_count.eval())
self.assertEqual(1, dis_update_count.eval())
def test_unused_update_ops_gan(self):
self._test_unused_update_ops(create_gan_model, False)
def test_unused_update_ops_gan_provideupdates(self):
self._test_unused_update_ops(create_gan_model, True)
def test_unused_update_ops_callable_gan(self):
self._test_unused_update_ops(create_callable_gan_model, False)
def test_unused_update_ops_callable_gan_provideupdates(self):
self._test_unused_update_ops(create_callable_gan_model, True)
def test_unused_update_ops_infogan(self):
self._test_unused_update_ops(create_infogan_model, False)
def test_unused_update_ops_infogan_provideupdates(self):
self._test_unused_update_ops(create_infogan_model, True)
def test_unused_update_ops_callable_infogan(self):
self._test_unused_update_ops(create_callable_infogan_model, False)
def test_unused_update_ops_callable_infogan_provideupdates(self):
self._test_unused_update_ops(create_callable_infogan_model, True)
def test_unused_update_ops_acgan(self):
self._test_unused_update_ops(create_acgan_model, False)
def test_unused_update_ops_acgan_provideupdates(self):
self._test_unused_update_ops(create_acgan_model, True)
def test_unused_update_ops_callable_acgan(self):
self._test_unused_update_ops(create_callable_acgan_model, False)
def test_unused_update_ops_callable_acgan_provideupdates(self):
self._test_unused_update_ops(create_callable_acgan_model, True)
def _test_sync_replicas_helper(
self, create_gan_model_fn, create_global_step=False):
model = create_gan_model_fn()
loss = train.gan_loss(model)
num_trainable_vars = len(variables_lib.get_trainable_variables())
if create_global_step:
gstep = variable_scope.get_variable(
'custom_gstep', dtype=dtypes.int32, initializer=0, trainable=False)
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, gstep)
g_opt = get_sync_optimizer()
d_opt = get_sync_optimizer()
train_ops = train.gan_train_ops(
model,
loss,
generator_optimizer=g_opt,
discriminator_optimizer=d_opt)
self.assertTrue(isinstance(train_ops, namedtuples.GANTrainOps))
# No new trainable variables should have been added.
self.assertEqual(num_trainable_vars,
len(variables_lib.get_trainable_variables()))
g_sync_init_op = g_opt.get_init_tokens_op(num_tokens=1)
d_sync_init_op = d_opt.get_init_tokens_op(num_tokens=1)
# Check that update op is run properly.
global_step = training_util.get_or_create_global_step()
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
g_opt.chief_init_op.run()
d_opt.chief_init_op.run()
gstep_before = global_step.eval()
# Start required queue runner for SyncReplicasOptimizer.
coord = coordinator.Coordinator()
g_threads = g_opt.get_chief_queue_runner().create_threads(sess, coord)
d_threads = d_opt.get_chief_queue_runner().create_threads(sess, coord)
g_sync_init_op.run()
d_sync_init_op.run()
train_ops.generator_train_op.eval()
# Check that global step wasn't incremented.
self.assertEqual(gstep_before, global_step.eval())
train_ops.discriminator_train_op.eval()
# Check that global step wasn't incremented.
self.assertEqual(gstep_before, global_step.eval())
coord.request_stop()
coord.join(g_threads + d_threads)
def test_sync_replicas_gan(self):
self._test_sync_replicas_helper(create_gan_model)
def test_sync_replicas_callable_gan(self):
self._test_sync_replicas_helper(create_callable_gan_model)
def test_sync_replicas_infogan(self):
self._test_sync_replicas_helper(create_infogan_model)
def test_sync_replicas_callable_infogan(self):
self._test_sync_replicas_helper(create_callable_infogan_model)
def test_sync_replicas_acgan(self):
self._test_sync_replicas_helper(create_acgan_model)
def test_sync_replicas_callable_acgan(self):
self._test_sync_replicas_helper(create_callable_acgan_model)
def test_global_step_can_be_int32(self):
self._test_sync_replicas_helper(create_gan_model, create_global_step=True)
class GANTrainTest(test.TestCase):
"""Tests for `gan_train`."""
def _gan_train_ops(self, generator_add, discriminator_add):
step = training_util.create_global_step()
# Increment the global count every time a train op is run so we can count
# the number of times they're run.
# NOTE: `use_locking=True` is required to avoid race conditions with
# joint training.
train_ops = namedtuples.GANTrainOps(
generator_train_op=step.assign_add(generator_add, use_locking=True),
discriminator_train_op=step.assign_add(discriminator_add,
use_locking=True),
global_step_inc_op=step.assign_add(1))
return train_ops
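  # Note (added comment): because the generator op adds `generator_add` and the
  # discriminator op adds `discriminator_add` to the same global step, the final
  # step value encodes exactly how many times each train op ran (see the
  # 1 + 3 * 10 + 4 * 100 check in _test_multiple_steps_helper below).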
def _test_run_helper(self, create_gan_model_fn):
random_seed.set_random_seed(1234)
model = create_gan_model_fn()
loss = train.gan_loss(model)
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
train_ops = train.gan_train_ops(model, loss, g_opt, d_opt)
final_step = train.gan_train(
train_ops,
logdir='',
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=2)])
self.assertTrue(np.isscalar(final_step))
self.assertEqual(2, final_step)
def test_run_gan(self):
self._test_run_helper(create_gan_model)
def test_run_callable_gan(self):
self._test_run_helper(create_callable_gan_model)
def test_run_infogan(self):
self._test_run_helper(create_infogan_model)
def test_run_callable_infogan(self):
self._test_run_helper(create_callable_infogan_model)
def test_run_acgan(self):
self._test_run_helper(create_acgan_model)
def test_run_callable_acgan(self):
self._test_run_helper(create_callable_acgan_model)
# Test multiple train steps.
def _test_multiple_steps_helper(self, get_hooks_fn_fn):
train_ops = self._gan_train_ops(generator_add=10, discriminator_add=100)
train_steps = namedtuples.GANTrainSteps(
generator_train_steps=3,
discriminator_train_steps=4)
final_step = train.gan_train(
train_ops,
get_hooks_fn=get_hooks_fn_fn(train_steps),
logdir='',
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)])
self.assertTrue(np.isscalar(final_step))
self.assertEqual(1 + 3 * 10 + 4 * 100, final_step)
def test_multiple_steps_seq_train_steps(self):
self._test_multiple_steps_helper(train.get_sequential_train_hooks)
def test_multiple_steps_efficient_seq_train_steps(self):
self._test_multiple_steps_helper(train.get_joint_train_hooks)
def test_supervisor_run_gan_model_train_ops_multiple_steps(self):
step = training_util.create_global_step()
train_ops = namedtuples.GANTrainOps(
generator_train_op=constant_op.constant(3.0),
discriminator_train_op=constant_op.constant(2.0),
global_step_inc_op=step.assign_add(1))
train_steps = namedtuples.GANTrainSteps(
generator_train_steps=3,
discriminator_train_steps=4)
final_loss = slim_learning.train(
train_op=train_ops,
logdir='',
global_step=step,
number_of_steps=1,
train_step_fn=train.get_sequential_train_steps(train_steps))
self.assertTrue(np.isscalar(final_loss))
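    # Expected value (explanatory comment): 3 generator steps * 3.0 plus
    # 4 discriminator steps * 2.0 gives 9.0 + 8.0 = 17.0.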
self.assertEqual(17.0, final_loss)
class PatchGANTest(test.TestCase):
"""Tests that functions work on PatchGAN style output."""
def _test_patchgan_helper(self, create_gan_model_fn):
"""Ensure that patch-based discriminators work end-to-end."""
random_seed.set_random_seed(1234)
model = create_gan_model_fn()
loss = train.gan_loss(model)
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
train_ops = train.gan_train_ops(model, loss, g_opt, d_opt)
final_step = train.gan_train(
train_ops,
logdir='',
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=2)])
self.assertTrue(np.isscalar(final_step))
self.assertEqual(2, final_step)
def test_patchgan_gan(self):
self._test_patchgan_helper(create_gan_model)
def test_patchgan_callable_gan(self):
self._test_patchgan_helper(create_callable_gan_model)
def test_patchgan_infogan(self):
self._test_patchgan_helper(create_infogan_model)
def test_patchgan_callable_infogan(self):
self._test_patchgan_helper(create_callable_infogan_model)
def test_patchgan_acgan(self):
self._test_patchgan_helper(create_acgan_model)
def test_patchgan_callable_acgan(self):
self._test_patchgan_helper(create_callable_acgan_model)
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python2.7
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Author: wangtaize@baidu.com
# Date: 2015-04-20
import argparse
import json
import sys
import os
import traceback
from paramiko import client
from argparse import RawTextHelpFormatter
BACK_GROUND_BLACK = '\033[1;40m'
BACK_GROUND_RED = '\033[1;41m'
BACK_GROUND_GREEN = '\033[1;42m'
BACK_GROUND_YELLOW = '\033[1;43m'
BACK_GROUND_BLUE = '\033[1;44m'
BACK_GROUND_BLUE_UNDERLINE = '\033[4;44m'
BACK_GROUND_PURPLE = '\033[1;45m'
BACK_GROUND_CYAN = '\033[1;46m'
BACK_GROUND_WHITE = '\033[1;47m'
TEXT_COLOR_BLACK = '\033[1;30m'
TEXT_COLOR_RED = '\033[1;31m'
TEXT_COLOR_GREEN = '\033[1;32m'
TEXT_COLOR_YELLOW = '\033[1;33m'
TEXT_COLOR_BLUE = '\033[1;34m'
TEXT_COLOR_PURPLE = '\033[1;35m'
TEXT_COLOR_CYAN = '\033[1;36m'
TEXT_COLOR_WHITE = '\033[1;37m'
RESET = '\033[1;m'
USE_SHELL = sys.platform.startswith("win")
class RichText(object):
@classmethod
def render(cls, text, bg_color, color=TEXT_COLOR_WHITE):
"""render
"""
if USE_SHELL:
return text
result = []
result.append(bg_color)
result.append(color)
result.append(text)
result.append(RESET)
return ''.join(result)
@classmethod
def render_text_color(cls, text, color):
"""render text with color
"""
if USE_SHELL:
return text
return ''.join([color, text, RESET])
@classmethod
def render_green_text(cls, text):
"""render green text
"""
return cls.render_text_color(text, TEXT_COLOR_GREEN)
@classmethod
def render_red_text(cls, text):
"""render red text
"""
return cls.render_text_color(text, TEXT_COLOR_RED)
@classmethod
def render_blue_text(cls, text):
"""render blue text
"""
return cls.render_text_color(text, TEXT_COLOR_BLUE)
@classmethod
def render_yellow_text(cls, text):
"""render yellow text
"""
return cls.render_text_color(text, TEXT_COLOR_YELLOW)
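# Minimal usage sketch (not part of the original script):
#
#   print RichText.render("fatal", BACK_GROUND_RED)
#   print RichText.render_green_text("ok")
#
# On Windows (USE_SHELL is True) the text is returned unchanged, without ANSI
# escape codes.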
usage = """
deployer <command> [<args>...]
"""
epilog = """
bug reports<>
"""
def build_parser(deployer):
parser = argparse.ArgumentParser(
prog="deployer.py",
formatter_class=RawTextHelpFormatter,
epilog=epilog,
usage=usage)
subparsers = parser.add_subparsers()
stop_sub = subparsers.add_parser("stop",
help="stop apps on all hosts")
stop_sub.add_argument('-v',
default=False,
dest="show_output",
action="store_true",
help="show output and err")
stop_sub.add_argument('-c',
default=None,
dest="config",
action="store",
help="specify config file ")
stop_sub.add_argument('-u',
default=None,
dest="user",
action="store",
help="specify user on host")
stop_sub.add_argument('-p',
default=None,
dest="password",
action="store",
help="specify password on host")
stop_sub.set_defaults(func=deployer.stop)
fetch_sub = subparsers.add_parser("fetch",help="fetch package to all hosts")
fetch_sub.add_argument('-v',
default=False,
dest="show_output",
action="store_true",
help="show output and err")
fetch_sub.add_argument('-c',
default=None,
dest="config",
action="store",
help="specify config file")
fetch_sub.add_argument('-u',
default=None,
dest="user",
action="store",
help="specify user on host")
fetch_sub.add_argument('-p',
default=None,
dest="password",
action="store",
help="specify password on host")
fetch_sub.set_defaults(func=deployer.fetch)
start_sub = subparsers.add_parser("start",help="start apps on all hosts")
start_sub.add_argument('-v',
default=False,
dest="show_output",
action="store_true",
help="show output and err")
start_sub.add_argument('-c',
default=None,
dest="config",
action="store",
help="specify config file")
start_sub.add_argument('-u',
default=None,
dest="user",
action="store",
help="specify user on host")
start_sub.add_argument('-p',
default=None,
dest="password",
action="store",
help="specify password on host")
start_sub.set_defaults(func=deployer.start)
init_sub = subparsers.add_parser("init",
help="init all nodes")
init_sub.add_argument('-v',
default=False,
dest="show_output",
action="store_true",
help="show output and err")
init_sub.add_argument('-c',
default=None,
dest="config",
action="store",
help="specify config file")
init_sub.add_argument('-u',
default=None,
dest="user",
action="store",
help="specify user on host")
init_sub.add_argument('-p',
default=None,
dest="password",
action="store",
help="specify password on host")
init_sub.set_defaults(func=deployer.init)
clean_sub = subparsers.add_parser("clean",
help="clean all nodes")
clean_sub.add_argument('-v',
default=False,
dest="show_output",
action="store_true",
help="show output and err")
clean_sub.add_argument('-c',
default=None,
dest="config",
action="store",
help="specify config file")
clean_sub.add_argument('-u',
default=None,
dest="user",
action="store",
help="specify user on host")
clean_sub.add_argument('-p',
default=None,
dest="password",
action="store",
help="specify password on host")
clean_sub.set_defaults(func=deployer.clean)
migrate_sub = subparsers.add_parser("migrate",
help="migrate all apps")
migrate_sub.add_argument('-v',
default=False,
dest="show_output",
action="store_true",
help="show output and err")
migrate_sub.add_argument('-c',
default=None,
dest="config",
action="store",
help="specify config file")
migrate_sub.add_argument('-u',
default=None,
dest="user",
action="store",
help="specify user on host")
migrate_sub.add_argument('-p',
default=None,
dest="password",
action="store",
help="specify password on host")
migrate_sub.set_defaults(func=deployer.migrate)
return parser
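# Example invocations (illustrative; assumes a config module such as conf.py
# that defines NODE_LIST, INIT_SYS_CMDS, CLEAN_SYS_CMDS and APPS):
#
#   python deployer.py init -c conf.py -u root -p secret -v
#   python deployer.py start -c conf.py -u root -p secret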
class Deployer(object):
def __init__(self):
self.show_output = False
def _exec_cmd_on_host(self,host,cmds,real_time=False):
"""
exec cmd on specify host
"""
ret_dict = {}
try:
sshclient = self._build_client()
sshclient.connect(host,username=self.user,password=self.password)
for cmd in cmds:
stdin,stdout,stderr = sshclient.exec_command(cmd)
retcode = stdout.channel.recv_exit_status()
ret_dict[cmd] = retcode
if not real_time:
continue
for line in stdout:
print line
for line in stderr:
print line
sshclient.close()
except Exception as e:
traceback.print_exc(file=sys.stdout)
return ret_dict
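    # Note (added comment): _exec_cmd_on_host returns {cmd: exit_status}; if the
    # SSH connection itself fails, the traceback is printed and the returned dict
    # may be empty or partial, which callers should keep in mind when indexing it.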
def init(self,options):
self.password = options.password
self.user = options.user
for node in options.module.NODE_LIST:
ret_dict = self._exec_cmd_on_host(node,options.module.INIT_SYS_CMDS,options.show_output)
for key in ret_dict:
if ret_dict[key] == 0:
print "exec %s on %s %s"%(key,node,RichText.render_green_text("succssfully"))
else:
print "exec %s on %s %s"%(key,node,RichText.render_red_text("error"))
def clean(self,options):
self.password = options.password
self.user = options.user
for node in options.module.NODE_LIST:
ret_dict = self._exec_cmd_on_host(node,options.module.CLEAN_SYS_CMDS,options.show_output)
for key in ret_dict:
if ret_dict[key] == 0:
print "exec %s on %s %s"%(key,node,RichText.render_green_text("succssfully"))
else:
print "exec %s on %s %s"%(key,node,RichText.render_red_text("error"))
def fetch(self,options):
self.password = options.password
self.user = options.user
for app in options.module.APPS:
print "fetch app %s"%RichText.render_green_text(app['name'])
fetch_cmd = "mkdir -p %s && cd %s && wget -O tmp.tar.gz %s && tar -zxvf tmp.tar.gz"%(app['workspace'],
app['workspace'],
app['package'])
print "exec %s"%fetch_cmd
for host in app['hosts']:
ret_dict = self._exec_cmd_on_host(host,[fetch_cmd],options.show_output)
                if ret_dict[fetch_cmd] == 0:
                    print "fetch on %s %s"%(host,RichText.render_green_text("successfully"))
else:
print "fetch on %s %s"%(host,RichText.render_red_text("error"))
def start(self,options):
self.password = options.password
self.user = options.user
for app in options.module.APPS:
print "start app %s"%RichText.render_green_text(app["name"])
start_cmd = "cd %s && %s"%(app['workspace'],
app['start_cmd'])
print "exec %s"%start_cmd
for host in app['hosts']:
ret_dict = self._exec_cmd_on_host(host,[start_cmd],True)
                if ret_dict[start_cmd] == 0:
                    print "start on %s %s"%(host,RichText.render_green_text("successfully"))
else:
print "start on %s %s"%(host,RichText.render_red_text("error"))
def stop(self,options):
self.password = options.password
self.user = options.user
for app in options.module.APPS:
print "stop app %s"%app["name"]
stop_cmd = "cd %s && %s"%(app["workspace"],app["stop_cmd"])
print "exec %s"%stop_cmd
for host in app["hosts"]:
ret_dict = self._exec_cmd_on_host(host,[stop_cmd])
                if ret_dict[stop_cmd] == 0:
                    print "stop on %s %s"%(host,RichText.render_green_text("successfully"))
else:
print "stop on %s %s"%(host,RichText.render_red_text("error"))
def migrate(self,options):
self.fetch(options)
self.stop(options)
self.start(options)
def _build_client(self):
sshclient = client.SSHClient()
sshclient.load_system_host_keys()
sshclient.set_missing_host_key_policy(client.AutoAddPolicy())
return sshclient
if __name__ == "__main__":
deploy = Deployer()
parser = build_parser(deploy)
options = parser.parse_args()
sys.path.append(os.getcwd())
if not options.config:
print "-c parameter is required"
sys.exit(-1)
    # strip only the trailing extension so a name containing ".py" elsewhere survives
    md = os.path.splitext(options.config)[0]
module = __import__(md)
options.module = module
options.func(options)
|
|
# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import decorator
import pecan
from keystoneauth1 import exceptions as ka_exception
from magnum.api import utils as api_utils
from magnum.common import clients
from magnum.common import exception
import magnum.conf
from magnum.drivers.common import driver
from magnum.i18n import _
from magnum import objects
CONF = magnum.conf.CONF
cluster_update_allowed_properties = set(['node_count', 'health_status',
'health_status_reason'])
federation_update_allowed_properties = set(['member_ids', 'properties'])
def ct_not_found_to_bad_request():
@decorator.decorator
def wrapper(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except exception.ClusterTemplateNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a POST request to create a Cluster
e.code = 400 # BadRequest
raise
return wrapper
def enforce_cluster_type_supported():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster = args[1]
cluster_template = objects.ClusterTemplate.get(
pecan.request.context, cluster.cluster_template_id)
cluster_type = (cluster_template.server_type,
cluster_template.cluster_distro,
cluster_template.coe)
driver.Driver.get_driver(*cluster_type)
return func(*args, **kwargs)
return wrapper
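# Illustrative (assumed) usage of these decorator factories on a pecan
# controller method, where the decorated function receives the cluster as its
# second positional argument (args[1] in the wrappers above):
#
#   @enforce_cluster_type_supported()
#   def post(self, cluster):
#       ...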
def enforce_driver_supported():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster_template = args[1]
cluster_distro = cluster_template.cluster_distro
if not cluster_distro:
try:
cli = clients.OpenStackClients(pecan.request.context)
image_id = cluster_template.image_id
image = api_utils.get_openstack_resource(cli.glance().images,
image_id,
'images')
cluster_distro = image.get('os_distro')
except Exception:
pass
cluster_type = (cluster_template.server_type,
cluster_distro,
cluster_template.coe)
driver.Driver.get_driver(*cluster_type)
return func(*args, **kwargs)
return wrapper
def enforce_cluster_volume_storage_size():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster = args[1]
cluster_template = objects.ClusterTemplate.get(
pecan.request.context, cluster.cluster_template_id)
_enforce_volume_storage_size(
cluster_template.as_dict(), cluster.as_dict())
return func(*args, **kwargs)
return wrapper
def enforce_valid_project_id_on_create():
@decorator.decorator
def wrapper(func, *args, **kwargs):
quota = args[1]
_validate_project_id(quota.project_id)
return func(*args, **kwargs)
return wrapper
def _validate_project_id(project_id):
try:
context = pecan.request.context
osc = clients.OpenStackClients(context)
osc.keystone().domain_admin_client.projects.get(project_id)
except ka_exception.http.NotFound:
raise exception.ProjectNotFound(name='project_id',
id=project_id)
def enforce_network_driver_types_create():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster_template = args[1]
_enforce_network_driver_types(cluster_template)
return func(*args, **kwargs)
return wrapper
def enforce_network_driver_types_update():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster_template_ident = args[1]
patch = args[2]
cluster_template = api_utils.get_resource('ClusterTemplate',
cluster_template_ident)
try:
cluster_template_dict = api_utils.apply_jsonpatch(
cluster_template.as_dict(), patch)
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
cluster_template = objects.ClusterTemplate(pecan.request.context,
**cluster_template_dict)
_enforce_network_driver_types(cluster_template)
return func(*args, **kwargs)
return wrapper
def _enforce_network_driver_types(cluster_template):
validator = Validator.get_coe_validator(cluster_template.coe)
if not cluster_template.network_driver:
cluster_template.network_driver = validator.default_network_driver
validator.validate_network_driver(cluster_template.network_driver)
def enforce_server_type():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster_template = args[1]
_enforce_server_type(cluster_template)
return func(*args, **kwargs)
return wrapper
def _enforce_server_type(cluster_template):
validator = Validator.get_coe_validator(cluster_template.coe)
validator.validate_server_type(cluster_template.server_type)
def enforce_volume_driver_types_create():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster_template = args[1]
_enforce_volume_driver_types(cluster_template.as_dict())
return func(*args, **kwargs)
return wrapper
def enforce_volume_storage_size_create():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster_template = args[1]
_enforce_volume_storage_size(cluster_template.as_dict(), {})
return func(*args, **kwargs)
return wrapper
def enforce_volume_driver_types_update():
@decorator.decorator
def wrapper(func, *args, **kwargs):
cluster_template_ident = args[1]
patch = args[2]
cluster_template = api_utils.get_resource('ClusterTemplate',
cluster_template_ident)
try:
cluster_template_dict = api_utils.apply_jsonpatch(
cluster_template.as_dict(), patch)
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
_enforce_volume_driver_types(cluster_template_dict)
return func(*args, **kwargs)
return wrapper
def _enforce_volume_driver_types(cluster_template):
validator = Validator.get_coe_validator(cluster_template['coe'])
if not cluster_template.get('volume_driver'):
return
validator.validate_volume_driver(cluster_template['volume_driver'])
def _enforce_volume_storage_size(cluster_template, cluster):
volume_size = cluster.get('docker_volume_size') \
or cluster_template.get('docker_volume_size')
storage_driver = cluster_template.get('docker_storage_driver')
if storage_driver == 'devicemapper':
if not volume_size or volume_size < 3:
raise exception.InvalidParameterValue(
'docker volume size %s GB is not valid, '
'expecting minimum value 3GB for %s storage '
'driver.' % (volume_size, storage_driver))
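# Worked example (added for clarity): with docker_storage_driver set to
# 'devicemapper', a cluster or template whose docker_volume_size is unset or
# below 3 (e.g. 2 GB) triggers InvalidParameterValue; 3 GB or more passes.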
def validate_cluster_properties(delta):
update_disallowed_properties = delta - cluster_update_allowed_properties
if update_disallowed_properties:
err = (_("cannot change cluster property(ies) %s.") %
", ".join(update_disallowed_properties))
raise exception.InvalidParameterValue(err=err)
def validate_federation_properties(delta):
update_disallowed_properties = delta - federation_update_allowed_properties
if update_disallowed_properties:
err = (_("cannot change federation property(ies) %s.") %
", ".join(update_disallowed_properties))
raise exception.InvalidParameterValue(err=err)
class Validator(object):
@classmethod
def get_coe_validator(cls, coe):
if coe == 'kubernetes':
return K8sValidator()
elif coe == 'swarm' or coe == 'swarm-mode':
return SwarmValidator()
else:
raise exception.InvalidParameterValue(
_('Requested COE type %s is not supported.') % coe)
@classmethod
def validate_network_driver(cls, driver):
cls._validate_network_driver_supported(driver)
cls._validate_network_driver_allowed(driver)
@classmethod
def _validate_network_driver_supported(cls, driver):
"""Confirm that driver is supported by Magnum for this COE."""
if driver not in cls.supported_network_drivers:
raise exception.InvalidParameterValue(_(
'Network driver type %(driver)s is not supported, '
'expecting a %(supported_drivers)s network driver.') % {
'driver': driver,
'supported_drivers': '/'.join(
cls.supported_network_drivers + ['unspecified'])})
@classmethod
def _validate_network_driver_allowed(cls, driver):
"""Confirm that driver is allowed via configuration for this COE."""
if ('all' not in cls.allowed_network_drivers and
driver not in cls.allowed_network_drivers):
raise exception.InvalidParameterValue(_(
'Network driver type %(driver)s is not allowed, '
'expecting a %(allowed_drivers)s network driver. ') % {
'driver': driver,
'allowed_drivers': '/'.join(
cls.allowed_network_drivers + ['unspecified'])})
@classmethod
def validate_volume_driver(cls, driver):
cls._validate_volume_driver_supported(driver)
@classmethod
def _validate_volume_driver_supported(cls, driver):
"""Confirm that volume driver is supported by Magnum for this COE."""
if driver not in cls.supported_volume_driver:
raise exception.InvalidParameterValue(_(
'Volume driver type %(driver)s is not supported, '
'expecting a %(supported_volume_driver)s volume driver.') % {
'driver': driver,
'supported_volume_driver': '/'.join(
cls.supported_volume_driver + ['unspecified'])})
@classmethod
def validate_server_type(cls, server_type):
cls._validate_server_type(server_type)
@classmethod
def _validate_server_type(cls, server_type):
"""Confirm that server type is supported by Magnum for this COE."""
if server_type not in cls.supported_server_types:
raise exception.InvalidParameterValue(_(
'Server type %(server_type)s is not supported, '
'expecting a %(supported_server_types)s server type.') % {
'server_type': server_type,
'supported_server_types': '/'.join(
cls.supported_server_types + ['unspecified'])})
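# Illustrative sketch (not part of the original module) of how the validators
# defined below are selected and used:
#
#   validator = Validator.get_coe_validator('kubernetes')  # -> K8sValidator
#   validator.validate_network_driver('calico')  # accepted, assuming the default
#                                                # allowed_network_drivers config
#   validator.validate_network_driver('docker')  # raises InvalidParameterValue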
class K8sValidator(Validator):
supported_network_drivers = ['flannel', 'calico']
supported_server_types = ['vm', 'bm']
allowed_network_drivers = (
CONF.cluster_template.kubernetes_allowed_network_drivers)
default_network_driver = (
CONF.cluster_template.kubernetes_default_network_driver)
supported_volume_driver = ['cinder']
class SwarmValidator(Validator):
supported_network_drivers = ['docker', 'flannel']
supported_server_types = ['vm', 'bm']
allowed_network_drivers = (CONF.cluster_template.
swarm_allowed_network_drivers)
default_network_driver = (CONF.cluster_template.
swarm_default_network_driver)
supported_volume_driver = ['rexray']
class MesosValidator(Validator):
supported_network_drivers = ['docker']
supported_server_types = ['vm', 'bm']
allowed_network_drivers = (CONF.cluster_template.
mesos_allowed_network_drivers)
default_network_driver = (CONF.cluster_template.
mesos_default_network_driver)
supported_volume_driver = ['rexray']
|
|
from _ebcf_alexa import incoming_types
import pytest
VALID_INTENT_LAMBDA_EVENT = {
"session": {
"new": False,
"sessionId": "SessionId.10809a6f-e431-42f6-8d02-1d71ffab2251",
"application": {
"applicationId": "amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969"
},
"attributes": {},
"user": {
"userId": "amzn1.ask.account.XXXXX"
}
},
"request": {
"type": "IntentRequest",
"requestId": "EdwRequestId.eb6a8e40-8272-4d2b-ad2c-d9b7cc787a67",
"intent": {
"name": "DefaultQuery",
"slots": {
"RelativeTo": {
"name": "RelativeTo"
},
"Section": {
"name": "Section",
"value": "workout"
}
}
},
"locale": "en-US",
"timestamp": "2017-08-19T19:04:26Z"
},
"context": {
"AudioPlayer": {
"playerActivity": "IDLE"
},
"System": {
"application": {
"applicationId": "amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969"
},
"user": {
"userId": "amzn1.ask.account.XXXXX"
},
"device": {
"supportedInterfaces": {}
}
}
},
"version": "1.0"
}
@pytest.fixture
def valid_intent_lambda_event() -> incoming_types.LambdaEvent:
return incoming_types.LambdaEvent(VALID_INTENT_LAMBDA_EVENT)
def test_intent_request(valid_intent_lambda_event: incoming_types.LambdaEvent):
req = valid_intent_lambda_event
assert req.session.application.application_id == "amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969"
assert req.request.type == incoming_types.RequestTypes.IntentRequest
assert req.request.intent.name == 'DefaultQuery'
assert req.request.intent.slots['RelativeTo'].name == 'RelativeTo'
assert not req.request.intent.slots['RelativeTo'].has_value
assert repr(req.request.intent.slots['RelativeTo']) # test that this returns a non-empty string...
assert not req.session.new
def test_intent_to_dict(valid_intent_lambda_event):
intent = valid_intent_lambda_event.request.intent
assert intent.last_intent is None
assert intent.to_dict() == {
'name': 'DefaultQuery',
'slots': {}
}
def test_intent_slot_to_valid_flag_with_to_dict(valid_intent_lambda_event):
intent = valid_intent_lambda_event.request.intent
intent.slots['Section'].is_valid = True
assert intent.to_dict() == {
'name': 'DefaultQuery',
'slots': {
'Section': {
'name': 'Section',
'value': 'workout'
}
}
}
VALID_LAUNCH_REQUEST_LAMBDA_EVENT = {
'version': '1.0',
'session': {
'new': True,
'sessionId': 'amzn1.echo-api.session.c1b6cfa8-e11d-4677-9431-0cab8e68315d',
'application': {
'applicationId': 'amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969'},
'user': {
'userId': 'amzn1.ask.account.XXXXX'}},
'context': {
'AudioPlayer': {'playerActivity': 'STOPPED'}, 'Display': {},
'System': {'application': {
'applicationId': 'amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969'},
'user': {
'userId': 'amzn1.ask.account.XXXXXX'},
'device': {
'deviceId': 'amzn1.ask.device.XXXXX',
'supportedInterfaces': {'AudioPlayer': {}, 'Display': {
'templateVersion': '1.0', 'markupVersion': '1.0'},
'VideoApp': {}}},
'apiEndpoint': 'https://api.amazonalexa.com'}},
'request': {'type': 'LaunchRequest',
'requestId': 'amzn1.echo-api.request.0f045029-0f67-4a4f-9ccd-a0e7822b789d',
'timestamp': '2017-08-19T19:58:27Z', 'locale': 'en-US'}}
def test_launch_request():
req = incoming_types.LambdaEvent(VALID_LAUNCH_REQUEST_LAMBDA_EVENT)
assert req.session.application.application_id == "amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969"
assert req.request.type == incoming_types.RequestTypes.LaunchRequest
assert req.session.new
VALID_CANCEL_INTENT_EVENT = {
'version': '1.0',
'session': {'new': False, 'sessionId': 'amzn1.echo-api.session.3fea2408-d1ed-44b2-8343-42106601e585',
'application': {'applicationId': 'amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969'}, 'user': {
'userId': 'amzn1.ask.account.XXXXXX'}},
'context': {'AudioPlayer': {'playerActivity': 'STOPPED'}, 'Display': {'token': ''},
'System': {'application': {'applicationId': 'amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969'},
'user': {
'userId': 'amzn1.ask.account.XXXXXX'},
'device': {
'deviceId': 'amzn1.ask.device.XXXXXXX',
'supportedInterfaces': {'AudioPlayer': {},
'Display': {'templateVersion': '1.0', 'markupVersion': '1.0'},
'VideoApp': {}}}, 'apiEndpoint': 'https://api.amazonalexa.com'}},
'request': {'type': 'IntentRequest', 'requestId': 'amzn1.echo-api.request.6cdc55fe-d1be-46bc-b315-0f1a779a24b6',
'timestamp': '2017-08-19T20:08:35Z', 'locale': 'en-US',
'intent': {'name': 'AMAZON.CancelIntent', 'confirmationStatus': 'NONE'}}}
def test_cancel_intent():
req = incoming_types.LambdaEvent(VALID_CANCEL_INTENT_EVENT)
assert req.request.intent.name == 'AMAZON.CancelIntent'
str(req.request) # does not crash
# This is what the request used to look like when I started making skills.
# I think I should be backwards compatible..
OLD_CODE_REQUEST = {
"session": {
"sessionId": "SessionId.dff1b708-2aeb-4d08-8fa8-aaf549836707",
"application": {
"applicationId": "amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969"
},
"attributes": {},
"user": {
"userId": "amzn1.ask.account.AGF7EUF4RNORLHSZDNU7KR7W75A2GRGQPT6OMHLBACZBLFKZTA2SPNW2UR527IFJRSPTPMMG5F2J64FH67DWLVUYNRDO5IOLQ2OSS22UJAMPG7YLDFDFSMMVQKWUIIIX5PI3RBDV4YGFZN6M5LR2GV52NQND5PJPVHVE3NAYGSGPLNNPDI6PYTKNAQMBJW2KLONN2Z7F77FUZPA"
},
"new": True
},
"request": {
"type": "IntentRequest",
"requestId": "EdwRequestId.64cef551-0040-4b50-967c-5a2698067cc2",
"locale": "en-US",
"timestamp": "2017-06-03T23:27:15Z",
"intent": {
"name": "GetWOD",
"slots": {
"Date": {
"name": "Date",
"value": "2017-06-02"
}
}
}
},
"version": "1.0"
}
def test_old_request():
req = incoming_types.LambdaEvent(OLD_CODE_REQUEST)
assert req.request.intent.name == 'GetWOD'
INTENT_WITH_ATTRIBUTES = {
"session": {
"new": False,
"sessionId": "SessionId.10809a6f-e431-42f6-8d02-1d71ffab2251",
"application": {
"applicationId": "amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969"
},
"attributes": {
"intents": {
"DefaultQuery": {
"name": "DefaultQuery",
"slots": {
"RelativeTo": {
"name": "RelativeTo",
"value": "today's"
},
"Section": {
"name": "Section",
"value": "turd"
}
}
},
}
},
"user": {
"userId": "amzn1.ask.account.XXXXX"
}
},
"request": {
"type": "IntentRequest",
"requestId": "EdwRequestId.eb6a8e40-8272-4d2b-ad2c-d9b7cc787a67",
"intent": {
"name": "DefaultQuery",
"slots": {
"RelativeTo": {
"name": "RelativeTo"
},
"Section": {
"name": "Section",
"value": "workout"
}
}
},
"locale": "en-US",
"timestamp": "2017-08-19T19:04:26Z"
},
"context": {
"AudioPlayer": {
"playerActivity": "IDLE"
},
"System": {
"application": {
"applicationId": "amzn1.ask.skill.d6f2f7c4-7689-410d-9c35-8f8baae37969"
},
"user": {
"userId": "amzn1.ask.account.XXXXX"
},
"device": {
"supportedInterfaces": {}
}
}
},
"version": "1.0"
}
def test_merging():
req = incoming_types.LambdaEvent(INTENT_WITH_ATTRIBUTES)
assert req.request.intent.last_intent is not None
assert req.request.intent.last_intent.name == 'DefaultQuery'
assert req.request.intent.slots['RelativeTo'].value == 'today\'s'
assert req.request.intent.slots['Section'].value == 'workout'
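# Hedged follow-up sketch (not an existing test): combining the behaviours
# asserted in test_merging and test_intent_slot_to_valid_flag_with_to_dict
# above, flagging the merged 'Section' slot as valid should serialize only
# that slot, with the value taken from the incoming request rather than the
# stored attributes. The expected dict is an assumption derived from those
# earlier assertions, not a documented guarantee:
#
#     intent = incoming_types.LambdaEvent(INTENT_WITH_ATTRIBUTES).request.intent
#     intent.slots['Section'].is_valid = True
#     intent.to_dict()
#     # expected: {'name': 'DefaultQuery',
#     #            'slots': {'Section': {'name': 'Section',
#     #                                  'value': 'workout'}}}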
|
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Parameters module allows you to specify build parameters for a job.
**Component**: parameters
:Macro: parameter
:Entry Point: jenkins_jobs.parameters
Example::
job:
name: test_job
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import JenkinsJobsException
def base_param(parser, xml_parent, data, do_default, ptype):
pdef = XML.SubElement(xml_parent, ptype)
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
if do_default:
default = data.get('default', None)
if default:
XML.SubElement(pdef, 'defaultValue').text = default
else:
XML.SubElement(pdef, 'defaultValue')
return pdef
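# For orientation (illustrative sketch, not emitted verbatim by Jenkins):
# base_param appends a fragment of the following shape under xml_parent,
# with <defaultValue> only present when do_default is True:
#
#     <{ptype}>
#       <name>NAME</name>
#       <description>DESCRIPTION</description>
#       <defaultValue>DEFAULT</defaultValue>
#     </{ptype}>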
def string_param(parser, xml_parent, data):
"""yaml: string
A string parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.StringParameterDefinition')
def password_param(parser, xml_parent, data):
"""yaml: password
A password parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- password:
name: FOO
default: 1HSC0Ts6E161FysGf+e1xasgsHkgleLh09JUTYnipPvw=
description: "A parameter named FOO."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.PasswordParameterDefinition')
def bool_param(parser, xml_parent, data):
"""yaml: bool
A boolean parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- bool:
name: FOO
default: false
description: "A parameter named FOO, defaults to 'false'."
"""
data['default'] = str(data.get('default', False)).lower()
base_param(parser, xml_parent, data, True,
'hudson.model.BooleanParameterDefinition')
def file_param(parser, xml_parent, data):
"""yaml: file
A file parameter.
:arg str name: the target location for the file upload
:arg str description: a description of the parameter (optional)
Example::
parameters:
- file:
name: test.txt
description: "Upload test.txt."
"""
base_param(parser, xml_parent, data, False,
'hudson.model.FileParameterDefinition')
def text_param(parser, xml_parent, data):
"""yaml: text
A text parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- text:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.TextParameterDefinition')
def label_param(parser, xml_parent, data):
"""yaml: label
A node label parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- label:
name: node
default: precise
description: "The node on which to run the job"
"""
base_param(parser, xml_parent, data, True,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'LabelParameterDefinition')
def node_param(parser, xml_parent, data):
"""yaml: node
    Defines a list of nodes on which this job could potentially be executed.
    If you are using a node or label parameter to run your job on a
    particular node, you should not use the option "Restrict where this
    project can be run" in the job configuration - it will no longer have
    any effect on the selection of your node!
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
    :arg list default-nodes: The nodes used when the job is triggered by
        anything other than a manual build
:arg list allowed-slaves: The nodes available for selection
when job gets triggered manually. Empty means 'All'.
:arg bool ignore-offline-nodes: Ignore nodes not online or not having
executors (default false)
:arg bool allowed-multiselect: Allow multi node selection for concurrent
builds - this option only makes sense (and must be selected!) in
case the job is configured with: "Execute concurrent builds if
necessary". With this configuration the build will be executed on all
the selected nodes in parallel. (default false)
Example:
.. literalinclude:: /../../tests/parameters/fixtures/node-param001.yaml
:language: yaml
"""
pdef = base_param(parser, xml_parent, data, False,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'NodeParameterDefinition')
default = XML.SubElement(pdef, 'defaultSlaves')
if 'default-slaves' in data:
for slave in data['default-slaves']:
XML.SubElement(default, 'string').text = slave
allowed = XML.SubElement(pdef, 'allowedSlaves')
if 'allowed-slaves' in data:
for slave in data['allowed-slaves']:
XML.SubElement(allowed, 'string').text = slave
XML.SubElement(pdef, 'ignoreOfflineNodes').text = str(
data.get('ignore-offline-nodes', False)).lower()
if data.get('allowed-multiselect', False):
XML.SubElement(pdef, 'triggerIfResult').text = \
'allowMultiSelectionForConcurrentBuilds'
else:
XML.SubElement(pdef, 'triggerIfResult').text = \
'multiSelectionDisallowed'
XML.SubElement(pdef, 'allowMultiNodeSelection').text = str(
data.get('allowed-multiselect', False)).lower()
XML.SubElement(pdef, 'triggerConcurrentBuilds').text = str(
data.get('allowed-multiselect', False)).lower()
def choice_param(parser, xml_parent, data):
"""yaml: choice
A single selection parameter.
:arg str name: the name of the parameter
:arg list choices: the available choices
:arg str description: a description of the parameter (optional)
Example::
parameters:
- choice:
name: project
choices:
- nova
- glance
description: "On which project to run?"
"""
pdef = base_param(parser, xml_parent, data, False,
'hudson.model.ChoiceParameterDefinition')
choices = XML.SubElement(pdef, 'choices',
{'class': 'java.util.Arrays$ArrayList'})
a = XML.SubElement(choices, 'a', {'class': 'string-array'})
for choice in data['choices']:
XML.SubElement(a, 'string').text = choice
def run_param(parser, xml_parent, data):
"""yaml: run
A run parameter.
:arg str name: the name of the parameter
    :arg str project-name: the name of the job from which the user can pick runs
:arg str description: a description of the parameter (optional)
Example:
.. literalinclude:: /../../tests/parameters/fixtures/run-param001.yaml
:language: yaml
"""
pdef = base_param(parser, xml_parent, data, False,
'hudson.model.RunParameterDefinition')
XML.SubElement(pdef, 'projectName').text = data['project-name']
def extended_choice_param(parser, xml_parent, data):
"""yaml: extended-choice
Creates an extended choice parameter where values can be read from a file
Requires the Jenkins :jenkins-wiki:`Extended Choice Parameter Plugin
<Extended+Choice+Parameter+plugin>`.
:arg str name: name of the parameter
:arg str description: description of the parameter
(optional, default '')
:arg str property-file: location of property file to read from
(optional, default '')
:arg str property-key: key for the property-file (optional, default '')
:arg bool quote-value: whether to put quotes around the property
when passing to Jenkins (optional, default false)
:arg str visible-items: number of items to show in the list
(optional, default 5)
:arg str type: type of select, can be single-select, multi-select,
radio, checkbox or textbox (optional, default single-select)
:arg str value: comma separated list of values for the single select
or multi-select box (optional, default '')
:arg str default-value: used to set the initial selection of the
single-select or multi-select box (optional, default '')
:arg str default-property-file: location of property file when default
value needs to come from a property file (optional, default '')
:arg str default-property-key: key for the default property file
(optional, default '')
:arg str multi-select-delimiter: value between selections when the
        parameter is a multi-select (optional, default ',')
Example:
.. literalinclude:: \
/../../tests/parameters/fixtures/extended-choice-param001.yaml
:language: yaml
"""
pdef = base_param(parser, xml_parent, data, False,
'com.cwctravel.hudson.plugins.'
'extended__choice__parameter.'
'ExtendedChoiceParameterDefinition')
XML.SubElement(pdef, 'value').text = data.get('value', '')
XML.SubElement(pdef, 'visibleItemCount').text = str(data.get(
'visible-items', data.get('visible-item-count', 5)))
XML.SubElement(pdef, 'multiSelectDelimiter').text = data.get(
'multi-select-delimiter', ',')
XML.SubElement(pdef, 'quoteValue').text = str(data.get('quote-value',
False)).lower()
XML.SubElement(pdef, 'defaultValue').text = data.get(
'default-value', '')
choice = data.get('type', 'single-select')
choicedict = {'single-select': 'PT_SINGLE_SELECT',
'multi-select': 'PT_MULTI_SELECT',
'radio': 'PT_RADIO',
'checkbox': 'PT_CHECKBOX',
'textbox': 'PT_TEXTBOX',
'PT_SINGLE_SELECT': 'PT_SINGLE_SELECT',
'PT_MULTI_SELECT': 'PT_MULTI_SELECT',
'PT_RADIO': 'PT_RADIO',
'PT_CHECKBOX': 'PT_CHECKBOX',
'PT_TEXTBOX': 'PT_TEXTBOX'}
if choice in choicedict:
XML.SubElement(pdef, 'type').text = choicedict[choice]
else:
raise JenkinsJobsException("Type entered is not valid, must be one "
"of: single-select, multi-select, radio, "
"textbox or checkbox")
XML.SubElement(pdef, 'propertyFile').text = data.get('property-file', '')
XML.SubElement(pdef, 'propertyKey').text = data.get('property-key', '')
XML.SubElement(pdef, 'defaultPropertyFile').text = data.get(
'default-property-file', '')
XML.SubElement(pdef, 'defaultPropertyKey').text = data.get(
'default-property-key', '')
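# Illustrative data dict (hypothetical values) that extended_choice_param
# accepts, mirroring the options documented in the docstring above:
#
#     {'name': 'BRANCH',
#      'type': 'multi-select',
#      'value': 'master,stable,dev',
#      'default-value': 'master',
#      'multi-select-delimiter': ' ',
#      'visible-items': 3,
#      'quote-value': False}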
def validating_string_param(parser, xml_parent, data):
"""yaml: validating-string
A validating string parameter
Requires the Jenkins :jenkins-wiki:`Validating String Plugin
<Validating+String+Parameter+Plugin>`.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str regex: a regular expression to validate the string
:arg str msg: a message to display upon failed validation
Example::
parameters:
- validating-string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
regex: [A-Za-z]*
msg: Your entered value failed validation
"""
pdef = base_param(parser, xml_parent, data, True,
'hudson.plugins.validating__string__parameter.'
'ValidatingStringParameterDefinition')
XML.SubElement(pdef, 'regex').text = data['regex']
XML.SubElement(pdef, 'failedValidationMessage').text = data['msg']
def svn_tags_param(parser, xml_parent, data):
"""yaml: svn-tags
    An SVN tag parameter
Requires the Jenkins :jenkins-wiki:`Parameterized Trigger Plugin
<Parameterized+Trigger+Plugin>`.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str url: the url to list tags from
:arg str filter: the regular expression to filter tags
Example::
parameters:
- svn-tags:
name: BRANCH_NAME
default: release
description: A parameter named BRANCH_NAME default is release
url: http://svn.example.com/repo
filter: [A-za-z0-9]*
"""
pdef = base_param(parser, xml_parent, data, True,
'hudson.scm.listtagsparameter.'
'ListSubversionTagsParameterDefinition')
XML.SubElement(pdef, 'tagsDir').text = data['url']
XML.SubElement(pdef, 'tagsFilter').text = data.get('filter', None)
XML.SubElement(pdef, 'reverseByDate').text = "true"
XML.SubElement(pdef, 'reverseByName').text = "false"
XML.SubElement(pdef, 'maxTags').text = "100"
XML.SubElement(pdef, 'uuid').text = "1-1-1-1-1"
def dynamic_choice_param(parser, xml_parent, data):
"""yaml: dynamic-choice
Dynamic Choice Parameter
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Jenkins+Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices.
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-choice:
name: OPTIONS
description: "Available options"
script: "['optionA', 'optionB']"
remote: false
read-only: false
"""
dynamic_param_common(parser, xml_parent, data, 'ChoiceParameterDefinition')
def dynamic_string_param(parser, xml_parent, data):
"""yaml: dynamic-string
Dynamic Parameter
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Jenkins+Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-string:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script: "bar"
remote: false
read-only: false
"""
dynamic_param_common(parser, xml_parent, data, 'StringParameterDefinition')
def dynamic_choice_scriptler_param(parser, xml_parent, data):
"""yaml: dynamic-choice-scriptler
Dynamic Choice Parameter (Scriptler)
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Jenkins+Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-choice-scriptler:
name: OPTIONS
description: "Available options"
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(parser, xml_parent, data,
'ScriptlerChoiceParameterDefinition')
def dynamic_string_scriptler_param(parser, xml_parent, data):
"""yaml: dynamic-string-scriptler
Dynamic Parameter (Scriptler)
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Jenkins+Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-string-scriptler:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(parser, xml_parent, data,
'ScriptlerStringParameterDefinition')
def dynamic_param_common(parser, xml_parent, data, ptype):
pdef = base_param(parser, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
+ ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__script').text = data.get('script', None)
localBaseDir = XML.SubElement(pdef, '__localBaseDirectory',
{'serialization': 'custom'})
filePath = XML.SubElement(localBaseDir, 'hudson.FilePath')
default = XML.SubElement(filePath, 'default')
XML.SubElement(filePath, 'boolean').text = "true"
XML.SubElement(default, 'remote').text = \
"/var/lib/jenkins/dynamic_parameter/classpath"
XML.SubElement(pdef, '__remoteBaseDirectory').text = \
"dynamic_parameter_classpath"
XML.SubElement(pdef, '__classPath').text = data.get('classpath', None)
XML.SubElement(pdef, 'readonlyInputField').text = str(
data.get('read-only', False)).lower()
def dynamic_scriptler_param_common(parser, xml_parent, data, ptype):
pdef = base_param(parser, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
'scriptler.' + ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__scriptlerScriptId').text = data.get(
'script-id', None)
parametersXML = XML.SubElement(pdef, '__parameters')
parameters = data.get('parameters', [])
if parameters:
for parameter in parameters:
parameterXML = XML.SubElement(parametersXML,
'com.seitenbau.jenkins.plugins.'
'dynamicparameter.scriptler.'
'ScriptlerParameterDefinition_'
'-ScriptParameter')
XML.SubElement(parameterXML, 'name').text = parameter['name']
XML.SubElement(parameterXML, 'value').text = parameter['value']
XML.SubElement(pdef, 'readonlyInputField').text = str(data.get(
'read-only', False)).lower()
def matrix_combinations_param(parser, xml_parent, data):
"""yaml: matrix-combinations
Matrix combinations parameter
Requires the Jenkins :jenkins-wiki:`Matrix Combinations Plugin
<Matrix+Combinations+Plugin>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
    :arg str filter: Groovy expression used to filter which combinations
        are selected by default (optional)
Example:
.. literalinclude:: \
/../../tests/parameters/fixtures/matrix-combinations-param001.yaml
:language: yaml
"""
element_name = 'hudson.plugins.matrix__configuration__parameter.' \
'MatrixCombinationsParameterDefinition'
pdef = XML.SubElement(xml_parent, element_name)
if 'name' not in data:
raise JenkinsJobsException('matrix-combinations must have a name '
'parameter.')
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
combination_filter = data.get('filter')
if combination_filter:
XML.SubElement(pdef, 'defaultCombinationFilter').text = \
combination_filter
return pdef
class Parameters(jenkins_jobs.modules.base.Base):
sequence = 21
component_type = 'parameter'
component_list_type = 'parameters'
def gen_xml(self, parser, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
parameters = data.get('parameters', [])
hmodel = 'hudson.model.'
if parameters:
# The conditionals here are to work around the extended_choice
# parameter also being definable in the properties module. This
# usage has been deprecated but not removed. Because it may have
# added these elements before us, we need to check if they already
# exist, and only add them if they're missing.
pdefp = properties.find(hmodel + 'ParametersDefinitionProperty')
if pdefp is None:
pdefp = XML.SubElement(properties,
hmodel + 'ParametersDefinitionProperty')
pdefs = pdefp.find('parameterDefinitions')
if pdefs is None:
pdefs = XML.SubElement(pdefp, 'parameterDefinitions')
for param in parameters:
self.registry.dispatch('parameter',
parser, pdefs, param)
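# Usage sketch (illustrative, not part of the module): every *_param function
# appends one parameter definition under the element it is given, so they can
# be exercised directly. The `parser` argument is unused by string_param and
# None is passed here purely for demonstration:
#
#     parent = XML.Element('parameterDefinitions')
#     string_param(None, parent, {'name': 'FOO', 'default': 'bar',
#                                 'description': "A parameter named FOO."})
#     XML.tostring(parent)
#     # -> b'<parameterDefinitions>
#     #       <hudson.model.StringParameterDefinition>...'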
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import itertools
import sys
from mox3 import mox
from neutronclient.neutron.v2_0 import port
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20PortJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20PortJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_port(self):
"""Create port: netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_extra_dhcp_opts_args(self):
"""Create port: netid --extra_dhcp_opt."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s' %
dhcp_opt)]
position_names = ['network_id', 'extra_dhcp_opts']
position_values = [netid, extra_dhcp_opts]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_extra_dhcp_opts_args_ip_version(self):
"""Create port: netid --extra_dhcp_opt."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': "4"},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': "6"},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45',
'ip_version': "4"}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s,'
'ip_version=%(ip_version)s' %
dhcp_opt)]
position_names = ['network_id', 'extra_dhcp_opts']
position_values = [netid, extra_dhcp_opts]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
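    # For reference (illustrative only): the loop above turns each opt dict
    # into a CLI pair such as
    #     --extra-dhcp-opt opt_name=bootfile-name,opt_value=pxelinux.0,ip_version=4
    # which _test_create_resource is expected to round-trip into the
    # 'extra_dhcp_opts' field of the request body.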
def test_create_port_full(self):
"""Create port: --mac_address mac --device_id deviceid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--mac_address', 'mac', '--device_id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
position_values = [netid, 'mac', 'deviceid']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--mac-address', 'mac', '--device-id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_normal(self):
"""Create port: --vnic_type normal netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'normal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['normal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'normal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['normal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_direct(self):
"""Create port: --vnic_type direct netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'direct', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'direct', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_macvtap(self):
"""Create port: --vnic_type macvtap netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'macvtap', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['macvtap', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'macvtap', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['macvtap', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_binding_profile(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--binding_profile', '{"foo":"bar"}', netid]
position_names = ['binding:profile', 'network_id']
position_values = [{'foo': 'bar'}, netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--binding-profile', '{"foo":"bar"}', netid]
position_names = ['binding:profile', 'network_id']
position_values = [{'foo': 'bar'}, netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_tenant(self):
"""Create port: --tenant_id tenantid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--tenant_id', 'tenantid', netid, ]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', netid, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_port_tags(self):
"""Create port: netid mac_address device_id --tags a b."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--tags', 'a', 'b']
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_port_secgroup(self):
"""Create port: --security-group sg1_id netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups(self):
"""Create port: <security_groups> netid
The <security_groups> are
--security-group sg1_id --security-group sg2_id
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id', 'sg2_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroup_off(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--no-security-group', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, []]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups_list(self):
"""Create port: netid <security_groups>
The <security_groups> are
--security-groups list=true sg_id1 sg_id2
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--security-groups', 'list=true', 'sg_id1', 'sg_id2']
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg_id1', 'sg_id2']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_ports(self):
"""List ports: -D."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ports_pagination(self):
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ports_sort(self):
"""list ports: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ports_limit(self):
"""list ports: -P."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_list_ports_tags(self):
"""List ports: -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_ports_detail_tags(self):
"""List ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def test_list_ports_with_fixed_ips_in_csv(self):
"""List ports: -f csv."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
fixed_ips = [{"subnet_id": "30422057-d6df-4c90-8314-aefb5e326666",
"ip_address": "10.0.0.12"},
{"subnet_id": "30422057-d6df-4c90-8314-aefb5e326666",
"ip_address": "10.0.0.4"}]
contents = [{'name': 'name1', 'fixed_ips': fixed_ips}]
self._test_list_resources(resources, cmd, True,
response_contents=contents,
output_format='csv')
def _test_list_router_port(self, resources, cmd,
myid, detail=False, tags=(),
fields_1=(), fields_2=()):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
args.append(myid)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
for field in itertools.chain(fields_1, fields_2):
if query:
query += "&fields=" + field
else:
query = "fields=" + field
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
query = query and query + '&device_id=%s' or 'device_id=%s'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query % myid),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('myid1', _str)
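    # For orientation (illustrative only): with detail=True, tags=['a', 'b']
    # and no fields, the helper above ends up requesting roughly
    #     GET <ports_path>?tag=a&tag=b&verbose=True&device_id=<myid>
    # and then checks that 'myid1' shows up in the listed output.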
def test_list_router_ports(self):
"""List router ports: -D."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, True)
def test_list_router_ports_tags(self):
"""List router ports: -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, tags=['a', 'b'])
def test_list_router_ports_detail_tags(self):
"""List router ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
detail=True, tags=['a', 'b'])
def test_list_router_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_port(self):
"""Update port: myid --name myname --admin-state-up False
--tags a b.
"""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--admin-state-up', 'False',
'--tags', 'a', 'b'],
{'name': 'myname',
'admin_state_up': 'False',
'tags': ['a', 'b'], })
def test_update_port_secgroup(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id', myid]
updatefields = {'security_groups': ['sg1_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_secgroups(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
myid]
updatefields = {'security_groups': ['sg1_id', 'sg2_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts(self):
"""Update port: myid --extra_dhcp_opt."""
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_fixed_ip(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
net_id = 'net_id'
ip_addr = '123.123.123.123'
args = [myid,
'--fixed-ip', "network_id=%(net_id)s,ip_address=%(ip_addr)s" %
{'net_id': net_id,
'ip_addr': ip_addr}]
updated_fields = {"fixed_ips": [{'network_id': net_id,
'ip_address': ip_addr}]}
self._test_update_resource(resource, cmd, myid, args, updated_fields)
def test_update_port_device_id_device_owner(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--device-id', 'dev_id', '--device-owner', 'fake', myid]
updatefields = {'device_id': 'dev_id',
'device_owner': 'fake'}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts_ip_version(self):
"""Update port: myid --extra_dhcp_opt."""
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0,ip_version=4",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=2001:192:168::1,ip_version=6",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=null,ip_version=4"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': '4'},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': '6'},
{'opt_name': 'server-ip-address',
'opt_value': None,
'ip_version': '4'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_delete_extra_dhcp_opts_from_port(self):
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=null",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
        # the client code will change the null to None and send it to the
        # server, where it is interpreted as deleting the DHCP option on
        # the port.
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': None},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_security_group_off(self):
"""Update port: --no-security-groups myid."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['--no-security-groups', 'myid'],
{'security_groups': []})
def test_show_port(self):
"""Show port: --fields id --fields name myid."""
resource = 'port'
cmd = port.ShowPort(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_port(self):
"""Delete port: myid."""
resource = 'port'
cmd = port.DeletePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
class CLITestV20PortXML(CLITestV20PortJSON):
format = 'xml'
|
|
"""
********************************************************************************
* Name: handoff.py
* Author: Nathan Swain and Scott Christensen
* Created On: August 11, 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
"""
import inspect
import json
from django.shortcuts import redirect
from django.http import HttpResponseBadRequest
import tethys_apps
from tethys_apps.base.function_extractor import TethysFunctionExtractor
class HandoffManager:
"""
An object that is used to interact with HandoffHandlers.
Attributes:
        app (TethysAppBase): Instance of a TethysAppBase object.
handlers (list[HandoffHandler]): A list of HandoffHandlers registered in the app.
valid_handlers (list[HandoffHandler]): A filtered list of only the valid HandoffHandlers.
"""
def __init__(self, app):
"""
Constructor
"""
self.app = app
self.handlers = app.handoff_handlers() or []
self.valid_handlers = self._get_valid_handlers()
def __repr__(self):
"""
String representation
"""
return '<Handoff Manager: app={0}, handlers={1}>'.format(self.app, [handler.name for handler in self.handlers])
def get_capabilities(self, app_name=None, external_only=False, jsonify=False):
"""
Gets a list of the valid handoff handlers.
Args:
app_name (str, optional): The name of another app whose capabilities should be listed. Defaults to None in which case the capabilities of the current app will be listed.
external_only (bool, optional): If True only return handlers where the internal attribute is False. Default is False.
            jsonify (bool, optional): If True, return the JSON representation of the handlers. Default is False.
Returns:
A list of valid HandoffHandler objects (or a JSON string if jsonify=True) representing the capabilities of app_name, or None if no app with app_name is found.
""" # noqa: E501
manager = self._get_handoff_manager_for_app(app_name)
if manager:
handlers = manager.valid_handlers
if external_only:
handlers = [handler for handler in handlers if not handler.internal]
if jsonify:
handlers = json.dumps([handler.__dict__ for handler in handlers])
return handlers
def get_handler(self, handler_name, app_name=None):
"""
Returns the HandoffHandler with name == handler_name.
Args:
handler_name (str): the name of a HandoffHandler object.
app_name (str, optional): the name of the app with handler_name. Defaults to None in which case the current app will be used.
Returns:
A HandoffHandler object where the name attribute is equal to handler_name or None if no HandoffHandler with that name is found or no app with app_name is found.
""" # noqa: E501
manager = self._get_handoff_manager_for_app(app_name)
if manager:
for handler in manager.valid_handlers:
if handler.name == handler_name:
return handler
def handoff(self, request, handler_name, app_name=None, external_only=True, **kwargs):
"""
Calls handler if it is not internal and if it exists for the app.
Args:
request (HttpRequest): The request object passed by the http call.
handler_name (str): The name of the HandoffHandler object to handle the handoff. Must not be internal.
app_name (str, optional): The name of another app where the handler should exist. Defaults to None in which case the current app will attempt to handle the handoff.
**kwargs: Key-value pairs to be passed on to the handler.
Returns:
HttpResponse object.
""" # noqa: E501
error = {"message": "",
"code": 400,
"status": "error",
"app_name": app_name or self.app.name,
"handler_name": handler_name}
manager = self._get_handoff_manager_for_app(app_name)
if manager:
handler = manager.get_handler(handler_name)
            # guard against get_handler returning None so a missing handler
            # falls through to the "not found" error response below
            if handler is not None and not handler.internal:
try:
urlish = handler(request, **kwargs)
return redirect(urlish)
except TypeError as e:
error['message'] = "HTTP 400 Bad Request: {0}. ".format(str(e))
return HttpResponseBadRequest(json.dumps(error), content_type='application/javascript')
        error['message'] = "HTTP 400 Bad Request: No handoff handler '{0}' for app '{1}' found.".\
            format(handler_name, manager.app.name)
return HttpResponseBadRequest(json.dumps(error), content_type='application/javascript')
def _get_handoff_manager_for_app(self, app_name):
"""
Returns the app manager for app with package == app_name if that app is installed.
Args:
app_name (str): The name of another Tethys app whose HandoffManager should be returned. If None then self is returned.
Returns:
A HandoffManager object for the app with the name app_name or None if no app with that name is found.
""" # noqa: E501
if not app_name:
return self
# Get the app
harvester = tethys_apps.harvester.SingletonHarvester()
apps = harvester.apps
for app in apps:
if app.package == app_name:
manager = app.get_handoff_manager()
return manager
def _get_valid_handlers(self):
"""
Returns a list of valid HandoffHandler objects.
"""
return [handler for handler in self.handlers if handler.valid]
class HandoffHandler(TethysFunctionExtractor):
"""
    An object that is used to register a Handoff handler function.
Attributes:
name(str): Name of the handoff handler.
handler(str): Path to the handler function for the handoff interaction. Use dot-notation (e.g.: "foo.bar.function").
internal(bool, optional): Specifies that the handler is only for internal (i.e. within the same Tethys server) purposes.
""" # noqa: E501
def __init__(self, name, handler, internal=False):
"""
Constructor
"""
self.name = name
self.handler = handler
self.internal = internal
super().__init__(self.handler)
# ensure that the function and valid attributes are initialized
self.function
# make each instance callable
self.__class__ = type(self.__class__.__name__, (self.__class__,), {})
self.__class__.__call__ = lambda this, *args, **kwargs: this.function(*args, **kwargs)
def __repr__(self):
"""
String representation
"""
return '<Handoff Handler: name={0}, handler={1}>'.format(self.name, self.handler)
def __dict__(self):
"""
JSON representation
"""
return {'name': self.name,
'arguments': self.json_arguments,
}
@property
def arguments(self):
"""
Returns a list of arguments for the HandoffHandler function.
"""
return inspect.getfullargspec(self.function).args
@property
def json_arguments(self):
"""
Returns self.arguments with the 'request' argument removed.
"""
args = self.arguments
if 'request' in args:
index = args.index('request')
args.pop(index)
return args
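# Minimal usage sketch (illustrative only; the app class and the dotted
# handler path 'my_first_app.handoff.csv_handler' are hypothetical): an app
# registers handlers via handoff_handlers() and the manager exposes them.
#
#     class MyFirstApp(TethysAppBase):
#         def handoff_handlers(self):
#             return (HandoffHandler(name='csv',
#                                    handler='my_first_app.handoff.csv_handler',
#                                    internal=False),)
#
#     manager = MyFirstApp().get_handoff_manager()
#     manager.get_capabilities(jsonify=True)  # JSON list of valid handlers
#     manager.get_handler('csv')              # the HandoffHandler, callable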
|
|
"""
tests the pysat meta object and code
"""
import pysat
import pandas as pds
from nose.tools import assert_raises, raises
import nose.tools
import pysat.instruments.pysat_testing
import numpy as np
class TestBasics:
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.meta = pysat.Meta()
self.testInst = pysat.Instrument('pysat', 'testing', tag='', clean_level='clean')
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst
def test_basic_meta_assignment(self):
self.meta['new'] = {'units':'hey', 'long_name':'boo'}
assert (self.meta['new'].units == 'hey') & (self.meta['new'].long_name == 'boo')
def test_basic_meta_assignment_w_Series(self):
self.meta['new'] = pds.Series({'units':'hey', 'long_name':'boo'})
assert (self.meta['new'].units == 'hey') & (self.meta['new'].long_name == 'boo')
def test_multiple_meta_assignment(self):
self.meta[['new','new2']] = {'units':['hey', 'hey2'], 'long_name':['boo', 'boo2']}
assert ((self.meta['new'].units == 'hey') & (self.meta['new'].long_name == 'boo') &
(self.meta['new2'].units == 'hey2') & (self.meta['new2'].long_name == 'boo2'))
@raises(ValueError)
def test_multiple_meta_assignment_error(self):
self.meta[['new','new2']] = {'units':['hey', 'hey2'], 'long_name':['boo']}
assert ((self.meta['new'].units == 'hey') & (self.meta['new'].long_name == 'boo') &
(self.meta['new2'].units == 'hey2') & (self.meta['new2'].long_name == 'boo2'))
def test_replace_meta_units(self):
self.meta['new'] = {'units':'hey', 'long_name':'boo'}
self.meta['new'] = {'units':'yep'}
assert (self.meta['new'].units == 'yep') & (self.meta['new'].long_name == 'boo')
def test_replace_meta_long_name(self):
self.meta['new'] = {'units':'hey', 'long_name':'boo'}
self.meta['new'] = {'long_name':'yep'}
assert (self.meta['new'].units == 'hey') & (self.meta['new'].long_name == 'yep')
def test_add_additional_metadata_types(self):
self.meta['new'] = {'units':'hey', 'long_name':'boo', 'description':'boohoo'}
assert ((self.meta['new'].units == 'hey') &
(self.meta['new'].long_name == 'boo') &
(self.meta['new'].description == 'boohoo'))
def test_add_meta_then_add_additional_metadata_types(self):
self.meta['new'] = {'units':'hey', 'long_name':'crew'}
self.meta['new'] = {'units':'hey', 'long_name':'boo', 'description':'boohoo'}
assert ((self.meta['new'].units == 'hey') &
(self.meta['new'].long_name == 'boo') &
(self.meta['new'].description == 'boohoo'))
def test_add_meta_then_add_different_additional_metadata_types(self):
self.meta['new1'] = {'units':'hey1', 'long_name':'crew'}
self.meta['new2'] = {'units':'hey', 'long_name':'boo', 'description':'boohoo'}
assert ((self.meta['new2'].units == 'hey') &
(self.meta['new2'].long_name == 'boo') &
(self.meta['new2'].description == 'boohoo') &
(self.meta['new1'].units == 'hey1') &
(self.meta['new1'].long_name == 'crew') &
(np.isnan(self.meta['new1'].description)))
def test_add_meta_then_partially_add_additional_metadata_types(self):
self.meta['new'] = {'units':'hey', 'long_name':'crew'}
self.meta['new'] = {'long_name':'boo', 'description':'boohoo'}
assert ((self.meta['new'].units == 'hey') &
(self.meta['new'].long_name == 'boo') &
(self.meta['new'].description == 'boohoo'))
def test_meta_equality(self):
assert self.testInst.meta == self.testInst.meta
def test_false_meta_equality(self):
assert not (self.testInst.meta == self.testInst)
def test_assign_higher_order_meta(self):
meta = pysat.Meta()
meta['dm'] = {'units':'hey', 'long_name':'boo'}
meta['rpa'] = {'units':'crazy', 'long_name':'boo_whoo'}
self.meta['higher'] = meta
def test_assign_higher_order_meta_from_dict(self):
meta = pysat.Meta()
meta['dm'] = {'units':'hey', 'long_name':'boo'}
meta['rpa'] = {'units':'crazy', 'long_name':'boo_whoo'}
self.meta['higher'] = {'meta':meta}
def test_assign_higher_order_meta_from_dict_correct(self):
meta = pysat.Meta()
meta['dm'] = {'units':'hey', 'long_name':'boo'}
meta['rpa'] = {'units':'crazy', 'long_name':'boo_whoo'}
self.meta['higher'] = {'meta':meta}
assert self.meta['higher'] == meta
def test_assign_higher_order_meta_from_dict_w_multiple(self):
meta = pysat.Meta()
meta['dm'] = {'units':'hey', 'long_name':'boo'}
meta['rpa'] = {'units':'crazy', 'long_name':'boo_whoo'}
self.meta[['higher', 'lower']] = {'meta':[meta, None],
'units':[None, 'boo'],
'long_name':[None, 'boohoo']}
check1 = self.meta['lower'].units == 'boo'
check2 = self.meta['lower'].long_name == 'boohoo'
check3 = self.meta['higher'] == meta
assert check1 & check2 & check3
def test_assign_higher_order_meta_from_dict_w_multiple_2(self):
meta = pysat.Meta()
meta['dm'] = {'units':'hey', 'long_name':'boo'}
meta['rpa'] = {'units':'crazy', 'long_name':'boo_whoo'}
self.meta[['higher', 'lower', 'lower2']] = {'meta':[meta, None, meta],
'units':[None, 'boo', None],
'long_name':[None, 'boohoo', None]}
check1 = self.meta['lower'].units == 'boo'
check2 = self.meta['lower'].long_name == 'boohoo'
check3 = self.meta['higher'] == meta
assert check1 & check2 & check3
def test_create_new_metadata_from_old(self):
meta = pysat.Meta()
meta['dm'] = {'units':'hey', 'long_name':'boo'}
meta['rpa'] = {'units':'crazy', 'long_name':'boo_whoo'}
self.meta[['higher', 'lower', 'lower2']] = {'meta': [meta, None, meta],
'units': [None, 'boo', None],
'long_name': [None, 'boohoo', None]}
meta2 = pysat.Meta(metadata=self.meta.data)
check1 = np.all(meta2['lower'] == self.meta['lower'])
assert check1
def test_replace_meta_units_list(self):
self.meta['new'] = {'units':'hey', 'long_name':'boo'}
self.meta['new2'] = {'units':'hey2', 'long_name':'boo2'}
self.meta['new2','new'] = {'units':['yeppers','yep']}
#print self.meta['new']
#print self.meta['new2']
assert ((self.meta['new'].units == 'yep') & (self.meta['new'].long_name == 'boo') &
(self.meta['new2'].units == 'yeppers') & (self.meta['new2'].long_name == 'boo2'))
def test_meta_repr_functions(self):
self.testInst.meta['new'] = {'units':'hey', 'long_name':'boo'}
self.testInst.meta['new2'] = {'units':'hey2', 'long_name':'boo2'}
print (self.testInst.meta)
# if it doesn't produce an error, we presume it works
# how do you test a print??
assert True
def test_meta_csv_load(self):
import os
name = os.path.join(pysat.__path__[0],'tests', 'cindi_ivm_meta.txt')
mdata = pysat.Meta.from_csv(name=name, na_values=[ ], #index_col=2,
keep_default_na=False,
col_names=['name','long_name','idx','units','description'])
check = []
check.append(mdata['yrdoy'].long_name == 'Date')
check.append(mdata['unit_mer_z'].long_name == 'Unit Vector - Meridional Dir - S/C z')
check.append(mdata['iv_mer'].description == 'Constructed using IGRF mag field.')
assert np.all(check)
def test_meta_csv_load_and_operations(self):
import os
name = os.path.join(pysat.__path__[0],'tests', 'cindi_ivm_meta.txt')
mdata = pysat.Meta.from_csv(name=name, na_values=[ ], #index_col=2,
keep_default_na=False,
col_names=['name','long_name','idx','units','description'])
# names aren't provided for all data in file, filling in gaps
# print mdata.data
mdata.data.loc[:,'name'] = mdata.data.index
mdata.data.index = mdata.data['idx']
new = mdata.data.reindex(index = np.arange(mdata.data['idx'].iloc[-1]+1))
idx, = np.where(new['name'].isnull())
        # .ix is long deprecated; .loc is equivalent here because the frame
        # was just reindexed onto a fresh RangeIndex.
        new.loc[idx, 'name'] = idx.astype(str)
        new.loc[idx, 'units'] = ''
        new.loc[idx, 'long_name'] = ''
        new.loc[idx, 'description'] = ''
new['idx'] = new.index.values
new.index = new['name']
# update metadata object with new info
mdata.replace(metadata=new)
assert np.all(mdata.data == new)
# assign multiple values to default
def test_multiple_input_names_null_value(self):
self.meta[['test1', 'test2']] = {}
check1 = self.meta['test1', 'units'] == ''
check2 = self.meta['test2', 'long_name'] == 'test2'
assert check1 & check2
def test_multiple_input_names_null_value_preexisting_values(self):
self.meta[['test1', 'test2']] = {'units' : ['degrees', 'hams'],
'long_name' : ['testing', 'further']}
# print (self.meta)
self.meta[['test1', 'test2']] = {}
check1 = self.meta['test1', 'units'] == 'degrees'
check2 = self.meta['test2', 'long_name'] == 'further'
assert check1 & check2
# test behaviors related to case changes, 'units' vs 'Units'
def test_assign_Units(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
assert ((self.meta['new'].Units == 'hey') & (self.meta['new'].Long_Name == 'boo') &
(self.meta['new2'].Units == 'hey2') & (self.meta['new2'].Long_Name == 'boo2'))
@raises(AttributeError)
def test_assign_Units_no_units(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
# print ( self.meta['new'])
# print (self.meta['new2', 'units'])
self.meta['new'].units
def test_get_Units_wrong_case(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
assert ((self.meta['new', 'units'] == 'hey') & (self.meta['new', 'long_name'] == 'boo') &
(self.meta['new2', 'units'] == 'hey2') & (self.meta['new2', 'long_name'] == 'boo2'))
# Test the attribute transfer function
def test_transfer_attributes_to_instrument(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
check1 = self.testInst.new_attribute == 'hello'
assert check1
    # ensure attributes with a leading underscore are not transferred
@raises(AttributeError)
def test_transfer_attributes_to_instrument_leading_(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
self.testInst._yo_yo == 'yo yo'
assert True
    # ensure attributes with leading double underscores are not transferred
@raises(AttributeError)
def test_transfer_attributes_to_instrument_leading__(self):
self.meta.new_attribute = 'hello'
self.meta.__yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
self.testInst.__yo_yo == 'yo yo'
assert True
    # ensure meta-internal attributes aren't transferred
@raises(AttributeError)
def test_transfer_attributes_to_instrument_no_meta_attr(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
self.testInst.ho_data
assert True
@raises(RuntimeError)
def test_transfer_attributes_to_instrument_strict_names(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.jojo_beans = 'yep!'
self.meta.name = 'Failure!'
self.meta.date = 'yo yo2'
self.testInst.load(2009,1)
self.testInst.jojo_beans = 'nope!'
self.meta.transfer_attributes_to_instrument(self.testInst, strict_names=True)
assert True
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations:
"""ExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
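    # Illustrative usage sketch (the client construction below is an assumption,
    # not part of this module): the operations group is normally reached through
    # the service client, e.g.
    #
    #     client = NetworkManagementClient(credential, subscription_id)
    #     poller = await client.express_route_circuit_connections.begin_delete(
    #         "my-rg", "my-circuit", "AzurePrivatePeering", "my-connection")
    #     await poller.result()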
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitConnection"]:
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as qe
from heat.common import exception
from heat.engine.clients.os import neutron
from heat.tests import common
from heat.tests import utils
class NeutronClientPluginTestCase(common.HeatTestCase):
def setUp(self):
super(NeutronClientPluginTestCase, self).setUp()
self.neutron_client = mock.MagicMock()
con = utils.dummy_context()
c = con.clients
self.neutron_plugin = c.client_plugin('neutron')
self.neutron_plugin._client = self.neutron_client
class NeutronClientPluginTests(NeutronClientPluginTestCase):
def setUp(self):
super(NeutronClientPluginTests, self).setUp()
self.mock_find = self.patchobject(neutron.neutronV20,
'find_resourceid_by_name_or_id')
self.mock_find.return_value = 42
def test_find_neutron_resource(self):
props = {'net': 'test_network'}
res = self.neutron_plugin.find_neutron_resource(props, 'net',
'network')
self.assertEqual(42, res)
self.mock_find.assert_called_once_with(self.neutron_client, 'network',
'test_network')
def test_resolve_network(self):
props = {'net': 'test_network'}
res = self.neutron_plugin.resolve_network(props, 'net', 'net_id')
self.assertEqual(42, res)
self.mock_find.assert_called_once_with(self.neutron_client, 'network',
'test_network')
        # check resolution when an id is passed instead of a name
props = {'net_id': 77}
res = self.neutron_plugin.resolve_network(props, 'net', 'net_id')
self.assertEqual(77, res)
        # in this case find_resourceid_by_name_or_id is not called again
self.mock_find.assert_called_once_with(self.neutron_client, 'network',
'test_network')
def test_resolve_subnet(self):
props = {'snet': 'test_subnet'}
res = self.neutron_plugin.resolve_subnet(props, 'snet', 'snet_id')
self.assertEqual(42, res)
self.mock_find.assert_called_once_with(self.neutron_client, 'subnet',
'test_subnet')
        # check resolution when an id is passed instead of a name
props = {'snet_id': 77}
res = self.neutron_plugin.resolve_subnet(props, 'snet', 'snet_id')
self.assertEqual(77, res)
        # in this case find_resourceid_by_name_or_id is not called again
self.mock_find.assert_called_once_with(self.neutron_client, 'subnet',
'test_subnet')
def test_get_secgroup_uuids(self):
# test get from uuids
sgs_uuid = ['b62c3079-6946-44f5-a67b-6b9091884d4f',
'9887157c-d092-40f5-b547-6361915fce7d']
sgs_list = self.neutron_plugin.get_secgroup_uuids(sgs_uuid)
self.assertEqual(sgs_uuid, sgs_list)
        # test lookup by name when only one matching group is returned
sgs_non_uuid = ['security_group_1']
expected_groups = ['0389f747-7785-4757-b7bb-2ab07e4b09c3']
fake_list = {
'security_groups': [
{
'tenant_id': 'test_tenant_id',
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'name': 'security_group_1',
'security_group_rules': [],
'description': 'no protocol'
}
]
}
self.neutron_client.list_security_groups.return_value = fake_list
self.assertEqual(expected_groups,
self.neutron_plugin.get_secgroup_uuids(sgs_non_uuid))
        # test when only one of the matching groups belongs to the tenant
fake_list = {
'security_groups': [
{
'tenant_id': 'test_tenant_id',
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'name': 'security_group_1',
'security_group_rules': [],
'description': 'no protocol'
},
{
'tenant_id': 'not_test_tenant_id',
'id': '384ccd91-447c-4d83-832c-06974a7d3d05',
'name': 'security_group_1',
'security_group_rules': [],
'description': 'no protocol'
}
]
}
self.neutron_client.list_security_groups.return_value = fake_list
self.assertEqual(expected_groups,
self.neutron_plugin.get_secgroup_uuids(sgs_non_uuid))
        # test when there are two security groups with the same name and
        # both belong to the tenant
fake_list = {
'security_groups': [
{
'tenant_id': 'test_tenant_id',
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'name': 'security_group_1',
'security_group_rules': [],
'description': 'no protocol'
},
{
'tenant_id': 'test_tenant_id',
'id': '384ccd91-447c-4d83-832c-06974a7d3d05',
'name': 'security_group_1',
'security_group_rules': [],
'description': 'no protocol'
}
]
}
self.neutron_client.list_security_groups.return_value = fake_list
self.assertRaises(exception.PhysicalResourceNameAmbiguity,
self.neutron_plugin.get_secgroup_uuids,
sgs_non_uuid)
class NeutronConstraintsValidate(common.HeatTestCase):
scenarios = [
('validate_network',
dict(constraint_class=neutron.NetworkConstraint,
resource_type='network')),
('validate_port',
dict(constraint_class=neutron.PortConstraint,
resource_type='port')),
('validate_router',
dict(constraint_class=neutron.RouterConstraint,
resource_type='router')),
('validate_subnet',
dict(constraint_class=neutron.SubnetConstraint,
resource_type='subnet'))
]
def test_validate(self):
nc = mock.Mock()
mock_create = self.patchobject(neutron.NeutronClientPlugin, '_create')
mock_create.return_value = nc
mock_find = self.patchobject(neutron.neutronV20,
'find_resourceid_by_name_or_id')
mock_find.side_effect = ['foo',
qe.NeutronClientException(status_code=404)]
constraint = self.constraint_class()
ctx = utils.dummy_context()
self.assertTrue(constraint.validate("foo", ctx))
self.assertFalse(constraint.validate("bar", ctx))
mock_find.assert_has_calls([mock.call(nc, self.resource_type, 'foo'),
mock.call(nc, self.resource_type, 'bar')])
class TestIPConstraint(common.HeatTestCase):
def setUp(self):
super(TestIPConstraint, self).setUp()
self.constraint = neutron.IPConstraint()
def test_validate_ipv4_format(self):
validate_format = [
'1.1.1.1',
'1.0.1.1',
'255.255.255.255'
]
for ip in validate_format:
self.assertTrue(self.constraint.validate(ip, None))
def test_invalidate_ipv4_format(self):
invalidate_format = [
'1.1.1.',
'1.1.1.256',
'invalidate format',
'1.a.1.1'
]
for ip in invalidate_format:
self.assertFalse(self.constraint.validate(ip, None))
def test_validate_ipv6_format(self):
validate_format = [
'2002:2002::20c:29ff:fe7d:811a',
'::1',
'2002::',
'2002::1',
]
for ip in validate_format:
self.assertTrue(self.constraint.validate(ip, None))
def test_invalidate_ipv6_format(self):
invalidate_format = [
'2002::2001::1',
'2002::g',
'invalidate format',
'2001::0::',
'20c:29ff:fe7d:811a'
]
for ip in invalidate_format:
self.assertFalse(self.constraint.validate(ip, None))
class TestMACConstraint(common.HeatTestCase):
def setUp(self):
super(TestMACConstraint, self).setUp()
self.constraint = neutron.MACConstraint()
def test_valid_mac_format(self):
validate_format = [
'01:23:45:67:89:ab',
'01-23-45-67-89-ab',
'0123.4567.89ab'
]
for mac in validate_format:
self.assertTrue(self.constraint.validate(mac, None))
def test_invalid_mac_format(self):
invalidate_format = [
'8.8.8.8',
'0a-1b-3c-4d-5e-6f-1f',
'0a-1b-3c-4d-5e-xx'
]
for mac in invalidate_format:
self.assertFalse(self.constraint.validate(mac, None))
class TestCIDRConstraint(common.HeatTestCase):
def setUp(self):
super(TestCIDRConstraint, self).setUp()
self.constraint = neutron.CIDRConstraint()
def test_valid_cidr_format(self):
validate_format = [
'10.0.0.0/24',
'6000::/64',
'8.8.8.8'
]
for cidr in validate_format:
self.assertTrue(self.constraint.validate(cidr, None))
def test_invalid_cidr_format(self):
invalidate_format = [
'::/129',
'Invalid cidr',
'300.0.0.0/24',
'10.0.0.0/33',
'8.8.8.0/ 24'
]
for cidr in invalidate_format:
self.assertFalse(self.constraint.validate(cidr, None))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
import urllib
import urllib2
from oslo.config import cfg
import six
from kwranking.openstack.common import fileutils
from kwranking.openstack.common.gettextutils import _ # noqa
from kwranking.openstack.common import jsonutils
from kwranking.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
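    # Illustrative policy file content (rule names here are examples only):
    #
    #     {
    #         "default": "rule:admin_required",
    #         "admin_required": "role:admin or is_admin:1"
    #     }
    #
    # load_json() parses each value with parse_rule() and returns a Rules
    # mapping of rule name to Check tree.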
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
"""
def __init__(self, policy_file=None, rules=None, default_rule=None):
self.rules = Rules(rules)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
if overwrite:
self.rules = Rules(rules)
else:
            self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if not self.policy_path:
self.policy_path = self._get_policy_path()
reloaded, data = fileutils.read_cached_file(self.policy_path,
force_reload=force_reload)
if reloaded:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug(_("Rules successfully reloaded"))
def _get_policy_path(self):
"""Locate the policy json data file.
:param policy_file: Custom policy file to locate.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file couldn't
be located.
"""
policy_file = CONF.find_file(self.policy_file)
if policy_file:
return policy_file
raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
LOG.debug(_("Rule %s will be now enforced") % rule)
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug(_("Rule [%s] doesn't exist") % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
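    # Illustrative usage sketch (the rule name and data are examples only, and
    # a policy file defining "compute:get" is assumed to be discoverable):
    #
    #     enforcer = Enforcer()
    #     allowed = enforcer.enforce("compute:get",
    #                                {"project_id": "p1"},
    #                                {"roles": ["admin"], "project_id": "p1"},
    #                                do_raise=True)
    #
    # With do_raise=True a failed check raises PolicyNotAuthorized (or the
    # exception class passed via `exc`).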
class BaseCheck(object):
"""Abstract base class for Check classes."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
            if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
            if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
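# Illustrative token stream (example rule only): for the rule
# "role:admin or (role:projectadmin and project_id:%(project_id)s)"
# _parse_tokenize yields roughly:
#
#     ('check', RoleCheck), ('or', 'or'), ('(', '('), ('check', RoleCheck),
#     ('and', 'and'), ('check', GenericCheck), (')', ')')
#
# which ParseState below then reduces to a single Check tree.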
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %r") % rule)
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
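# Illustrative sketch (the check kind below is an example, not one registered
# by this module): register() can be used as a decorator to add new check
# kinds, e.g.
#
#     @register('is_owner')
#     class IsOwnerCheck(Check):
#         def __call__(self, target, creds, enforcer):
#             return target.get('user_id') == creds.get('user_id')
#
# after which rules such as "is_owner:anything" resolve to IsOwnerCheck.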
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
|
|
from __future__ import absolute_import, unicode_literals
import logging
import json
from dash.orgs.views import OrgPermsMixin
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Prefetch
from django.http import (
HttpResponseBadRequest, HttpResponseRedirect, JsonResponse)
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from smartmin.views import SmartCRUDL, SmartListView, SmartFormView, SmartView
from tracpro.contacts.models import Contact
from .models import Boundary, Group, Region
from .forms import ContactGroupsForm
logger = logging.getLogger(__name__)
class SetRegion(View):
"""
Update the session variable that stores the currently-active region
per org.
"""
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
if 'region' not in request.POST:
return HttpResponseBadRequest(
"Request data should include `region`.")
# Determine the requested region.
region_id = request.POST.get('region')
if region_id == "all":
if not request.user.is_admin_for(request.org):
return HttpResponseBadRequest(
"Only org admins may see all regions.")
else:
region = None
else:
region = request.user_regions.filter(pk=region_id).first()
if not region:
return HttpResponseBadRequest(
"Either region {} does not exist or you do not have "
"permission to see this region.".format(region_id))
# Show a message confirming the change.
region_name = region.name if region else "all regions"
msg = "Now showing data from {}.".format(region_name)
messages.info(request, msg)
# Store the requested region in the session.
session_key = '{org}:region_id'.format(org=request.org.pk)
request.session[session_key] = str(region.pk) if region else None
request.session.save()
# Redirect the user to the next page (usually set to the page the
# user came from).
next_path = self.request.POST.get('next')
if not (next_path and is_safe_url(next_path, request.get_host())):
next_path = reverse('home.home')
return redirect(next_path)
class ToggleSubregions(View):
"""
Update session variable that manages whether to include data for
sub-regions or only the current region.
"""
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
if 'include_subregions' not in request.POST:
return HttpResponseBadRequest(
"Request data should include `include_subregions`.")
# Determine whether to include sub-regions and store the value
# in the session.
val = request.POST.get('include_subregions')
if val in ('0', '1'):
val = bool(int(val))
request.session['include_subregions'] = val
request.session.save()
if val:
msg = "Now showing data from {region} and its sub-regions."
else:
msg = "Showing data from {region} only."
messages.info(request, msg.format(region=request.region))
else:
return HttpResponseBadRequest(
"`include_subregions` should be either '0' or '1'.")
# Redirect the user to the next page (usually set to the page the
# user came from).
next_path = request.POST.get('next')
if not (next_path and is_safe_url(next_path, request.get_host())):
next_path = reverse("home.home")
return redirect(next_path)
class RegionCRUDL(SmartCRUDL):
model = Region
actions = ('list', 'most_active', 'select', 'update_all')
class List(OrgPermsMixin, SmartListView):
fields = ('name', 'boundary', 'contacts')
paginate_by = None
def derive_queryset(self, **kwargs):
regions = Region.get_all(self.request.org)
regions = regions.prefetch_related(
Prefetch(
"contacts",
Contact.objects.active(),
"prefetched_contacts",
),
)
return regions
def get_context_data(self, **kwargs):
org_boundaries = Boundary.objects.by_org(self.request.org).order_by('name')
kwargs.setdefault('org_boundaries', org_boundaries)
return super(RegionCRUDL.List, self).get_context_data(**kwargs)
def get_contacts(self, obj):
return len(obj.prefetched_contacts)
def get_boundary(self, obj):
return obj.boundary.name if obj.boundary else "-"
class MostActive(OrgPermsMixin, SmartListView):
def get(self, request, *args, **kwargs):
regions = Region.get_most_active(self.request.org)[0:5]
results = [{'id': r.pk, 'name': r.name, 'response_count': r.response_count}
for r in regions]
return JsonResponse({
'count': len(results),
'results': results,
})
class Select(OrgPermsMixin, SmartFormView):
title = _("Region Groups")
form_class = ContactGroupsForm
success_url = '@groups.region_list'
submit_button_name = _("Update")
success_message = _("Updated contact groups to use as regions")
def get_form_kwargs(self):
kwargs = super(RegionCRUDL.Select, self).get_form_kwargs()
kwargs.setdefault('model', RegionCRUDL.model)
kwargs.setdefault('org', self.request.org)
return kwargs
def form_valid(self, form):
uuids = form.cleaned_data['groups']
Region.sync_with_temba(self.request.org, uuids)
return HttpResponseRedirect(self.get_success_url())
class UpdateAll(OrgPermsMixin, SmartView, View):
http_method_names = ['post']
@transaction.atomic
def post(self, request, *args, **kwargs):
"""AJAX endpoint to update boundaries and hierarchy for all org regions."""
org = request.org
# Load data and validate that it is in the correct format.
self.raw_data = request.POST.get('data', "").strip() or None
try:
data = json.loads(self.raw_data)
except TypeError:
return self.error(
"No data was provided in the `data` parameter.")
except ValueError:
return self.error(
"Data must be valid JSON.")
if not isinstance(data, dict):
return self.error(
"Data must be a dict that maps region id to "
"(parent id, boundary id).")
if not all(isinstance(v, list) and len(v) == 2 for v in data.values()):
return self.error(
"All data values must be of the format "
"(parent id, boundary id).")
# Grab all of the org's regions and boundaries at once.
regions = {str(r.pk): r for r in Region.get_all(org)}
boundaries = {str(b.pk): b for b in Boundary.objects.by_org(org)}
# Check that the user is updating exactly the regions from this
# org, and that specified parents and boundaries are valid for
# this org.
valid_regions = set(regions.keys())
valid_boundaries = set(boundaries.keys())
sent_regions = set(str(i) for i in data.keys())
sent_parents = set(str(i[0]) for i in data.values() if i[0] is not None)
sent_boundaries = set(str(i[1]) for i in data.values() if i[1] is not None)
if sent_regions != valid_regions:
return self.error(
"Data must map region id to parent id for every region "
"in this org.")
if not sent_parents.issubset(valid_regions):
return self.error(
"Region parent must be a region from the same org, "
"or null.")
if not sent_boundaries.issubset(valid_boundaries):
return self.error(
"Region boundary must be a boundary from the same "
"org, or null.")
# Re-set parent and boundary values for each region,
# then rebuild the mptt tree.
with Region.objects.disable_mptt_updates():
for region_id, (parent_id, boundary_id) in data.items():
region = regions.get(str(region_id))
parent = regions.get(str(parent_id)) if parent_id else None
boundary = boundaries.get(str(boundary_id)) if boundary_id else None
changed = False
if region.boundary != boundary:
changed = True
self.log_change("boundary", region, region.boundary, boundary)
region.boundary = boundary
if region.parent != parent:
changed = True
self.log_change("parent", region, region.parent, parent)
region.parent = parent
if changed:
region.save()
Region.objects.rebuild()
return self.success("{} regions have been updated.".format(request.org))
def log_change(self, name, region, old, new):
message = "Updating {name} of {region} from {old} -> {new}.".format(
name=name,
region=region,
old=old.name if old else None,
new=new.name if new else None,
)
logger.debug("{} Regions: {}".format(self.request.org, message))
def error(self, message):
template = "{} Regions: {} {}"
logger.warning(template.format(self.request.org, message, self.raw_data))
return JsonResponse({
'status': 400,
'success': False,
'message': message,
})
def success(self, message):
template = "{} Regions: {} {}"
logger.info(template.format(self.request.org, message, self.raw_data))
return JsonResponse({
'status': 200,
'success': True,
'message': message,
})
class GroupCRUDL(SmartCRUDL):
model = Group
actions = ('list', 'most_active', 'select')
class List(OrgPermsMixin, SmartListView):
fields = ('name', 'contacts')
default_order = ('name',)
title = _("Reporter Groups")
def derive_queryset(self, **kwargs):
return Group.get_all(self.request.org)
def get_contacts(self, obj):
return obj.get_contacts().count()
class MostActive(OrgPermsMixin, SmartListView):
def get(self, request, *args, **kwargs):
regions = Group.get_most_active(self.request.org)[0:5]
results = [{'id': r.pk, 'name': r.name, 'response_count': r.response_count}
for r in regions]
return JsonResponse({
'count': len(results),
'results': results,
})
class Select(OrgPermsMixin, SmartFormView):
title = _("Reporter Groups")
form_class = ContactGroupsForm
success_url = '@groups.group_list'
submit_button_name = _("Update")
success_message = _("Updated contact groups to use as reporter groups")
def get_form_kwargs(self):
kwargs = super(GroupCRUDL.Select, self).get_form_kwargs()
kwargs.setdefault('model', GroupCRUDL.model)
kwargs.setdefault('org', self.request.org)
return kwargs
def form_valid(self, form):
uuids = form.cleaned_data['groups']
Group.sync_with_temba(self.request.org, uuids)
return HttpResponseRedirect(self.get_success_url())
class BoundaryCRUDL(SmartCRUDL):
model = Boundary
actions = ('list',)
class List(OrgPermsMixin, SmartListView):
def get_queryset(self):
return Boundary.objects.by_org(self.request.org).order_by('-level')
def render_to_response(self, context, **response_kwargs):
results = {b.pk: b.as_geojson() for b in context['object_list']}
return JsonResponse({'results': results})
|
|
from __future__ import absolute_import, unicode_literals, print_function
from io import BytesIO # Yes, there is an io module in Python 2
import cgi
import codecs
import gzip
import operator
import os
import re
import warnings
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
# some old python 2.6 thing then, eh?
from ordereddict import OrderedDict
import sys
if sys.version_info >= (3,): # pragma: no cover
# As in, Python 3
from io import StringIO
from urllib.request import urlopen
from urllib.parse import urljoin, urlparse
STR_TYPE = str
else: # Python 2
try:
from cStringIO import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
StringIO = StringIO # shut up pyflakes
from urllib2 import urlopen
from urlparse import urljoin, urlparse
STR_TYPE = basestring
import cssutils
from lxml import etree
from lxml.cssselect import CSSSelector
from premailer.merge_style import merge_styles, csstext_to_pairs
from premailer.cache import function_cache
__all__ = ['PremailerError', 'Premailer', 'transform']
class PremailerError(Exception):
pass
class ExternalNotFoundError(ValueError):
pass
def make_important(bulk):
"""makes every property in a string !important.
"""
return ';'.join('%s !important' % p if not p.endswith('!important') else p
for p in bulk.split(';'))
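# Quick illustration (added here, not from the original source) of what
# make_important produces; an informal sketch rather than a doctest:
#
#     make_important('color:red; font-size:12px')
#     # -> 'color:red !important; font-size:12px !important'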
def get_or_create_head(root):
"""Ensures that `root` contains a <head> element and returns it.
"""
head = CSSSelector('head')(root)
if not head:
head = etree.Element('head')
body = CSSSelector('body')(root)[0]
body.getparent().insert(0, head)
return head
else:
return head[0]
@function_cache()
def _cache_parse_css_string(css_body, validate=True):
"""
This function caches the result from cssutils.
It is a big gain when the number of rules is large.
The cache holds at most 1000 entries, mainly to
guard against runaway memory use in case something goes wild.
Be aware that you can turn the cache off in Premailer
Args:
css_body(str): css rules in string format
validate(bool): if cssutils should validate
Returns:
cssutils.css.cssstylesheet.CSSStyleSheet
"""
return cssutils.parseString(css_body, validate=validate)
_element_selector_regex = re.compile(r'(^|\s)\w')
_cdata_regex = re.compile(r'\<\!\[CDATA\[(.*?)\]\]\>', re.DOTALL)
_importants = re.compile(r'\s*!important')
# These selectors don't apply to all elements. Rather, they specify
# which elements to apply to.
FILTER_PSEUDOSELECTORS = [':last-child', ':first-child', 'nth-child']
class Premailer(object):
attribute_name = 'data-premailer'
def __init__(self, html, base_url=None,
preserve_internal_links=False,
preserve_inline_attachments=True,
exclude_pseudoclasses=True,
keep_style_tags=False,
include_star_selectors=False,
remove_classes=True,
strip_important=True,
external_styles=None,
css_text=None,
method="html",
base_path=None,
disable_basic_attributes=None,
disable_validation=False,
cache_css_parsing=True,
cssutils_logging_handler=None,
cssutils_logging_level=None,
disable_leftover_css=False):
self.html = html
self.base_url = base_url
self.preserve_internal_links = preserve_internal_links
self.preserve_inline_attachments = preserve_inline_attachments
self.exclude_pseudoclasses = exclude_pseudoclasses
# whether to delete the <style> tag once it's been processed
# this will always preserve the original css
self.keep_style_tags = keep_style_tags
self.remove_classes = remove_classes
# whether to process or ignore selectors like '* { foo:bar; }'
self.include_star_selectors = include_star_selectors
if isinstance(external_styles, STR_TYPE):
external_styles = [external_styles]
self.external_styles = external_styles
if isinstance(css_text, STR_TYPE):
css_text = [css_text]
self.css_text = css_text
self.strip_important = strip_important
self.method = method
self.base_path = base_path
if disable_basic_attributes is None:
disable_basic_attributes = []
self.disable_basic_attributes = disable_basic_attributes
self.disable_validation = disable_validation
self.cache_css_parsing = cache_css_parsing
self.disable_leftover_css = disable_leftover_css
if cssutils_logging_handler:
cssutils.log.addHandler(cssutils_logging_handler)
if cssutils_logging_level:
cssutils.log.setLevel(cssutils_logging_level)
def _parse_css_string(self, css_body, validate=True):
if self.cache_css_parsing:
return _cache_parse_css_string(css_body, validate=validate)
return cssutils.parseString(css_body, validate=validate)
def _parse_style_rules(self, css_body, ruleset_index):
"""Returns a list of rules to apply to this doc and a list of rules
that won't be used because e.g. they are pseudoclasses. Rules
look like: (specificity, selector, bulk)
for example: ((0, 1, 0, 0, 0), u'.makeblue', u'color:blue').
The bulk of the rule should not end in a semicolon.
"""
def join_css_properties(properties):
""" Accepts a list of cssutils Property objects and returns
a semicolon-delimited string like 'color: red; font-size: 12px'
"""
return ';'.join(
'{0}:{1}'.format(prop.name, prop.value)
for prop in properties
)
leftover = []
rules = []
# empty string
if not css_body:
return rules, leftover
sheet = self._parse_css_string(
css_body,
validate=not self.disable_validation
)
for rule in sheet:
# handle media rule
if rule.type == rule.MEDIA_RULE:
leftover.append(rule)
continue
# only proceed for things we recognize
if rule.type != rule.STYLE_RULE:
continue
# normal means it doesn't have "!important"
normal_properties = [
prop for prop in rule.style.getProperties()
if prop.priority != 'important'
]
important_properties = [
prop for prop in rule.style.getProperties()
if prop.priority == 'important'
]
# Create three strings that we can use to add to the `rules`
# list later as ready blocks of css.
bulk_normal = join_css_properties(normal_properties)
bulk_important = join_css_properties(important_properties)
bulk_all = join_css_properties(
normal_properties + important_properties
)
selectors = (
x.strip()
for x in rule.selectorText.split(',')
if x.strip() and not x.strip().startswith('@')
)
for selector in selectors:
if (':' in selector and self.exclude_pseudoclasses and
':' + selector.split(':', 1)[1]
not in FILTER_PSEUDOSELECTORS):
# a pseudoclass
leftover.append((selector, bulk_all))
continue
elif '*' in selector and not self.include_star_selectors:
continue
# Crudely calculate specificity
id_count = selector.count('#')
class_count = selector.count('.')
element_count = len(_element_selector_regex.findall(selector))
# Within one rule individual properties have different
# priority depending on !important.
# So we split each rule into two: one that includes all
# the !important declarations and another that doesn't.
for is_important, bulk in (
(1, bulk_important), (0, bulk_normal)
):
if not bulk:
# don't bother adding empty css rules
continue
specificity = (
is_important,
id_count,
class_count,
element_count,
ruleset_index,
len(rules) # this is the rule's index number
)
rules.append((specificity, selector, bulk))
return rules, leftover
def transform(self, pretty_print=True, **kwargs):
"""change the self.html and return it with CSS turned into style
attributes.
"""
if hasattr(self.html, "getroottree"):
# skip the next bit
root = self.html.getroottree()
page = root
tree = root
else:
if self.method == 'xml':
parser = etree.XMLParser(
ns_clean=False,
resolve_entities=False
)
else:
parser = etree.HTMLParser()
stripped = self.html.strip()
tree = etree.fromstring(stripped, parser).getroottree()
page = tree.getroot()
# lxml inserts a doctype if none exists, so only include it in
# the root if it was in the original html.
root = tree if stripped.startswith(tree.docinfo.doctype) else page
assert page is not None
if self.disable_leftover_css:
head = None
else:
head = get_or_create_head(tree)
#
# style selectors
#
rules = []
index = 0
for element in CSSSelector('style,link[rel~=stylesheet]')(page):
# If we have a media attribute whose value is anything other than
# 'screen', ignore the ruleset.
media = element.attrib.get('media')
if media and media != 'screen':
continue
data_attribute = element.attrib.get(self.attribute_name)
if data_attribute:
if data_attribute == 'ignore':
del element.attrib[self.attribute_name]
continue
else:
warnings.warn(
'Unrecognized %s attribute (%r)' % (
self.attribute_name,
data_attribute,
)
)
is_style = element.tag == 'style'
if is_style:
css_body = element.text
else:
href = element.attrib.get('href')
css_body = self._load_external(href)
these_rules, these_leftover = self._parse_style_rules(
css_body, index
)
index += 1
rules.extend(these_rules)
parent_of_element = element.getparent()
if these_leftover or self.keep_style_tags:
if is_style:
style = element
else:
style = etree.Element('style')
style.attrib['type'] = 'text/css'
if self.keep_style_tags:
style.text = css_body
else:
style.text = self._css_rules_to_string(these_leftover)
if self.method == 'xml':
style.text = etree.CDATA(style.text)
if not is_style:
element.addprevious(style)
parent_of_element.remove(element)
elif not self.keep_style_tags or not is_style:
parent_of_element.remove(element)
# external style files
if self.external_styles:
for stylefile in self.external_styles:
css_body = self._load_external(stylefile)
self._process_css_text(css_body, index, rules, head)
index += 1
# css text
if self.css_text:
for css_body in self.css_text:
self._process_css_text(css_body, index, rules, head)
index += 1
# rules is a tuple of (specificity, selector, styles), where
# specificity is a tuple ordered such that more specific
# rules sort larger.
rules.sort(key=operator.itemgetter(0))
# collecting all elements that we need to apply rules on
# id is unique for the lifetime of the object
# and lxml should give us the same one every time during this run
# item id -> {item: item, classes: [], style: []}
elements = {}
for _, selector, style in rules:
new_selector = selector
class_ = ''
if ':' in selector:
new_selector, class_ = re.split(':', selector, 1)
class_ = ':%s' % class_
# Keep filter-type selectors untouched.
if class_ in FILTER_PSEUDOSELECTORS:
class_ = ''
else:
selector = new_selector
sel = CSSSelector(selector)
items = sel(page)
if len(items):
# the style is the same for every matched item, so convert it to pairs once
processed_style = csstext_to_pairs(style)
for item in items:
item_id = id(item)
if item_id not in elements:
elements[item_id] = {
'item': item,
'classes': [],
'style': [],
}
elements[item_id]['style'].append(processed_style)
elements[item_id]['classes'].append(class_)
# Now apply inline style
# merge style only once for each element
# crucial when you have a lot of pseudo/classes
# and a long list of elements
for _, element in elements.items():
final_style = merge_styles(element['item'].attrib.get('style', ''),
element['style'], element['classes'])
element['item'].attrib['style'] = final_style
self._style_to_basic_html_attributes(
element['item'],
final_style,
force=True
)
if self.remove_classes:
# now we can delete all 'class' attributes
for item in page.xpath('//@class'):
parent = item.getparent()
del parent.attrib['class']
#
# URLs
#
if self.base_url:
if not urlparse(self.base_url).scheme:
raise ValueError('Base URL must have a scheme')
for attr in ('href', 'src'):
for item in page.xpath("//@%s" % attr):
parent = item.getparent()
url = parent.attrib[attr]
if (
attr == 'href' and self.preserve_internal_links and
url.startswith('#')
):
continue
if (
attr == 'src' and self.preserve_inline_attachments and
url.startswith('cid:')
):
continue
parent.attrib[attr] = urljoin(self.base_url, url)
if hasattr(self.html, "getroottree"):
return root
else:
kwargs.setdefault('method', self.method)
kwargs.setdefault('pretty_print', pretty_print)
kwargs.setdefault('encoding', 'utf-8') # As Ken Thompson intended
out = etree.tostring(root, **kwargs).decode(kwargs['encoding'])
if self.method == 'xml':
out = _cdata_regex.sub(
lambda m: '/*<![CDATA[*/%s/*]]>*/' % m.group(1),
out
)
if self.strip_important:
out = _importants.sub('', out)
return out
def _load_external_url(self, url):
r = urlopen(url)
_, params = cgi.parse_header(r.headers.get('Content-Type', ''))
encoding = params.get('charset', 'utf-8')
if 'gzip' in r.info().get('Content-Encoding', ''):
buf = BytesIO(r.read())
f = gzip.GzipFile(fileobj=buf)
out = f.read().decode(encoding)
else:
out = r.read().decode(encoding)
return out
def _load_external(self, url):
"""loads an external stylesheet from a remote url or local path
"""
if url.startswith('//'):
# then we have to rely on the base_url
if self.base_url and 'https://' in self.base_url:
url = 'https:' + url
else:
url = 'http:' + url
if url.startswith('http://') or url.startswith('https://'):
css_body = self._load_external_url(url)
else:
stylefile = url
if not os.path.isabs(stylefile):
stylefile = os.path.abspath(
os.path.join(self.base_path or '', stylefile)
)
if os.path.exists(stylefile):
with codecs.open(stylefile, encoding='utf-8') as f:
css_body = f.read()
elif self.base_url:
url = urljoin(self.base_url, url)
return self._load_external(url)
else:
raise ExternalNotFoundError(stylefile)
return css_body
def _style_to_basic_html_attributes(self, element, style_content,
force=False):
"""given an element and styles like
'background-color:red; font-family:Arial' turn some of that into HTML
attributes. like 'bgcolor', etc.
Note, the style_content can contain pseudoclasses like:
'{color:red; border:1px solid green} :visited{border:1px solid green}'
"""
if (
style_content.count('}') and
style_content.count('{') == style_content.count('}')
):
style_content = style_content.split('}')[0][1:]
attributes = OrderedDict()
for key, value in [x.split(':') for x in style_content.split(';')
if len(x.split(':')) == 2]:
key = key.strip()
if key == 'text-align':
attributes['align'] = value.strip()
elif key == 'vertical-align':
attributes['valign'] = value.strip()
elif key == 'background-color':
attributes['bgcolor'] = value.strip()
elif key == 'width' or key == 'height':
value = value.strip()
if value.endswith('px'):
value = value[:-2]
attributes[key] = value
for key, value in attributes.items():
if (
key in element.attrib and not force or
key in self.disable_basic_attributes
):
# already set, don't dare to overwrite
continue
element.attrib[key] = value
def _css_rules_to_string(self, rules):
"""given a list of css rules returns a css string
"""
lines = []
for item in rules:
if isinstance(item, tuple):
k, v = item
lines.append('%s {%s}' % (k, make_important(v)))
# media rule
else:
for rule in item.cssRules:
if isinstance(rule, cssutils.css.csscomment.CSSComment):
continue
for key in rule.style.keys():
rule.style[key] = (
rule.style.getPropertyValue(key, False),
'!important'
)
lines.append(item.cssText)
return '\n'.join(lines)
def _process_css_text(self, css_text, index, rules, head):
"""processes the given css_text by adding rules that can be
in-lined to the given rules list and adding any that cannot
be in-lined to the given `<head>` element.
"""
these_rules, these_leftover = self._parse_style_rules(css_text, index)
rules.extend(these_rules)
if head is not None and (these_leftover or self.keep_style_tags):
style = etree.Element('style')
style.attrib['type'] = 'text/css'
if self.keep_style_tags:
style.text = css_text
else:
style.text = self._css_rules_to_string(these_leftover)
head.append(style)
def transform(html, base_url=None):
return Premailer(html, base_url=base_url).transform()
if __name__ == '__main__': # pragma: no cover
html = """<html>
<head>
<title>Test</title>
<style>
h1, h2 { color:red; }
strong {
text-decoration:none
}
p { font-size:2px }
p.footer { font-size: 1px}
</style>
</head>
<body>
<h1>Hi!</h1>
<p><strong>Yes!</strong></p>
<p class="footer" style="color:red">Feetnuts</p>
</body>
</html>"""
p = Premailer(html)
print (p.transform())
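# For orientation only (an assumption about typical output, not a verbatim
# capture): running this module directly should print the same document with
# the <style> rules moved into style attributes, e.g. roughly
#
#     <h1 style="color:red">Hi!</h1>
#     <p style="font-size:2px"><strong style="text-decoration:none">Yes!</strong></p>
#
# with the footer paragraph keeping its inline color and picking up the
# ``p.footer`` font size; the exact serialization depends on lxml.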
|
|
from __future__ import division
import numpy as np
from scipy.linalg import toeplitz
from scipy.fftpack import fftshift, fft2, ifftshift, fft
from scipy.linalg import hankel
from stingray import lightcurve
import stingray.utils as utils
__all__ = ["Bispectrum"]
class Bispectrum(object):
"""Makes a :class:`Bispectrum` object from a :class:`stingray.Lightcurve`.
:class:`Bispectrum` is a higher order time series analysis method and is calculated by
indirect method as Fourier transform of triple auto-correlation function also called as
3rd order cumulant.
Parameters
----------
lc : :class:`stingray.Lightcurve` object
The light curve data for bispectrum calculation.
maxlag : int, optional, default ``None``
Maximum lag on both positive and negative sides of
3rd order cumulant (Similar to lags in correlation).
If ``None``, maxlag is set to one-half of the length of the light curve.
window : {``uniform``, ``parzen``, ``hamming``, ``hanning``, ``triangular``, ``welch``, ``blackman``, ``flat-top``}, optional, default ``None``
Type of window function to apply to the data.
scale : {``biased``, ``unbiased``}, optional, default ``biased``
Flag to decide biased or unbiased normalization for 3rd order cumulant function.
Attributes
----------
lc : :class:`stingray.Lightcurve` object
The light curve data to compute the :class:`Bispectrum`.
fs : float
Sampling frequency
n : int
Total Number of samples of light curve observations.
maxlag : int
Maximum lag on both positive and negative sides of
3rd order cumulant (similar to lags in correlation)
signal : numpy.ndarray
Row vector of light curve counts for matrix operations
scale : {``biased``, ``unbiased``}
Flag to decide biased or unbiased normalization for 3rd order cumulant function.
lags : numpy.ndarray
An array of time lags for which 3rd order cumulant is calculated
freq : numpy.ndarray
An array of freq values for :class:`Bispectrum`.
cum3 : numpy.ndarray
A ``maxlag*2+1 x maxlag*2+1`` matrix containing 3rd order cumulant data for different lags.
bispec : numpy.ndarray
A ``maxlag*2+1 x maxlag*2+1`` matrix containing bispectrum data for different frequencies.
bispec_mag : numpy.ndarray
Magnitude of the bispectrum
bispec_phase : numpy.ndarray
Phase of the bispectrum
References
----------
1) The biphase explained: understanding the asymmetries in coupled Fourier components of astronomical time series
by Thomas J. Maccarone Department of Physics, Box 41051, Science Building, Texas Tech University, Lubbock TX 79409-1051
School of Physics and Astronomy, University of Southampton, SO16 4ES
2) T. S. Rao, M. M. Gabr, An Introduction to Bispectral Analysis and Bilinear Time
Series Models, Lecture Notes in Statistics, Volume 24, D. Brillinger, S. Fienberg,
J. Gani, J. Hartigan, K. Krickeberg, Editors, Springer-Verlag, New York, NY, 1984.
3) Matlab version of bispectrum under following link.
https://www.mathworks.com/matlabcentral/fileexchange/60-bisp3cum
Examples
--------
::
>> from stingray.lightcurve import Lightcurve
>> from stingray.bispectrum import Bispectrum
>> lc = Lightcurve([1,2,3,4,5],[2,3,1,1,2])
>> bs = Bispectrum(lc,maxlag=1)
>> bs.lags
array([-1., 0., 1.])
>> bs.freq
array([-0.5, 0. , 0.5])
>> bs.cum3
array([[-0.2976, 0.1024, 0.1408],
[ 0.1024, 0.144 , -0.2976],
[ 0.1408, -0.2976, 0.1024]])
>> bs.bispec_mag
array([[ 1.26336794, 0.0032 , 0.0032 ],
[ 0.0032 , 0.16 , 0.0032 ],
[ 0.0032 , 0.0032 , 1.26336794]])
>> bs.bispec_phase
array([[ -9.65946229e-01, 2.25347190e-14, 3.46944695e-14],
[ 0.00000000e+00, 3.14159265e+00, 0.00000000e+00],
[ -3.46944695e-14, -2.25347190e-14, 9.65946229e-01]])
"""
def __init__(self, lc, maxlag=None, window=None, scale='biased'):
# Function call to create the Bispectrum object
self._make_bispectrum(lc, maxlag, window, scale)
def _make_bispectrum(self, lc, maxlag, window, scale):
"""
Makes a Bispectrum object with the given lightcurve, maxlag, window and scale.
Helper method.
"""
if not isinstance(lc, lightcurve.Lightcurve):
raise TypeError('lc must be a lightcurve.Lightcurve object')
# Available windows. Used to resolve the window parameter
WINDOWS = ['uniform', 'parzen', 'hamming', 'hanning', 'triangular', 'welch', 'blackmann', 'flat-top']
if window:
if not isinstance(window, str):
raise TypeError('Window must be specified as string!')
window = window.lower()
if window not in WINDOWS:
raise ValueError("Wrong window specified or window function is not available")
self.lc = lc
self.fs = 1 / lc.dt
self.n = self.lc.n
if maxlag is None:
# if maxlag is not specified, it is set to half the length of the lightcurve
self.maxlag = int(self.lc.n / 2)
else:
if not (isinstance(maxlag, int)):
raise ValueError('maxlag must be an integer')
# if a negative maxlag is entered, convert it to positive
if maxlag < 0:
self.maxlag = -maxlag
else:
self.maxlag = maxlag
if isinstance(scale, str) is False:
raise TypeError("scale must be a string")
if scale.lower() not in ["biased", "unbiased"]:
raise ValueError("scale can only be either 'biased' or 'unbiased'.")
self.scale = scale.lower()
if window is None:
self.window_name = 'No Window'
self.window = None
else:
self.window_name = window
self.window = self._get_window()
# Other attributes
self.lags = None
self.cum3 = None
self.freq = None
self.bispec = None
self.bispec_mag = None
self.bispec_phase = None
# converting to a row vector to apply matrix operations
self.signal = np.reshape(lc.counts, (1, len(self.lc.counts)))
# Mean subtraction before bispectrum calculation
self.signal = self.signal - np.mean(lc.counts)
self._cumulant3()
self._normalize_cumulant3()
self._cal_bispec()
def _get_window(self):
"""
Returns a window function of self.window_name type
"""
N = 2 * self.maxlag + 1
window_even = utils.create_window(N, self.window_name)
# 2d even window
window2d = np.array([window_even, ] * N)
## One-sided window with zero padding
window = np.zeros(N)
window[:self.maxlag + 1] = window_even[self.maxlag:]
window[self.maxlag:] = 0
# 2d window function to apply to bispectrum
row = np.concatenate(([window[0]], np.zeros(2 * self.maxlag)))
toep_matrix = toeplitz(window, row)
toep_matrix += np.tril(toep_matrix, -1).transpose()
window = toep_matrix[..., ::-1] * window2d * window2d.transpose()
return window
def _cumulant3(self):
"""
Calculates the 3rd order cumulant of the lightcurve.
Assigns
-------
self.cum3,
self.lags
"""
# Initialize square cumulant matrix with zeros
cum3_dim = 2 * self.maxlag + 1
self.cum3 = np.zeros((cum3_dim, cum3_dim))
# calculate lags for different values of 3rd order cumulant
lagindex = np.arange(-self.maxlag, self.maxlag + 1)
self.lags = lagindex * self.lc.dt
# Defines indices for matrices
ind = np.arange((self.n - self.maxlag) - 1, self.n)
ind_t = np.arange(self.maxlag, self.n)
zero_maxlag = np.zeros((1, self.maxlag))
zero_maxlag_t = zero_maxlag.transpose()
sig = self.signal.transpose()
rev_signal = np.array([self.signal[0][::-1]])
col = np.concatenate((sig[ind], zero_maxlag_t), axis=0)
row = np.concatenate((rev_signal[0][ind_t], zero_maxlag[0]), axis=0)
# converts row and column into a toeplitz matrix
toep = toeplitz(col, row)
rev_signal = np.repeat(rev_signal, [2 * self.maxlag + 1], axis=0)
# Calculates the cumulant of the 1D signal, i.e. the lightcurve counts
self.cum3 = self.cum3 + np.matmul(np.multiply(toep, rev_signal), toep.transpose())
def _normalize_cumulant3(self):
"""
Scales (biased or unbiased) the 3rd order cumulant of the lightcurve.
Updates
-------
self.cum3
"""
# Biased scaling of the cumulant
if self.scale == 'biased':
self.cum3 = self.cum3 / self.n
else:
# Unbiased scaling of the cumulant
maxlag1 = self.maxlag + 1
# Scaling matrix used for unbiased normalization of the cumulant
scal_matrix = np.zeros((maxlag1, maxlag1), dtype='int64')
# Calculate scaling matrix for unbiased normalization
for k in range(maxlag1):
maxlag1k = (maxlag1 - (k + 1))
scal_matrix[k, k:maxlag1] = np.tile(self.n - maxlag1k, (1, maxlag1k + 1))
scal_matrix += np.triu(scal_matrix, k=1).transpose()
maxlag1ind = np.arange(self.maxlag - 1, -1, -1)
lagdiff = self.n - maxlag1
# Rows and columns for Toeplitz matrix
col = np.arange(lagdiff, self.n - 1)
col = np.reshape(col, (1, len(col))).transpose()
row = np.arange(lagdiff, (self.n - 2 * self.maxlag) - 1, -1)
row = np.reshape(row, (1, len(row)))
# Toeplitz matrix
toep_matrix = toeplitz(col, row)
# Matrix used to concatenate with scaling matrix
conc_mat = np.array([scal_matrix[self.maxlag, maxlag1ind]])
join_matrix = np.concatenate((toep_matrix, conc_mat), axis=0)
scal_matrix = np.concatenate((scal_matrix, join_matrix), axis=1)
co_mat = scal_matrix[maxlag1ind, :]
co_mat = co_mat[:, np.arange(2 * self.maxlag, -1, -1)]
# Scaling matrix calculated
scal_matrix = np.concatenate((scal_matrix, co_mat), axis=0)
# Set numbers less than 1 to be equal to 1
scal_matrix[scal_matrix < 1] = 1
self.cum3 = np.divide(self.cum3, scal_matrix)
def _cal_bispec(self):
"""
Calculates the bispectrum as the Fourier transform of the 3rd order cumulant.
Attributes
----------
self.freq
self.bispec
self.bispec_mag
self.bispec_phase
"""
self.freq = (1 / 2) * self.fs * (self.lags / self.lc.dt) / self.maxlag
# Apply the window if specified, otherwise calculate without applying a window
if self.window is None:
self.bispec = fftshift(fft2(ifftshift(self.cum3)))
else:
self.bispec = fftshift(fft2(ifftshift(self.cum3 * self.window)))
self.bispec_mag = np.abs(self.bispec)
self.bispec_phase = np.angle((self.bispec))
def plot_cum3(self, axis=None, save=False, filename=None):
"""
Plot the 3rd order cumulant as function of time lags using ``matplotlib``.
Plot the ``cum3`` attribute on a graph with the ``lags`` attribute on x-axis and y-axis and
``cum3`` on z-axis
Parameters
----------
axis : list, tuple, string, default ``None``
Parameter to set axis properties of ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
acceptable argument for ``matplotlib.pyplot.axis()`` method.
save : bool, optional, default ``False``
If ``True``, save the figure with specified filename.
filename : str
File name and path of the image to save. Depends on the boolean ``save``.
Returns
-------
plt : ``matplotlib.pyplot`` object
Reference to plot, call ``show()`` to display it
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
cont = plt.contourf(self.lags, self.lags, self.cum3, 100, cmap=plt.cm.Spectral_r)
plt.colorbar(cont)
plt.title('3rd Order Cumulant')
plt.xlabel('lags 1')
plt.ylabel('lags 2')
if axis is not None:
plt.axis(axis)
if save:
if filename is None:
plt.savefig('bispec_cum3.png')
else:
plt.savefig(filename)
return plt
def plot_mag(self, axis=None, save=False, filename=None):
"""
Plot the magnitude of bispectrum as function of freq using ``matplotlib``.
Plot the ``bispec_mag`` attribute on a graph with ``freq`` attribute on the x-axis and y-axis and
the ``bispec_mag`` attribute on the z-axis.
Parameters
----------
axis : list, tuple, string, default ``None``
Parameter to set axis properties of ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
acceptable argument for ``matplotlib.pyplot.axis()`` method.
save : bool, optional, default ``False``
If ``True``, save the figure with specified filename and path.
filename : str
File name and path of the image to save. Depends on the bool ``save``.
Returns
-------
plt : ``matplotlib.pyplot`` object
Reference to plot, call ``show()`` to display it
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
cont = plt.contourf(self.freq, self.freq, self.bispec_mag, 100, cmap=plt.cm.Spectral_r)
plt.colorbar(cont)
plt.title('Bispectrum Magnitude')
plt.xlabel('freq 1')
plt.ylabel('freq 2')
if axis is not None:
plt.axis(axis)
if save:
if filename is None:
plt.savefig('bispec_mag.png')
else:
plt.savefig(filename)
return plt
def plot_phase(self, axis=None, save=False, filename=None):
"""
Plot the phase of bispectrum as function of freq using ``matplotlib``.
Plot the ``bispec_phase`` attribute on a graph with the ``freq`` attribute on the x-axis and
y-axis and the ``bispec_phase`` attribute on the z-axis.
Parameters
----------
axis : list, tuple, string, default ``None``
Parameter to set axis properties of ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
acceptable argument for ``matplotlib.pyplot.axis()`` function.
save : bool, optional, default ``False``
If ``True``, save the figure with specified filename and path.
filename : str
File name and path of the image to save. Depends on the bool ``save``.
Returns
-------
plt : ``matplotlib.pyplot`` object
Reference to plot, call ``show()`` to display it
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
cont = plt.contourf(self.freq, self.freq, self.bispec_phase, 100, cmap=plt.cm.Spectral_r)
plt.colorbar(cont)
plt.title('Bispectrum Phase')
plt.xlabel('freq 1')
plt.ylabel('freq 2')
if axis is not None:
plt.axis(axis)
# Save figure
if save:
if filename is None:
plt.savefig('bispec_phase.png')
else:
plt.savefig(filename)
return plt
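# A minimal usage sketch (added for illustration, not part of the original
# module); it mirrors the example in the class docstring above.
if __name__ == '__main__':
    lc = lightcurve.Lightcurve([1, 2, 3, 4, 5], [2, 3, 1, 1, 2])
    bs = Bispectrum(lc, maxlag=1)
    print(bs.lags)        # lags at which the 3rd order cumulant is evaluated
    print(bs.bispec_mag)  # magnitude of the bispectrum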
|
|
"""
concat routines
"""
import numpy as np
from pandas import compat, DataFrame, Series, Index, MultiIndex
from pandas.core.index import (_get_combined_index,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.categorical import (_factorize_from_iterable,
_factorize_from_iterables)
from pandas.core.internals import concatenate_block_managers
from pandas.core import common as com
from pandas.core.generic import NDFrame
import pandas.core.dtypes.concat as _concat
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
copy : boolean, default True
If False, do not copy data unnecessarily
Returns
-------
concatenated : type of objects
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
See Also
--------
Series.append
DataFrame.append
DataFrame.join
DataFrame.merge
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2',])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3])
animal letter number
0 NaN a 1
1 NaN b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy)
return op.get_result()
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{0}"'.format(type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = [obj for obj in objs if obj is not None]
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
name = getattr(keys, 'name', None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
raise TypeError("cannot concatenate a non-NDFrame object")
# consolidate
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
# want the highest ndim that we have, and it must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
# filter out the empties if we don't have multi-index possibilities
# note: keep empty Series as they affect the result columns / name
non_empties = [obj for obj in objs
if sum(obj.shape) > 0 or isinstance(obj, Series)]
if (len(non_empties) and (keys is None and names is None and
levels is None and
join_axes is None and
not self.intersect)):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Standardize axis parameter to int
if isinstance(sample, Series):
axis = DataFrame()._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, Series)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {0}, "
"input was {1}".format(sample.ndim, axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj, 'name', None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({name: obj})
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
# concat Series with length to keep dtype as much as possible
non_empties = [x for x in self.objs if len(x) > 0]
if len(non_empties) > 0:
values = [x._values for x in non_empties]
else:
values = [x._values for x in self.objs]
new_data = _concat._concat_compat(values)
name = com._consensus_name_attr(self.objs)
cons = _concat._get_series_result_type(new_data)
return (cons(new_data, index=self.new_axes[0],
name=name, dtype=new_data.dtype)
.__finalize__(self, method='concat'))
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis,
copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return (cons._from_axes(new_data, self.new_axes)
.__finalize__(self, method='concat'))
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must be "
"equal to {0}".format(ndim - 1))
# ufff...
indices = compat.lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
if self._is_series:
all_indexes = [x.index for x in self.objs]
else:
try:
all_indexes = [x._data.axes[i] for x in self.objs]
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of %s" % types)
return _get_combined_index(all_indexes, intersect=self.intersect)
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = com._default_index(len(self.objs))
return idx
elif self.keys is None:
names = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type "
"%r" % type(x).__name__)
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return com._default_index(len(self.objs))
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = com._default_index(sum(len(i) for i in indexes))
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = compat.lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
_, levels = _factorize_from_iterables(zipped)
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key %s not in level %s'
% (str(key), str(level)))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
codes, categories = _factorize_from_iterable(concat_index)
levels.append(categories)
label_list.append(codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len(set([idx.nlevels for idx in indexes])) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: %s'
% str(hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
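# Informal illustration (an addition, mirroring the concat() docstring above):
# when ``keys`` are passed, _make_concat_multiindex builds the outer level of
# the resulting MultiIndex from them, so that roughly
#
#     pd.concat([s1, s2], keys=['s1', 's2'])
#
# yields an index whose first level is ['s1', 's1', 's2', 's2'] and whose
# second level holds the original indexes of s1 and s2.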
|
|
import configparser
import datetime
import os
import random
import re
import time
import unittest
import dateutil
from TM1py.Exceptions import TM1pyException
from TM1py.Objects import Cube, Dimension, Hierarchy, Process
from TM1py.Services import TM1Service
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini'))
PREFIX = "TM1py_Tests_Server_"
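# These tests assume a config.ini next to this file with credentials for the
# target TM1 instance; the section name must be 'tm1srv01' and the keys below
# are illustrative, not prescriptive:
#
#     [tm1srv01]
#     address = localhost
#     port = 8001
#     user = admin
#     password = apple
#     ssl = True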
class TestServerMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Namings
cls.dimension_name1 = PREFIX + "Dimension1"
cls.dimension_name2 = PREFIX + "Dimension2"
cls.cube_name = PREFIX + "Cube1"
cls.process_name1 = PREFIX + "Process1"
cls.process_name2 = PREFIX + "Process2"
# Connect to TM1
cls.tm1 = TM1Service(**config['tm1srv01'])
# create a simple cube with dimensions to test transactionlog methods
if not cls.tm1.dimensions.exists(cls.dimension_name1):
d = Dimension(cls.dimension_name1)
h = Hierarchy(cls.dimension_name1, cls.dimension_name1)
h.add_element('Total Years', 'Consolidated')
h.add_element('No Year', 'Numeric')
for year in range(1989, 2040, 1):
h.add_element(str(year), 'Numeric')
h.add_edge('Total Years', str(year), 1)
d.add_hierarchy(h)
cls.tm1.dimensions.create(d)
if not cls.tm1.dimensions.exists(cls.dimension_name2):
d = Dimension(cls.dimension_name2)
h = Hierarchy(cls.dimension_name2, cls.dimension_name2)
h.add_element('Value', 'Numeric')
d.add_hierarchy(h)
cls.tm1.dimensions.create(d)
if not cls.tm1.cubes.exists(cls.cube_name):
cube = Cube(cls.cube_name, [cls.dimension_name1, cls.dimension_name2])
cls.tm1.cubes.create(cube)
# inject process with ItemReject
cls.process1 = Process(name=cls.process_name1, prolog_procedure="ItemReject('TM1py Tests');")
cls.tm1.processes.create(cls.process1)
# inject process that does nothing and runs successfully
cls.process2 = Process(name=cls.process_name2, prolog_procedure="sText = 'text';")
cls.tm1.processes.create(cls.process2)
def test_get_server_name(self):
server_name = self.tm1.server.get_server_name()
self.assertIsInstance(server_name, str)
self.assertGreater(len(server_name), 0)
active_configuration = self.tm1.server.get_active_configuration()
self.assertEqual(server_name, active_configuration["ServerName"])
def test_get_product_version(self):
product_version = self.tm1.server.get_product_version()
self.assertIsInstance(product_version, str)
self.assertGreater(len(product_version), 0)
self.assertGreaterEqual(int(product_version[0:2]), 10)
def test_get_admin_host(self):
admin_host = self.tm1.server.get_admin_host()
self.assertIsInstance(admin_host, str)
def test_get_data_directory(self):
data_directory = self.tm1.server.get_data_directory()
self.assertIsInstance(data_directory, str)
self.assertGreater(len(data_directory), 0)
active_configuration = self.tm1.server.get_active_configuration()
self.assertEqual(data_directory, active_configuration["Administration"]["DataBaseDirectory"])
def test_get_static_configuration(self):
static_configuration = self.tm1.server.get_static_configuration()
self.assertIsInstance(static_configuration, dict)
self.assertIn("ServerName", static_configuration)
self.assertIn("Access", static_configuration)
self.assertIn("Administration", static_configuration)
self.assertIn("Modelling", static_configuration)
self.assertIn("Performance", static_configuration)
def test_get_active_configuration(self):
active_configuration = self.tm1.server.get_active_configuration()
self.assertEqual(
int(self.tm1._tm1_rest._port),
int(active_configuration["Access"]["HTTP"]["Port"]))
def test_update_static_configuration(self):
for new_mtq_threads in (4, 8):
config_changes = {
"Performance": {
"MTQ": {
"NumberOfThreadsToUse": new_mtq_threads
}
}
}
response = self.tm1.server.update_static_configuration(config_changes)
self.assertTrue(response.ok)
active_config = self.tm1.server.get_active_configuration()
self.assertEqual(
active_config["Performance"]["MTQ"]["NumberOfThreadsToUse"],
new_mtq_threads - 1)
@unittest.skip("Doesn't work sometimes")
def test_get_last_process_message_from_message_log(self):
try:
self.tm1.processes.execute(self.process_name1)
except TM1pyException as e:
if "ProcessCompletedWithMessages" in e._response:
pass
else:
raise e
# TM1 takes one second to write to the message-log
time.sleep(1)
log_entry = self.tm1.server.get_last_process_message_from_messagelog(self.process_name1)
regex = re.compile('TM1ProcessError_.*.log')
self.assertTrue(regex.search(log_entry))
self.tm1.processes.execute(self.process_name2)
# TM1 takes one second to write to the message-log
time.sleep(1)
log_entry = self.tm1.server.get_last_process_message_from_messagelog(self.process_name2)
regex = re.compile('TM1ProcessError_.*.log')
self.assertFalse(regex.search(log_entry))
@unittest.skip("Doesn't work in TM1 11")
def test_get_last_transaction_log_entries(self):
self.tm1.processes.execute_ti_code(lines_prolog="CubeSetLogChanges('{}', {});".format(self.cube_name, 1))
tmstp = datetime.datetime.utcnow()
# Generate 3 random numbers
random_values = [random.uniform(-10, 10) for _ in range(3)]
# Write value 1 to cube
cellset = {
('2000', 'Value'): random_values[0]
}
self.tm1.cubes.cells.write_values(self.cube_name, cellset)
# Digest time in TM1
time.sleep(1)
# Write value 2 to cube
cellset = {
('2001', 'Value'): random_values[1]
}
self.tm1.cubes.cells.write_values(self.cube_name, cellset)
# Digest time in TM1
time.sleep(1)
# Write value 3 to cube
cellset = {
('2002', 'Value'): random_values[2]
}
self.tm1.cubes.cells.write_values(self.cube_name, cellset)
# Digest time in TM1
time.sleep(8)
user = config['tm1srv01']['user']
cube = self.cube_name
# Query transaction log with top filter
entries = self.tm1.server.get_transaction_log_entries(
reverse=True,
user=user,
cube=cube,
top=3)
values_from_top = [entry['NewValue'] for entry in entries]
self.assertGreaterEqual(len(values_from_top), 3)
# Query transaction log with Since filter
entries = self.tm1.server.get_transaction_log_entries(
reverse=True,
cube=cube,
since=tmstp,
top=10)
values_from_since = [entry['NewValue'] for entry in entries]
self.assertGreaterEqual(len(values_from_since), 3)
# Compare values written to cube vs. values retrieved from transaction log
self.assertEqual(len(values_from_top), len(values_from_since))
for v1, v2, v3 in zip(random_values, reversed(values_from_top), reversed(values_from_since)):
self.assertAlmostEqual(v1, v2, delta=0.000000001)
self.assertAlmostEqual(v1, v3, delta=0.000000001)
@unittest.skip("Doesn't work in TM1 11")
def test_get_transaction_log_entries_from_today(self):
# get datetime from today at 00:00:00
today = datetime.datetime.combine(datetime.date.today(), datetime.time(0, 0))
entries = self.tm1.server.get_transaction_log_entries(reverse=True, since=today)
self.assertTrue(len(entries) > 0)
for entry in entries:
entry_timestamp = dateutil.parser.parse(entry['TimeStamp'])
# all the entries should have today's date
entry_date = entry_timestamp.date()
today_date = datetime.date.today()
self.assertTrue(entry_date == today_date)
def test_session_context_default(self):
threads = self.tm1.monitoring.get_threads()
for thread in threads:
if "GET /api/v1/Threads" in thread["Function"] and thread["Name"] == config['tm1srv01']['user']:
self.assertTrue(thread["Context"] == "TM1py")
return
raise Exception("Did not find my own Thread")
def test_session_context_custom(self):
app_name = "Some Application"
with TM1Service(**config['tm1srv01'], session_context=app_name) as tm1:
threads = tm1.monitoring.get_threads()
for thread in threads:
if "GET /api/v1/Threads" in thread["Function"] and thread["Name"] == config['tm1srv01']['user']:
self.assertTrue(thread["Context"] == app_name)
return
raise Exception("Did not find my own Thread")
@classmethod
def tearDownClass(cls):
cls.tm1.cubes.delete(cls.cube_name)
cls.tm1.dimensions.delete(cls.dimension_name1)
cls.tm1.dimensions.delete(cls.dimension_name2)
cls.tm1.processes.delete(cls.process_name1)
cls.tm1.processes.delete(cls.process_name2)
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drivers for volumes.
"""
import os
import time
from oslo.config import cfg
from cinder.brick.initiator import connector as initiator
from cinder.brick.iscsi import iscsi
from cinder.brick.iser import iser
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import excutils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_shell_tries',
default=3,
help='number of times to attempt to run flakey shell commands'),
cfg.IntOpt('reserved_percentage',
default=0,
help='The percentage of backend capacity that is reserved'),
cfg.IntOpt('iscsi_num_targets',
default=100,
help='The maximum number of iscsi target ids per host'),
cfg.StrOpt('iscsi_target_prefix',
default='iqn.2010-10.org.openstack:',
help='prefix for iscsi volumes'),
cfg.StrOpt('iscsi_ip_address',
default='$my_ip',
help='The IP address that the iSCSI daemon is listening on'),
cfg.IntOpt('iscsi_port',
default=3260,
help='The port that the iSCSI daemon is listening on'),
cfg.IntOpt('num_volume_device_scan_tries',
deprecated_name='num_iscsi_scan_tries',
default=3,
help='The maximum number of times to rescan targets'
' to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=3,
help='The maximum number of times to rescan iSER target '
'to find volume'),
cfg.IntOpt('iser_num_targets',
default=100,
help='The maximum number of iser target ids per host'),
cfg.StrOpt('iser_target_prefix',
default='iqn.2010-10.org.iser.openstack:',
help='prefix for iser volumes'),
cfg.StrOpt('iser_ip_address',
default='$my_ip',
help='The IP address that the iSER daemon is listening on'),
cfg.IntOpt('iser_port',
default=3260,
help='The port that the iSER daemon is listening on'),
cfg.StrOpt('iser_helper',
default='tgtadm',
help='iser target user-land tool to use'),
cfg.StrOpt('volume_backend_name',
default=None,
help='The backend name for a given driver implementation'),
cfg.BoolOpt('use_multipath_for_image_xfer',
default=False,
help='Do we attach/detach volumes in cinder using multipath '
'for volume to image and image to volume transfers?'),
cfg.StrOpt('volume_clear',
default='zero',
help='Method used to wipe old volumes (valid options are: '
'none, zero, shred)'),
cfg.IntOpt('volume_clear_size',
default=0,
help='Size in MiB to wipe at start of old volumes. 0 => all'),
cfg.StrOpt('iscsi_helper',
default='tgtadm',
help='iscsi target user-land tool to use'),
cfg.StrOpt('volumes_dir',
default='$state_path/volumes',
help='Volume configuration file storage '
'directory'),
cfg.StrOpt('iet_conf',
default='/etc/iet/ietd.conf',
help='IET configuration file'),
cfg.StrOpt('lio_initiator_iqns',
default='',
help=('Comma-separated list of initiator IQNs '
'allowed to connect to the '
'iSCSI target. (From Nova compute nodes.)')),
cfg.StrOpt('iscsi_iotype',
default='fileio',
help=('Sets the behavior of the iSCSI target '
'to either perform blockio or fileio. '
'Optionally, auto can be set and Cinder '
'will autodetect the type of backing device'))]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
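# Purely illustrative (not part of the original driver): a cinder.conf backend
# section exercising a few of the options registered above might look like
#
#     [lvmdriver-1]
#     volume_backend_name = lvmdriver-1
#     iscsi_ip_address = 10.0.0.5
#     iscsi_helper = tgtadm
#     volume_clear = zero
#     volume_clear_size = 50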
class VolumeDriver(object):
"""Executes commands relating to Volumes."""
VERSION = "N/A"
def __init__(self, execute=utils.execute, *args, **kwargs):
# NOTE(vish): db is set by Manager
self.db = kwargs.get('db')
self.configuration = kwargs.get('configuration', None)
if self.configuration:
self.configuration.append_config_values(volume_opts)
self.set_execute(execute)
self._stats = {}
# set True by manager after successful check_for_setup
self._initialized = False
def set_execute(self, execute):
self._execute = execute
def set_initialized(self):
self._initialized = True
@property
def initialized(self):
return self._initialized
def get_version(self):
"""Get the current version of this driver."""
return self.VERSION
def _is_non_recoverable(self, err, non_recoverable_list):
for item in non_recoverable_list:
if item in err:
return True
return False
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
non_recoverable = kwargs.pop('no_retry_list', [])
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except processutils.ProcessExecutionError as ex:
tries = tries + 1
if tries >= self.configuration.num_shell_tries or\
self._is_non_recoverable(ex.stderr, non_recoverable):
raise
LOG.exception(_("Recovering from a failed execute. "
"Try number %s"), tries)
time.sleep(tries ** 2)
def check_for_setup_error(self):
raise NotImplementedError()
def create_volume(self, volume):
"""Creates a volume. Can optionally return a Dictionary of
changes to the volume object to be persisted.
"""
raise NotImplementedError()
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
raise NotImplementedError()
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
raise NotImplementedError()
def delete_volume(self, volume):
"""Deletes a volume."""
raise NotImplementedError()
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
raise NotImplementedError()
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
raise NotImplementedError()
def local_path(self, volume):
raise NotImplementedError()
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
raise NotImplementedError()
def create_export(self, context, volume):
"""Exports the volume. Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
raise NotImplementedError()
def remove_export(self, context, volume):
"""Removes an export for a volume."""
raise NotImplementedError()
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector"""
raise NotImplementedError()
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
"""Callback for volume attached to instance or host."""
pass
def detach_volume(self, context, volume):
"""Callback for volume detached."""
pass
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service. If 'refresh' is
True, run the update first.
"""
return None
def do_setup(self, context):
"""Any initialization the volume driver does while starting"""
pass
def validate_connector(self, connector):
"""Fail if connector doesn't contain all the data needed by driver"""
pass
def _copy_volume_data_cleanup(self, context, volume, properties,
attach_info, remote, force=False):
self._detach_volume(attach_info)
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(context, volume, properties,
force=force)
else:
self.terminate_connection(volume, properties, force=False)
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug(_('copy_data_between_volumes %(src)s -> %(dest)s.')
% {'src': src_vol['name'], 'dest': dest_vol['name']})
properties = utils.brick_get_connector_properties()
dest_remote = True if remote in ['dest', 'both'] else False
dest_orig_status = dest_vol['status']
try:
dest_attach_info = self._attach_volume(context,
dest_vol,
properties,
remote=dest_remote)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to attach volume %(vol)s")
LOG.error(msg % {'vol': dest_vol['id']})
self.db.volume_update(context, dest_vol['id'],
{'status': dest_orig_status})
src_remote = True if remote in ['src', 'both'] else False
src_orig_status = src_vol['status']
try:
src_attach_info = self._attach_volume(context,
src_vol,
properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to attach volume %(vol)s")
LOG.error(msg % {'vol': src_vol['id']})
self.db.volume_update(context, src_vol['id'],
{'status': src_orig_status})
self._copy_volume_data_cleanup(context, dest_vol, properties,
dest_attach_info, dest_remote,
force=True)
try:
size_in_mb = int(src_vol['size']) * 1024 # vol size is in GB
volume_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to copy volume %(src)s to %(dest)d")
LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']})
copy_error = True
finally:
self._copy_volume_data_cleanup(context, dest_vol, properties,
dest_attach_info, dest_remote,
force=copy_error)
self._copy_volume_data_cleanup(context, src_vol, properties,
src_attach_info, src_remote,
force=copy_error)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.debug(_('copy_image_to_volume %s.') % volume['name'])
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
try:
image_utils.fetch_to_raw(context,
image_service,
image_id,
attach_info['device']['path'],
size=volume['size'])
finally:
self._detach_volume(attach_info)
self.terminate_connection(volume, properties)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.debug(_('copy_volume_to_image %s.') % volume['name'])
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
try:
image_utils.upload_volume(context,
image_service,
image_meta,
attach_info['device']['path'])
finally:
self._detach_volume(attach_info)
self.terminate_connection(volume, properties)
def _attach_volume(self, context, volume, properties, remote=False):
"""Attach the volume."""
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
conn = rpcapi.initialize_connection(context, volume, properties)
else:
conn = self.initialize_connection(volume, properties)
# Use Brick's code to do attach/detach
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(protocol,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
conn=conn)
device = connector.connect_volume(conn['data'])
host_device = device['path']
if not connector.check_valid_device(host_device):
raise exception.DeviceUnavailable(path=host_device,
reason=(_("Unable to access "
"the backend storage "
"via the path "
"%(path)s.") %
{'path': host_device}))
return {'conn': conn, 'device': device, 'connector': connector}
def _detach_volume(self, attach_info):
"""Disconnect the volume from the host."""
# Use Brick's code to do attach/detach
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
def clone_image(self, volume, image_location, image_id):
"""Create a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
image_id is a string which represents the id of the image.
It can be used by the driver to introspect internal
stores or registry to do an efficient image clone.
Returns a tuple of a dict of volume properties (e.g. provider_location)
and a boolean indicating whether cloning occurred.
"""
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
LOG.debug(_('Creating a new backup for volume %s.') %
volume['name'])
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
try:
volume_path = attach_info['device']['path']
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
finally:
self._detach_volume(attach_info)
self.terminate_connection(volume, properties)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug(_('Restoring backup %(backup)s to '
'volume %(volume)s.') %
{'backup': backup['id'],
'volume': volume['name']})
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
try:
volume_path = attach_info['device']['path']
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
finally:
self._detach_volume(attach_info)
self.terminate_connection(volume, properties)
def clear_download(self, context, volume):
"""Clean up after an interrupted image copy."""
pass
def extend_volume(self, volume, new_size):
msg = _("Extend volume not implemented")
raise NotImplementedError(msg)
def migrate_volume(self, context, volume, host):
"""Migrate the volume to the specified host.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
"""
return (False, None)
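# Minimal illustrative subclass (a sketch, not a real backend): it shows the
# set of methods a concrete driver typically overrides; every other
# VolumeDriver method above either has a default or raises NotImplementedError.
# The class name and its no-op behaviour are assumptions for illustration only.
class _ExampleNullDriver(VolumeDriver):
    """Accepts every call and does nothing; exists only to show the API."""
    def check_for_setup_error(self):
        pass
    def create_volume(self, volume):
        pass
    def delete_volume(self, volume):
        pass
    def create_snapshot(self, snapshot):
        pass
    def delete_snapshot(self, snapshot):
        pass
    def local_path(self, volume):
        return None
    def ensure_export(self, context, volume):
        pass
    def create_export(self, context, volume):
        pass
    def remove_export(self, context, volume):
        pass
    def initialize_connection(self, volume, connector):
        return {'driver_volume_type': 'local', 'data': {}}
    def terminate_connection(self, volume, connector, **kwargs):
        pass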
class ISCSIDriver(VolumeDriver):
"""Executes commands relating to ISCSI volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSCSI target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
def __init__(self, *args, **kwargs):
super(ISCSIDriver, self).__init__(*args, **kwargs)
def _do_iscsi_discovery(self, volume):
#TODO(justinsb): Deprecate discovery and use stored info
#NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
LOG.warn(_("ISCSI provider_location not stored, using discovery"))
volume_name = volume['name']
(out, _err) = self._execute('iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p', volume['host'],
run_as_root=True)
for target in out.splitlines():
if (self.configuration.iscsi_ip_address in target
and volume_name in target):
return target
return None
def _get_iscsi_properties(self, volume):
"""Gets iscsi configuration
We ideally get saved information in the volume entity, but fall back
to discovery if need be. Discovery may be completely removed in future
The properties are:
:target_discovered: boolean indicating whether discovery was used
:target_iqn: the IQN of the iSCSI target
:target_portal: the portal of the iSCSI target
:target_lun: the lun of the iSCSI target
:volume_id: the id of the volume (currently used by xen)
:auth_method:, :auth_username:, :auth_password:
the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
:access_mode: the volume access mode allowed for the client
('rw' or 'ro' currently supported)
"""
properties = {}
location = volume['provider_location']
if location:
# provider_location is the same format as iSCSI discovery output
properties['target_discovered'] = False
else:
location = self._do_iscsi_discovery(volume)
if not location:
msg = (_("Could not find iSCSI export for volume %s") %
(volume['name']))
raise exception.InvalidVolume(reason=msg)
LOG.debug(_("ISCSI Discovery: Found %s") % (location))
properties['target_discovered'] = True
results = location.split(" ")
properties['target_portal'] = results[0].split(",")[0]
properties['target_iqn'] = results[1]
try:
properties['target_lun'] = int(results[2])
except (IndexError, ValueError):
if (self.configuration.volume_driver in
['cinder.volume.drivers.lvm.LVMISCSIDriver',
'cinder.volume.drivers.lvm.ThinLVMVolumeDriver'] and
self.configuration.iscsi_helper == 'tgtadm'):
properties['target_lun'] = 1
else:
properties['target_lun'] = 0
properties['volume_id'] = volume['id']
auth = volume['provider_auth']
if auth:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
geometry = volume.get('provider_geometry', None)
if geometry:
(physical_block_size, logical_block_size) = geometry.split()
properties['physical_block_size'] = physical_block_size
properties['logical_block_size'] = logical_block_size
encryption_key_id = volume.get('encryption_key_id', None)
properties['encrypted'] = encryption_key_id is not None
return properties
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
iscsi_properties['target_iqn'],
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('iscsiadm',
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
**kwargs):
iscsi_command = ('--op', 'update', '-n', property_key,
'-v', property_value)
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
The iscsi driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
Example return value::
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260',
'volume_id': 1,
'access_mode': 'rw'
}
}
"""
if CONF.iscsi_helper == 'lioadm':
self.tgtadm.initialize_connection(volume, connector)
iscsi_properties = self._get_iscsi_properties(volume)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
def validate_connector(self, connector):
# iSCSI drivers require the initiator information
if 'initiator' not in connector:
err_msg = (_('The volume driver requires the iSCSI initiator '
'name in the connector.'))
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
def terminate_connection(self, volume, connector, **kwargs):
pass
def _get_iscsi_initiator(self):
"""Get iscsi initiator name for this machine"""
# NOTE openiscsi stores initiator name in a file that
# needs root permission to read.
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
for l in contents.split('\n'):
if l.startswith('InitiatorName='):
return l[l.index('=') + 1:].strip()
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'Generic_iSCSI'
data["vendor_name"] = 'Open Source'
data["driver_version"] = '1.0'
data["storage_protocol"] = 'iSCSI'
data['total_capacity_gb'] = 'infinite'
data['free_capacity_gb'] = 'infinite'
data['reserved_percentage'] = 100
data['QoS_support'] = False
self._stats = data
def accept_transfer(self, context, volume, new_user, new_project):
pass
def get_target_admin(self):
root_helper = utils.get_root_helper()
if CONF.iscsi_helper == 'tgtadm':
return iscsi.TgtAdm(root_helper,
CONF.volumes_dir,
CONF.iscsi_target_prefix)
elif CONF.iscsi_helper == 'fake':
return iscsi.FakeIscsiHelper()
elif CONF.iscsi_helper == 'lioadm':
return iscsi.LioAdm(root_helper,
CONF.lio_initiator_iqns,
CONF.iscsi_target_prefix)
else:
return iscsi.IetAdm(root_helper, CONF.iet_conf, CONF.iscsi_iotype)
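# Illustrative helper (a sketch, not called by the driver): shows how the
# provider_location format documented on ISCSIDriver,
# '<ip>:<port>,<portal> <target IQN>', decomposes into the portal and IQN,
# mirroring the parsing done in _get_iscsi_properties above.
def _example_parse_provider_location(location):
    # e.g. '10.0.0.1:3260,1 iqn.2010-10.org.openstack:volume-00000001'
    parts = location.split(" ")
    portal = parts[0].split(",")[0]   # '10.0.0.1:3260'
    iqn = parts[1]                    # 'iqn.2010-10.org.openstack:volume-...'
    return portal, iqn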
class FakeISCSIDriver(ISCSIDriver):
"""Logs calls instead of executing."""
def __init__(self, *args, **kwargs):
super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
*args, **kwargs)
def create_volume(self, volume):
pass
def check_for_setup_error(self):
"""No setup necessary in fake mode."""
pass
def initialize_connection(self, volume, connector):
return {
'driver_volume_type': 'iscsi',
'data': {'access_mode': 'rw'}
}
def terminate_connection(self, volume, connector, **kwargs):
pass
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
LOG.debug(_("FAKE ISCSI: %s"), cmd)
return (None, None)
class ISERDriver(ISCSIDriver):
"""Executes commands relating to ISER volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSER target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
def __init__(self, *args, **kwargs):
super(ISERDriver, self).__init__(*args, **kwargs)
def _do_iser_discovery(self, volume):
LOG.warn(_("ISER provider_location not stored, using discovery"))
volume_name = volume['name']
(out, _err) = self._execute('iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p', volume['host'],
run_as_root=True)
for target in out.splitlines():
if (self.configuration.iser_ip_address in target
and volume_name in target):
return target
return None
def _get_iser_properties(self, volume):
"""Gets iser configuration
We ideally get saved information in the volume entity, but fall back
to discovery if need be. Discovery may be completely removed in future
The properties are:
:target_discovered: boolean indicating whether discovery was used
:target_iqn: the IQN of the iSER target
:target_portal: the portal of the iSER target
:target_lun: the lun of the iSER target
:volume_id: the id of the volume (currently used by xen)
:auth_method:, :auth_username:, :auth_password:
the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
"""
properties = {}
location = volume['provider_location']
if location:
# provider_location is the same format as iSER discovery output
properties['target_discovered'] = False
else:
location = self._do_iser_discovery(volume)
if not location:
msg = (_("Could not find iSER export for volume %s") %
(volume['name']))
raise exception.InvalidVolume(reason=msg)
LOG.debug(_("ISER Discovery: Found %s") % (location))
properties['target_discovered'] = True
results = location.split(" ")
properties['target_portal'] = results[0].split(",")[0]
properties['target_iqn'] = results[1]
try:
properties['target_lun'] = int(results[2])
except (IndexError, ValueError):
if (self.configuration.volume_driver in
['cinder.volume.drivers.lvm.LVMISERDriver',
'cinder.volume.drivers.lvm.ThinLVMVolumeDriver'] and
self.configuration.iser_helper == 'tgtadm'):
properties['target_lun'] = 1
else:
properties['target_lun'] = 0
properties['volume_id'] = volume['id']
auth = volume['provider_auth']
if auth:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
return properties
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
The iser driver returns a driver_volume_type of 'iser'.
The format of the driver data is defined in _get_iser_properties.
Example return value::
{
'driver_volume_type': 'iser'
'data': {
'target_discovered': True,
'target_iqn':
'iqn.2010-10.org.iser.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260',
'volume_id': 1,
}
}
"""
iser_properties = self._get_iser_properties(volume)
return {
'driver_volume_type': 'iser',
'data': iser_properties
}
def _check_valid_device(self, path):
cmd = ('dd', 'if=%(path)s' % {"path": path},
'of=/dev/null', 'count=1')
out, info = None, None
try:
out, info = self._execute(*cmd, run_as_root=True)
except processutils.ProcessExecutionError as e:
LOG.error(_("Failed to access the device on the path "
"%(path)s: %(error)s.") %
{"path": path, "error": e.stderr})
return False
# If the info is none, the path does not exist.
if info is None:
return False
return True
def _attach_volume(self, context, volume, connector):
"""Attach the volume."""
iser_properties = None
host_device = None
init_conn = self.initialize_connection(volume, connector)
iser_properties = init_conn['data']
# code "inspired by" nova/virt/libvirt/volume.py
try:
self._run_iscsiadm(iser_properties, ())
except processutils.ProcessExecutionError as exc:
# iscsiadm returns 21 for "No records found" after version 2.0-871
if exc.exit_code in [21, 255]:
self._run_iscsiadm(iser_properties, ('--op', 'new'))
else:
raise
if iser_properties.get('auth_method'):
self._iscsiadm_update(iser_properties,
"node.session.auth.authmethod",
iser_properties['auth_method'])
self._iscsiadm_update(iser_properties,
"node.session.auth.username",
iser_properties['auth_username'])
self._iscsiadm_update(iser_properties,
"node.session.auth.password",
iser_properties['auth_password'])
host_device = ("/dev/disk/by-path/ip-%s-iser-%s-lun-%s" %
(iser_properties['target_portal'],
iser_properties['target_iqn'],
iser_properties.get('target_lun', 0)))
out = self._run_iscsiadm_bare(["-m", "session"],
run_as_root=True,
check_exit_code=[0, 1, 21])[0] or ""
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in out.splitlines() if p.startswith("iser:")]
stripped_portal = iser_properties['target_portal'].split(",")[0]
length_iqn = [s for s in portals
if stripped_portal ==
s['portal'].split(",")[0] and
s['iqn'] == iser_properties['target_iqn']]
if len(portals) == 0 or len(length_iqn) == 0:
try:
self._run_iscsiadm(iser_properties, ("--login",),
check_exit_code=[0, 255])
except processutils.ProcessExecutionError as err:
if err.exit_code in [15]:
self._iscsiadm_update(iser_properties,
"node.startup",
"automatic")
return iser_properties, host_device
else:
raise
self._iscsiadm_update(iser_properties,
"node.startup", "automatic")
tries = 0
while not os.path.exists(host_device):
if tries >= self.configuration.num_iser_scan_tries:
raise exception.CinderException(_("iSER device "
"not found "
"at %s") % (host_device))
LOG.warn(_("ISER volume not yet found at: %(host_device)s. "
"Will rescan & retry. Try number: %(tries)s.") %
{'host_device': host_device, 'tries': tries})
# The rescan isn't documented as being necessary(?),
# but it helps
self._run_iscsiadm(iser_properties, ("--rescan",))
tries = tries + 1
if not os.path.exists(host_device):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSER node %(host_device)s "
"(after %(tries)s rescans).") %
{'host_device': host_device,
'tries': tries})
if not self._check_valid_device(host_device):
raise exception.DeviceUnavailable(path=host_device,
reason=(_("Unable to access "
"the backend storage "
"via the path "
"%(path)s.") %
{'path': host_device}))
return iser_properties, host_device
def _update_volume_status(self):
"""Retrieve status info from volume group."""
LOG.debug(_("Updating volume status"))
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'Generic_iSER'
data["vendor_name"] = 'Open Source'
data["driver_version"] = '1.0'
data["storage_protocol"] = 'iSER'
data['total_capacity_gb'] = 'infinite'
data['free_capacity_gb'] = 'infinite'
data['reserved_percentage'] = 100
data['QoS_support'] = False
self._stats = data
def get_target_admin(self):
root_helper = utils.get_root_helper()
if CONF.iser_helper == 'fake':
return iser.FakeIserHelper()
else:
return iser.TgtAdm(root_helper,
CONF.volumes_dir)
class FakeISERDriver(FakeISCSIDriver):
"""Logs calls instead of executing."""
def __init__(self, *args, **kwargs):
super(FakeISERDriver, self).__init__(execute=self.fake_execute,
*args, **kwargs)
def initialize_connection(self, volume, connector):
return {
'driver_volume_type': 'iser',
'data': {}
}
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
LOG.debug(_("FAKE ISER: %s"), cmd)
return (None, None)
class FibreChannelDriver(VolumeDriver):
"""Executes commands relating to Fibre Channel volumes."""
def __init__(self, *args, **kwargs):
super(FibreChannelDriver, self).__init__(*args, **kwargs)
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'access_mode': 'rw'
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'access_mode': 'rw'
}
}
"""
msg = _("Driver must implement initialize_connection")
raise NotImplementedError(msg)
|
|
"""
Unit test for SLSQP optimization.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_, assert_array_almost_equal, TestCase,
assert_allclose, assert_equal, run_module_suite)
import numpy as np
from scipy.optimize import fmin_slsqp, minimize
class MyCallBack(object):
"""pass a custom callback function
This makes sure it's being used.
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
def __call__(self, x):
self.been_called = True
self.ncalls += 1
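# Illustrative usage (a sketch, independent of the test class below): the
# callback object above can be handed straight to scipy.optimize.minimize;
# SLSQP invokes it once per iteration with the current parameter vector.
# The quadratic objective here is only an example.
def _example_callback_usage():
    cb = MyCallBack()
    res = minimize(lambda x: (x[0] - 1.0) ** 2, [0.0],
                   method='SLSQP', callback=cb)
    return cb.ncalls, res.x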
class TestSLSQP(TestCase):
"""
Test SLSQP algorithm using Example 14.4 from Numerical Methods for
Engineers by Steven Chapra and Raymond Canale.
This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2,
which has a maximum at x=2, y=1.
"""
def setUp(self):
self.opts = {'disp': False}
def fun(self, d, sign=1.0):
"""
Arguments:
d - A list of two elements, where d[0] represents x and d[1] represents y
in the following equation.
sign - A multiplier for f. Since we want to maximize f, and the scipy
optimizers can only minimize functions, we need to multiply it by
-1 to achieve the desired solution.
Returns:
2*x*y + 2*x - x**2 - 2*y**2
"""
x = d[0]
y = d[1]
return sign*(2*x*y + 2*x - x**2 - 2*y**2)
def jac(self, d, sign=1.0):
"""
This is the derivative of fun, returning a numpy array
representing df/dx and df/dy.
"""
x = d[0]
y = d[1]
dfdx = sign*(-2*x + 2*y + 2)
dfdy = sign*(2*x - 4*y)
return np.array([dfdx, dfdy], float)
def fun_and_jac(self, d, sign=1.0):
return self.fun(d, sign), self.jac(d, sign)
def f_eqcon(self, x, sign=1.0):
""" Equality constraint """
return np.array([x[0] - x[1]])
def fprime_eqcon(self, x, sign=1.0):
""" Equality constraint, derivative """
return np.array([[1, -1]])
def f_eqcon_scalar(self, x, sign=1.0):
""" Scalar equality constraint """
return self.f_eqcon(x, sign)[0]
def fprime_eqcon_scalar(self, x, sign=1.0):
""" Scalar equality constraint, derivative """
return self.fprime_eqcon(x, sign)[0].tolist()
def f_ieqcon(self, x, sign=1.0):
""" Inequality constraint """
return np.array([x[0] - x[1] - 1.0])
def fprime_ieqcon(self, x, sign=1.0):
""" Inequality constraint, derivative """
return np.array([[1, -1]])
def f_ieqcon2(self, x):
""" Vector inequality constraint """
return np.asarray(x)
def fprime_ieqcon2(self, x):
""" Vector inequality constraint, derivative """
return np.identity(x.shape[0])
# minimize
def test_minimize_unbounded_approximated(self):
# Minimize, method='SLSQP': unbounded, approximated jacobian.
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_unbounded_given(self):
# Minimize, method='SLSQP': unbounded, given jacobian.
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
jac=self.jac, method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_bounded_approximated(self):
# Minimize, method='SLSQP': bounded, approximated jacobian.
with np.errstate(invalid='ignore'):
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
bounds=((2.5, None), (None, 0.5)),
method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2.5, 0.5])
def test_minimize_unbounded_combined(self):
# Minimize, method='SLSQP': unbounded, combined function and jacobian.
res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ),
jac=True, method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_equality_approximated(self):
# Minimize with method='SLSQP': equality constraint, approx. jacobian.
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
constraints={'type': 'eq',
'fun': self.f_eqcon,
'args': (-1.0, )},
method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_equality_given(self):
# Minimize with method='SLSQP': equality constraint, given jacobian.
res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
method='SLSQP', args=(-1.0,),
constraints={'type': 'eq', 'fun':self.f_eqcon,
'args': (-1.0, )},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_equality_given2(self):
# Minimize with method='SLSQP': equality constraint, given jacobian
# for fun and const.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0,),
constraints={'type': 'eq',
'fun': self.f_eqcon,
'args': (-1.0, ),
'jac': self.fprime_eqcon},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_equality_given_cons_scalar(self):
# Minimize with method='SLSQP': scalar equality constraint, given
# jacobian for fun and const.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0,),
constraints={'type': 'eq',
'fun': self.f_eqcon_scalar,
'args': (-1.0, ),
'jac': self.fprime_eqcon_scalar},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_inequality_given(self):
# Minimize with method='SLSQP': inequality constraint, given jacobian.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0, ),
constraints={'type': 'ineq',
'fun': self.f_ieqcon,
'args': (-1.0, )},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1], atol=1e-3)
def test_minimize_inequality_given_vector_constraints(self):
# Minimize with method='SLSQP': vector inequality constraint, given
# jacobian.
res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
method='SLSQP', args=(-1.0,),
constraints={'type': 'ineq',
'fun': self.f_ieqcon2,
'jac': self.fprime_ieqcon2},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_bound_equality_given2(self):
# Minimize with method='SLSQP': bounds, eq. const., given jac. for
# fun. and const.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0, ),
bounds=[(-0.8, 1.), (-1, 0.8)],
constraints={'type': 'eq',
'fun': self.f_eqcon,
'args': (-1.0, ),
'jac': self.fprime_eqcon},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [0.8, 0.8], atol=1e-3)
# fmin_slsqp
def test_unbounded_approximated(self):
# SLSQP: unbounded, approximated jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [2, 1])
def test_unbounded_given(self):
# SLSQP: unbounded, given jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
fprime = self.jac, iprint = 0,
full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [2, 1])
def test_equality_approximated(self):
# SLSQP: equality constraint, approximated jacobian.
res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,),
eqcons = [self.f_eqcon],
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [1, 1])
def test_equality_given(self):
# SLSQP: equality constraint, given jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0,),
eqcons = [self.f_eqcon], iprint = 0,
full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [1, 1])
def test_equality_given2(self):
# SLSQP: equality constraint, given jacobian for fun and const.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0,),
f_eqcons = self.f_eqcon,
fprime_eqcons = self.fprime_eqcon,
iprint = 0,
full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [1, 1])
def test_inequality_given(self):
# SLSQP: inequality constraint, given jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0, ),
ieqcons = [self.f_ieqcon],
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [2, 1], decimal=3)
def test_bound_equality_given2(self):
# SLSQP: bounds, eq. const., given jac. for fun. and const.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0, ),
bounds = [(-0.8, 1.), (-1, 0.8)],
f_eqcons = self.f_eqcon,
fprime_eqcons = self.fprime_eqcon,
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [0.8, 0.8], decimal=3)
def test_scalar_constraints(self):
# Regression test for gh-2182
x = fmin_slsqp(lambda z: z**2, [3.],
ieqcons=[lambda z: z[0] - 1],
iprint=0)
assert_array_almost_equal(x, [1.])
x = fmin_slsqp(lambda z: z**2, [3.],
f_ieqcons=lambda z: [z[0] - 1],
iprint=0)
assert_array_almost_equal(x, [1.])
def test_integer_bounds(self):
# This should not raise an exception
fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0)
def test_callback(self):
# Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback
callback = MyCallBack()
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
method='SLSQP', callback=callback, options=self.opts)
assert_(res['success'], res['message'])
assert_(callback.been_called)
assert_equal(callback.ncalls, res['nit'])
if __name__ == "__main__":
run_module_suite()
|
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = False # buildout does not support user sites.
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
def makepath(*paths):
dir = os.path.abspath(os.path.join(*paths))
return dir, os.path.normcase(dir)
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if hasattr(m, '__loader__'):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except AttributeError:
continue
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for line in f:
if line.startswith("#"):
continue
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
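# Illustrative usage (a sketch, not called anywhere in this module): an extra
# deployment-specific directory could be registered the same way a
# site-packages directory is, with any .pth files inside it handled by
# addpackage().  The default path below is made up.
def _example_add_extra_sitedir(extra_dir='/opt/example/python-extras'):
    if os.path.isdir(extra_dir):
        addsitedir(extra_dir)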
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
USER_BASE = env_base if env_base else joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
USER_BASE = env_base if env_base else joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
return known_paths
def addsitepackages(known_paths):
"""Add site packages, as determined by zc.buildout.
See original_addsitepackages, below, for the original version."""
setuptools_path = 'c:\\src\\django\\buildout15\\eggs\\setuptools-0.6c12dev_r88124-py2.6.egg'
sys.path.append(setuptools_path)
known_paths.add(os.path.normcase(setuptools_path))
import pkg_resources
buildout_paths = [
'c:\\src\\django\\buildout15\\src',
'c:\\src\\django\\buildout15\\eggs\\setuptools-0.6c12dev_r88124-py2.6.egg'
]
for path in buildout_paths:
sitedir, sitedircase = makepath(path)
if not sitedircase in known_paths and os.path.exists(sitedir):
sys.path.append(sitedir)
known_paths.add(sitedircase)
pkg_resources.working_set.add_entry(sitedir)
sys.__egginsert = len(buildout_paths) # Support distribute.
original_paths = [
'C:\\Python26\\lib\\site-packages'
]
for path in original_paths:
if path == setuptools_path or path not in known_paths:
addsitedir(path, known_paths)
return known_paths
def original_addsitepackages(known_paths):
"""Add site-packages (and possibly site-python) to sys.path"""
sitedirs = []
seen = []
for prefix in PREFIXES:
if not prefix or prefix in seen:
continue
seen.append(prefix)
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitedirs.append(os.path.join(prefix, "lib", "site-python"))
else:
sitedirs.append(prefix)
sitedirs.append(os.path.join(prefix, "lib", "site-packages"))
if sys.platform == "darwin":
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
sitedirs.append(
os.path.expanduser(
os.path.join("~", "Library", "Python",
sys.version[:3], "site-packages")))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = file(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print self.__lines[i]
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = raw_input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
__builtin__.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
__builtin__.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
else:
__builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
__builtin__.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback"
def main():
global ENABLE_USER_SITE
abs__file__()
known_paths = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
known_paths = addsitepackages(known_paths)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print "sys.path = ["
for dir in sys.path:
print " %r," % (dir,)
print "]"
print "USER_BASE: %r (%s)" % (USER_BASE,
"exists" if os.path.isdir(USER_BASE) else "doesn't exist")
print "USER_SITE: %r (%s)" % (USER_SITE,
"exists" if os.path.isdir(USER_SITE) else "doesn't exist")
print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print os.pathsep.join(buffer)
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print textwrap.dedent(help % (sys.argv[0], os.pathsep))
sys.exit(10)
if __name__ == '__main__':
_script()
|
|
#!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings
try:
import django
except ImportError as e:
raise RuntimeError(
'Django module not found, reference tests/README.rst for instructions.'
) from e
else:
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.log import DEFAULT_LOGGING
from django.utils.version import PY37
try:
import MySQLdb
except ImportError:
pass
else:
# Ignore informational warnings from QuerySet.explain().
warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango40Warning)
# Make runtime warning errors to ensure no usage of error prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Remove the temporary TMPDIR at interpreter exit.
atexit.register(shutil.rmtree, TMPDIR)
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_runner_apps',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [(None, RUNTESTS_DIR)]
if connection.features.gis_enabled:
# GIS tests are in nested apps
discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
else:
SUBDIRS_TO_SKIP.append('gis_tests')
for modpath, dirpath in discovery_paths:
for f in os.scandir(dirpath):
if ('.' not in f.name and
os.path.basename(f.name) not in SUBDIRS_TO_SKIP and
not f.is_file() and
os.path.exists(os.path.join(f.path, '__init__.py'))):
modules.append((modpath, f.name))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels, parallel, start_at, start_after):
# Reduce the given test labels to just the app module path.
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
max_parallel = default_test_processes() if parallel == 0 else parallel
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE': settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
'auth': None,
'contenttypes': None,
'sessions': None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
'fields.W342', # ForeignKey(unique=True) -> OneToOneField
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed
# without raising AppRegistryNotReady when running gis_tests in isolation
# on some backends (e.g. PostGIS).
if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
print('Aborting: A GIS database backend is required to run gis_tests.')
sys.exit(1)
def _module_match_label(module_label, label):
# Exact or ancestor match.
return module_label == label or module_label.startswith(label + '.')
# Load all the test model apps.
test_modules = get_test_modules()
found_start = not (start_at or start_after)
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = modpath + '.' + module_name
else:
module_label = module_name
if not found_start:
if start_at and _module_match_label(module_label, start_at):
found_start = True
elif start_after and _module_match_label(module_label, start_after):
found_start = True
continue
else:
continue
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
module_found_in_labels = not test_labels or any(
_module_match_label(module_label, label) for label in test_labels_set
)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases).
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
def actual_test_processes(parallel):
if parallel == 0:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
return default_test_processes()
else:
return 1
else:
return parallel
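# A minimal sketch of how the parallel count is resolved (values are illustrative only):
#   actual_test_processes(0) -> default_test_processes() if every configured database
#                               backend supports cloning, otherwise 1
#   actual_test_processes(4) -> 4 (an explicit --parallel value is used as-is)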
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
browsers = values.split(',')
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
setattr(namespace, self.dest, browsers)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
test_labels, debug_sql, parallel, tags, exclude_tags,
test_name_patterns, start_at, start_after, pdb):
state = setup(verbosity, test_labels, parallel, start_at, start_after)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=actual_test_processes(parallel),
tags=tags,
exclude_tags=exclude_tags,
test_name_patterns=test_name_patterns,
pdb=pdb,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def get_subprocess_args(options):
subprocess_args = [
sys.executable, __file__, '--settings=%s' % options.settings
]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
if options.tags:
subprocess_args.append('--tag=%s' % options.tags)
if options.exclude_tags:
subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
return subprocess_args
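# Illustrative example (option values are hypothetical): with --settings=test_sqlite,
# --failfast and --verbosity=2, get_subprocess_args() would produce
#   [sys.executable, __file__, '--settings=test_sqlite', '--failfast', '--verbosity=2']
# which bisect_tests() and paired_tests() then extend with test labels.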
def bisect_tests(bisection_label, options, test_labels, parallel, start_at, start_after):
state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.run(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.run(subprocess_args + test_labels_b)
if failures_a.returncode and not failures_b.returncode:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b.returncode and not failures_a.returncode:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a.returncode and failures_b.returncode:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels, parallel, start_at, start_after):
state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
'modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output',
)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_true',
help='Tells Django to stop running the test suite after first failed test.',
)
parser.add_argument(
'--keepdb', action='store_true',
help='Tells Django to preserve the test database between runs.',
)
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
'--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.',
)
parser.add_argument(
'--pair',
help='Run the test suite in pairs with the named test to find problem pairs.',
)
parser.add_argument(
'--reverse', action='store_true',
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.',
)
parser.add_argument(
'--selenium', action=ActionSelenium, metavar='BROWSERS',
help='A comma-separated list of browsers to run the Selenium tests against.',
)
parser.add_argument(
'--headless', action='store_true',
help='Run selenium tests in headless mode, if the browser supports the option.',
)
parser.add_argument(
'--selenium-hub',
help='A URL for a selenium hub instance to use in combination with --selenium.',
)
parser.add_argument(
'--external-host', default=socket.gethostname(),
help='The external host that can be reached by the selenium hub instance when running Selenium '
'tests via Selenium Hub.',
)
parser.add_argument(
'--debug-sql', action='store_true',
help='Turn on the SQL query logger within tests.',
)
parser.add_argument(
'--parallel', nargs='?', default=0, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', dest='tags', action='append',
help='Run only tests with the specified tags. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', dest='exclude_tags', action='append',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--start-after', dest='start_after',
help='Run tests starting after the specified top-level module.',
)
parser.add_argument(
'--start-at', dest='start_at',
help='Run tests starting at the specified top-level module.',
)
parser.add_argument(
'--pdb', action='store_true',
help='Runs the PDB debugger on error or failure.'
)
if PY37:
parser.add_argument(
'-k', dest='test_name_patterns', action='append',
help=(
'Only run test methods and classes matching test name pattern. '
'Same as unittest -k option. Can be used multiple times.'
),
)
options = parser.parse_args()
using_selenium_hub = options.selenium and options.selenium_hub
if options.selenium_hub and not options.selenium:
parser.error('--selenium-hub and --external-host require --selenium to be used.')
if using_selenium_hub and not options.external_host:
parser.error('--selenium-hub and --external-host must be used together.')
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
mutually_exclusive_options = [options.start_at, options.start_after, options.modules]
enabled_module_options = [bool(option) for option in mutually_exclusive_options].count(True)
if enabled_module_options > 1:
print('Aborting: --start-at, --start-after, and test labels are mutually exclusive.')
sys.exit(1)
for opt_name in ['start_at', 'start_after']:
opt_val = getattr(options, opt_name)
if opt_val:
if '.' in opt_val:
print('Aborting: --%s must be a top-level module.' % opt_name.replace('_', '-'))
sys.exit(1)
setattr(options, opt_name, os.path.normpath(opt_val))
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.selenium:
if not options.tags:
options.tags = ['selenium']
elif 'selenium' not in options.tags:
options.tags.append('selenium')
if options.selenium_hub:
SeleniumTestCaseBase.selenium_hub = options.selenium_hub
SeleniumTestCaseBase.external_host = options.external_host
SeleniumTestCaseBase.headless = options.headless
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(
options.bisect, options, options.modules, options.parallel,
options.start_at, options.start_after,
)
elif options.pair:
paired_tests(
options.pair, options, options.modules, options.parallel,
options.start_at, options.start_after,
)
else:
failures = django_tests(
options.verbosity, options.interactive, options.failfast,
options.keepdb, options.reverse, options.modules,
options.debug_sql, options.parallel, options.tags,
options.exclude_tags,
getattr(options, 'test_name_patterns', None),
options.start_at, options.start_after, options.pdb,
)
if failures:
sys.exit(1)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
requirements: [ boto3 ]
short_description: maintain an ec2 VPC security group.
description:
- Maintains EC2 security groups. This module has a dependency on boto3.
options:
name:
description:
- Name of the security group.
- One of and only one of I(name) or I(group_id) is required.
- Required if I(state=present).
required: false
group_id:
description:
- Id of group to delete (works only with absent).
- One of and only one of I(name) or I(group_id) is required.
required: false
version_added: "2.4"
description:
description:
- Description of the security group. Required when C(state) is C(present).
required: false
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example). If none are supplied,
no inbound rules will be enabled. The rules list may include the group's own name in `group_name`,
which allows idempotent loopback additions (e.g. allowing the group to access itself).
Rule sources list support was added in version 2.4. This allows defining multiple sources per
source type as well as multiple source types per rule. Prior to 2.4 only an individual source was allowed.
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied,
a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
Rule Egress sources list support was added in version 2.4.
required: false
version_added: "1.6"
state:
version_added: "1.4"
description:
- Create or delete a security group
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules
required: false
default: 'true'
aliases: []
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
tags:
version_added: "2.4"
description:
- A dictionary of one or more tags to assign to the security group.
required: false
purge_tags:
version_added: "2.4"
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
tags will not be modified.
required: false
default: yes
choices: [ 'yes', 'no' ]
extends_documentation_fragment:
- aws
- ec2
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
cidr_ipv6: 64:ff9b::/96
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
- name: example2 ec2 group
ec2_group:
name: example2
description: an example2 EC2 group
vpc_id: 12345
region: eu-west-1
rules:
# 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
- proto: tcp
ports: 22
group_name: example-vpn
- proto: tcp
ports:
- 80
- 443
- 8080-8099
cidr_ip: 0.0.0.0/0
# Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
- proto: tcp
ports:
- 6379
- 26379
group_name:
- example-vpn
- example-redis
- proto: tcp
ports: 5665
group_name: example-vpn
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
cidr_ipv6:
- 2607:F8B0::/32
- 64:ff9b::/96
group_id:
- sg-edcd9784
- name: "Delete group by its id"
ec2_group:
group_id: sg-33b4ee5b
state: absent
'''
RETURN = '''
group_name:
description: Security group name
sample: My Security Group
type: string
returned: on create/update
group_id:
description: Security group id
sample: sg-abcd1234
type: string
returned: on create/update
description:
description: Description of security group
sample: My Security Group
type: string
returned: on create/update
tags:
description: Tags associated with the security group
sample:
Name: My Security Group
Purpose: protecting stuff
type: dict
returned: on create/update
vpc_id:
description: ID of VPC to which the security group belongs
sample: vpc-abcd1234
type: string
returned: on create/update
ip_permissions:
description: Inbound rules associated with the security group.
sample:
- from_port: 8182
ip_protocol: tcp
ip_ranges:
- cidr_ip: "1.1.1.1/32"
ipv6_ranges: []
prefix_list_ids: []
to_port: 8182
user_id_group_pairs: []
type: list
returned: on create/update
ip_permissions_egress:
description: Outbound rules associated with the security group.
sample:
- ip_protocol: -1
ip_ranges:
- cidr_ip: "0.0.0.0/0"
ipv6_ranges: []
prefix_list_ids: []
user_id_group_pairs: []
type: list
returned: on create/update
owner_id:
description: AWS Account ID of the security group
sample: 123456789012
type: int
returned: on create/update
'''
import json
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.ec2 import HAS_BOTO3
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
from ansible.module_utils.ec2 import AWSRetry
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_security_groups_with_backoff(connection, **kwargs):
return connection.describe_security_groups(**kwargs)
def deduplicate_rules_args(rules):
"""Returns unique rules"""
if rules is None:
return None
return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
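# Sketch of the deduplication trick above (example rules are hypothetical): each rule is
# serialized with json.dumps(sort_keys=True) and used as a dict key, so identical rule
# dicts collapse into a single entry, e.g.
#   deduplicate_rules_args([{'proto': 'tcp', 'ports': 22}, {'proto': 'tcp', 'ports': 22}])
#   -> [{'proto': 'tcp', 'ports': 22}]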
def make_rule_key(prefix, rule, group_id, cidr_ip):
if 'proto' in rule:
proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
elif 'IpProtocol' in rule:
proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')]
if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
from_port = 'none'
to_port = 'none'
key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
return key.lower().replace('-none', '-None')
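# Example of the lookup key format built above (values are illustrative):
#   make_rule_key('in', {'proto': 'tcp', 'from_port': 80, 'to_port': 80}, 'sg-1234', '0.0.0.0/0')
#   -> 'in-tcp-80-80-sg-1234-0.0.0.0/0'
# Missing ports are rendered as 'None' so that equivalent rules map to the same key.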
def add_rules_to_lookup(ipPermissions, group_id, prefix, rules_dict):
for rule in ipPermissions:
for groupGrant in rule.get('UserIdGroupPairs', []):
rules_dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant)
for ipv4Grants in rule.get('IpRanges', []):
rules_dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants)
for ipv6Grants in rule.get('Ipv6Ranges', []):
rules_dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants)
def validate_rule(module, rule):
VALID_PARAMS = ('cidr_ip', 'cidr_ipv6',
'group_id', 'group_name', 'group_desc',
'proto', 'from_port', 'to_port')
if not isinstance(rule, dict):
module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
for k in rule:
if k not in VALID_PARAMS:
module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_id OR cidr_ip, not both')
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_name OR cidr_ip, not both')
elif 'group_id' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
elif 'group_name' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg='Specify group_id OR group_name, not both')
def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
"""
Returns tuple of (group_id, ip, ipv6, target_group_created) after validating rule params.
rule: Dict describing a rule.
name: Name of the security group being managed.
groups: Dict of all available security groups.
AWS accepts an IP range or a security group as the target of a rule. This
function validates the rule specification and returns either a non-None
group_id or a non-None IP range.
"""
FOREIGN_SECURITY_GROUP_REGEX = r'^(\S+)/(sg-\S+)/(\S+)'
group_id = None
group_name = None
ip = None
ipv6 = None
target_group_created = False
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
elif 'group_name' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
group_instance = dict(GroupId=group_id, GroupName=group_name)
groups[group_id] = group_instance
groups[group_name] = group_instance
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name == name:
group_id = group['GroupId']
groups[group_id] = group
groups[group_name] = group
elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
# both are VPC groups, this is ok
group_id = groups[group_name]['GroupId']
elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
# both are EC2 classic, this is ok
group_id = groups[group_name]['GroupId']
else:
# if we got here, either the target group does not exist, or there
# is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
# is bad, so we have to create a new SG because no compatible group
# exists
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and "
"no description was provided" % (group_name, rule))
if not module.check_mode:
params = dict(GroupName=group_name, Description=rule['group_desc'])
if vpc_id:
params['VpcId'] = vpc_id
auto_group = client.create_security_group(**params)
group_id = auto_group['GroupId']
groups[group_id] = auto_group
groups[group_name] = auto_group
target_group_created = True
elif 'cidr_ip' in rule:
ip = rule['cidr_ip']
elif 'cidr_ipv6' in rule:
ipv6 = rule['cidr_ipv6']
return group_id, ip, ipv6, target_group_created
def ports_expand(ports):
# takes a list of ports and returns a list of (port_from, port_to)
ports_expanded = []
for port in ports:
if not isinstance(port, str):
ports_expanded.append((port,) * 2)
elif '-' in port:
ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
else:
ports_expanded.append((port.strip(),) * 2)
return ports_expanded
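# Illustrative behaviour of ports_expand (inputs are hypothetical):
#   ports_expand([22, '80-90', '443']) -> [(22, 22), ('80', '90'), ('443', '443')]
# Integers are duplicated into (from, to) pairs; string ranges are split on '-'.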
def rule_expand_ports(rule):
# takes a rule dict and returns a list of expanded rule dicts
if 'ports' not in rule:
return [rule]
ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
rule_expanded = []
for from_to in ports_expand(ports):
temp_rule = rule.copy()
del temp_rule['ports']
temp_rule['from_port'], temp_rule['to_port'] = from_to
rule_expanded.append(temp_rule)
return rule_expanded
def rules_expand_ports(rules):
# takes a list of rules and expands it based on 'ports'
if not rules:
return rules
return [rule for rule_complex in rules
for rule in rule_expand_ports(rule_complex)]
def rule_expand_source(rule, source_type):
# takes a rule dict and returns a list of expanded rule dicts for specified source_type
sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
rule_expanded = []
for source in sources:
temp_rule = rule.copy()
for s in source_types_all:
temp_rule.pop(s, None)
temp_rule[source_type] = source
rule_expanded.append(temp_rule)
return rule_expanded
def rule_expand_sources(rule):
# takes a rule dict and returns a list of expanded rule dicts
source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
return [r for stype in source_types
for r in rule_expand_source(rule, stype)]
def rules_expand_sources(rules):
# takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
if not rules:
return rules
return [rule for rule_complex in rules
for rule in rule_expand_sources(rule_complex)]
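# Sketch of source expansion (example rule is hypothetical): a rule such as
#   {'proto': 'tcp', 'from_port': 22, 'to_port': 22, 'cidr_ip': ['10.0.0.0/8', '172.16.0.0/12']}
# is expanded by rules_expand_sources() into one rule per source, i.e. two rules
# that differ only in their single 'cidr_ip' value.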
def authorize_ip(type, changed, client, group, groupRules,
ip, ip_permission, module, rule, ethertype):
# If rule already exists, don't later delete it
for thisip in ip:
rule_id = make_rule_key(type, rule, group['GroupId'], thisip)
if rule_id in groupRules:
del groupRules[rule_id]
else:
if not module.check_mode:
ip_permission = serialize_ip_grant(rule, thisip, ethertype)
if ip_permission:
try:
if type == "in":
client.authorize_security_group_ingress(GroupId=group['GroupId'],
IpPermissions=[ip_permission])
elif type == "out":
client.authorize_security_group_egress(GroupId=group['GroupId'],
IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" %
(type, thisip, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
return changed, ip_permission
def serialize_group_grant(group_id, rule):
permission = {'IpProtocol': rule['proto'],
'FromPort': rule['from_port'],
'ToPort': rule['to_port'],
'UserIdGroupPairs': [{'GroupId': group_id}]}
return fix_port_and_protocol(permission)
def serialize_revoke(grant, rule):
permission = dict()
fromPort = rule['FromPort'] if 'FromPort' in rule else None
toPort = rule['ToPort'] if 'ToPort' in rule else None
if 'GroupId' in grant:
permission = {'IpProtocol': rule['IpProtocol'],
'FromPort': fromPort,
'ToPort': toPort,
'UserIdGroupPairs': [{'GroupId': grant['GroupId']}]
}
elif 'CidrIp' in grant:
permission = {'IpProtocol': rule['IpProtocol'],
'FromPort': fromPort,
'ToPort': toPort,
'IpRanges': [grant]
}
elif 'CidrIpv6' in grant:
permission = {'IpProtocol': rule['IpProtocol'],
'FromPort': fromPort,
'ToPort': toPort,
'Ipv6Ranges': [grant]
}
return fix_port_and_protocol(permission)
def serialize_ip_grant(rule, thisip, ethertype):
permission = {'IpProtocol': rule['proto'],
'FromPort': rule['from_port'],
'ToPort': rule['to_port']}
if ethertype == "ipv4":
permission['IpRanges'] = [{'CidrIp': thisip}]
elif ethertype == "ipv6":
permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}]
return fix_port_and_protocol(permission)
def fix_port_and_protocol(permission):
for key in ['FromPort', 'ToPort']:
if key in permission:
if permission[key] is None:
del permission[key]
else:
permission[key] = int(permission[key])
permission['IpProtocol'] = str(permission['IpProtocol'])
return permission
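# Example of the normalization done above (input is illustrative):
#   fix_port_and_protocol({'IpProtocol': -1, 'FromPort': None, 'ToPort': '80'})
#   -> {'IpProtocol': '-1', 'ToPort': 80}
# None ports are dropped, remaining ports become ints, and the protocol is stringified.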
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(),
group_id=dict(),
description=dict(),
vpc_id=dict(),
rules=dict(type='list'),
rules_egress=dict(type='list'),
state=dict(default='present', type='str', choices=['present', 'absent']),
purge_rules=dict(default=True, required=False, type='bool'),
purge_rules_egress=dict(default=True, required=False, type='bool'),
tags=dict(required=False, type='dict', aliases=['resource_tags']),
purge_tags=dict(default=True, required=False, type='bool')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['name', 'group_id']],
required_if=[['state', 'present', ['name']]],
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
name = module.params['name']
group_id = module.params['group_id']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
state = module.params.get('state')
purge_rules = module.params['purge_rules']
purge_rules_egress = module.params['purge_rules_egress']
tags = module.params['tags']
purge_tags = module.params['purge_tags']
if state == 'present' and not description:
module.fail_json(msg='Must provide description when state is present.')
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="The AWS region must be specified as an "
"environment variable or in the AWS credentials "
"profile.")
client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
group = None
groups = dict()
security_groups = []
# Fetch all security groups and determine whether the requested group is present
try:
response = get_security_groups_with_backoff(client)
security_groups = response.get('SecurityGroups', [])
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc())
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for sg in security_groups:
groups[sg['GroupId']] = sg
groupName = sg['GroupName']
if groupName in groups:
# Prioritise groups from the current VPC
# even if current VPC is EC2-Classic
if groups[groupName].get('VpcId') == vpc_id:
# Group saved already matches current VPC, change nothing
pass
elif vpc_id is None and groups[groupName].get('VpcId') is None:
# We're in EC2 classic, and the group already saved is as well
# No VPC groups can be used alongside EC2 classic groups
pass
else:
# the current SG stored has no direct match, so we can replace it
groups[groupName] = sg
else:
groups[groupName] = sg
if group_id and sg['GroupId'] == group_id:
group = sg
elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id):
group = sg
# Ensure requested group is absent
if state == 'absent':
if group:
# found a match, delete it
try:
if not module.check_mode:
client.delete_security_group(GroupId=group['GroupId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
else:
group = None
changed = True
else:
# no match found, no changes required
pass
# Ensure requested group is present
elif state == 'present':
if group:
# existing group
if group['Description'] != description:
module.fail_json(
msg="Group description does not match existing group. ec2_group does not support this case.")
# if the group doesn't exist, create it now
else:
# no match found, create it
if not module.check_mode:
params = dict(GroupName=name, Description=description)
if vpc_id:
params['VpcId'] = vpc_id
group = client.create_security_group(**params)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
# call. We re-read the group to get an updated object.
# AWS sometimes takes a couple of seconds to update the security group, so wait until it exists.
while True:
group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
if group.get('VpcId') and not group.get('IpPermissionsEgress'):
pass
else:
break
changed = True
if tags is not None:
current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
if tags_to_delete:
try:
client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
# Add/update tags
if tags_need_modify:
try:
client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
ip_permission = []
if group:
# Manage ingress rules
groupRules = {}
add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules is not None:
for rule in rules:
validate_rule(module, rule)
group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
if group_id:
rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
if rule_id in groupRules:
del groupRules[rule_id]
else:
if not module.check_mode:
ip_permission = serialize_group_grant(group_id, rule)
if ip_permission:
ips = ip_permission
if vpc_id:
[useridpair.update({'VpcId': vpc_id}) for useridpair in
ip_permission.get('UserIdGroupPairs', [])]
try:
client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips])
except botocore.exceptions.ClientError as e:
module.fail_json(
msg="Unable to authorize ingress for group %s security group '%s' - %s" %
(group_id, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
elif ip:
# Convert ip to list we can iterate over
if ip and not isinstance(ip, list):
ip = [ip]
changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission,
module, rule, "ipv4")
elif ipv6:
# Convert ip to list we can iterate over
if not isinstance(ipv6, list):
ipv6 = [ipv6]
# If rule already exists, don't later delete it
changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission,
module, rule, "ipv6")
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules:
for (rule, grant) in groupRules.values():
ip_permission = serialize_revoke(grant, rule)
if not module.check_mode:
try:
client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(
msg="Unable to revoke ingress for security group '%s' - %s" %
(group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
# Manage egress rules
groupRules = {}
add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress is not None:
for rule in rules_egress:
validate_rule(module, rule)
group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
if group_id:
rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
if rule_id in groupRules:
del groupRules[rule_id]
else:
if not module.check_mode:
ip_permission = serialize_group_grant(group_id, rule)
if ip_permission:
ips = ip_permission
if vpc_id:
[useridpair.update({'VpcId': vpc_id}) for useridpair in
ip_permission.get('UserIdGroupPairs', [])]
try:
client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips])
except botocore.exceptions.ClientError as e:
module.fail_json(
msg="Unable to authorize egress for group %s security group '%s' - %s" %
(group_id, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
elif ip:
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
ip_permission, module, rule, "ipv4")
elif ipv6:
# Convert ip to list we can iterate over
if not isinstance(ipv6, list):
ipv6 = [ipv6]
# If rule already exists, don't later delete it
changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
ip_permission, module, rule, "ipv6")
elif vpc_id is not None:
# when no egress rules are specified and we're in a VPC,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
if default_egress_rule not in groupRules:
if not module.check_mode:
ip_permission = [{'IpProtocol': '-1',
'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
}
]
try:
client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" %
('0.0.0.0/0',
group['GroupName'],
e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules_egress and vpc_id is not None:
for (rule, grant) in groupRules.values():
# we shouldn't be revoking 0.0.0.0 egress
if grant != '0.0.0.0/0':
ip_permission = serialize_revoke(grant, rule)
if not module.check_mode:
try:
client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" %
(grant, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
if group:
security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
security_group = camel_dict_to_snake_dict(security_group)
security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
tag_name_key_name='key', tag_value_key_name='value')
module.exit_json(changed=changed, **security_group)
else:
module.exit_json(changed=changed, group_id=None)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""
pr_parser.py: script that analyses PR information and writes the parsed PR information
(especially multi-related PRs) to post-build properties for the downstream job.
"""
import argparse
import sys
import os
import github
import collections
import random
from github import Github
from manifest import Manifest
class PrParser(object):
def __init__(self, change_url, target_branch, puller_ghtoken_pool):
"""
Initialize PrParser with change_url
"""
assert change_url, "Error: PR URL is None!"
assert puller_ghtoken_pool, "Error: puller_ghtoken_pool is None!"
assert target_branch, "Error: target_branch is None!"
url_segments = change_url.split("/")
self.__repo = "/".join(change_url.split("/")[-4:-2]).lower()
self.__target_branch = target_branch
self.__pr_number = url_segments[-1]
self.__merge_commit_sha = "origin/pr/{0}/merge".format(self.__pr_number)
self.__pr_list = [self.__repo]
self.__pr_connectivity_map = collections.defaultdict(dict)
self.__puller_ghtoken_pool = puller_ghtoken_pool.split()
# Whether the PRs of this build are valid:
# -1 = PRs not parsed yet, 1 = valid, 0 = invalid.
# An unmergeable PR sets this to 0.
self.__valid_pr_group = -1
@property
def __gh(self):
"""
return a Github instance with random token
"""
ghtoken_pool_size = len(self.__puller_ghtoken_pool)
random_index = random.randint(0, ghtoken_pool_size-1)
this_choice = self.__puller_ghtoken_pool[random_index]
return Github(this_choice)
def pr_group_is_valid(self):
if self.__valid_pr_group == 1:
return True
if self.__valid_pr_group == 0:
return False
if self.__valid_pr_group == -1:
print "Can't get pr group status before parsing!"
sys.exit(1)
def parse_pr(self, base_repo, base_pr_number):
"""
get related prs according to the base pr
:param base_repo: string, repo name associated with the pr to be parsed
:param base_pr_number: number in string, pull request number of the pr to be parsed
:return related_prs: list of tuples of strings: [(repo, sha, pr_number), ...],
the PRs associated with the base pr
"""
#init github and get related pr object
gh = self.__gh
pr = gh.get_repo(base_repo).get_pull(long(base_pr_number))
self.__valid_pr_group = 1
#get all comments and description in the pr
pr_texts = []
pr_texts.append(pr.body)
for pr_comment in pr.get_issue_comments():
pr_texts.append(pr_comment.body)
# pre processing
pr_text_segment = []
for pr_text in pr_texts:
pr_text = pr_text.lower()
jenkins_index = pr_text.find("jenkins")
if jenkins_index != -1:
pr_text = pr_text[jenkins_index:]
partition = [i for i in pr_text.split('jenkins') if i]
for segment in partition:
pr_text_segment.append("jenkins"+segment)
#parse pr
related_prs = []
for pr_text in pr_text_segment:
pr_words = pr_text.replace(':', '').replace(',', ' ').split()
#find keyword "jenkins"
if 'jenkins' not in pr_words:
continue
position = pr_words.index('jenkins')
#Checks to make sure the pr_words are long enough to parse.
#Needs to be at least length of 3 ("Jenkins ignore/depend PR")
if ((position+2) >= len(pr_words)) :
continue
#analyse dependency relationship, "depend" or "ignore"
if ('ignore' not in pr_words[position+1]) and ('depend' not in pr_words[position+1]):
continue
#find "ignore"
if 'ignore' in pr_words[position+1]:
related_prs = None
print "INFO: \"Jenkins: ignore\" in repo: {0} pr_number: {1}".format(base_repo, base_pr_number)
break
#find "depend"
disp = 2
if pr_words[position+2] == "on":
disp += 1
for i in range(position+disp, len(pr_words)):
if 'https//github.com' not in pr_words[i]:
break
dep_pr_url = pr_words[i]
try:
repo = dep_pr_url[:dep_pr_url.rfind('/pull/')].replace('https//github.com/','')
assert len(repo.split('/')) == 2
pr_number = dep_pr_url[dep_pr_url.rfind('/pull/')+6:]
assert pr_number.isalnum()
except AssertionError as error:
print "ERROR: the pr url {0} is invalid.\n{1}".format(dep_pr_url, error)
sys.exit(1)
try:
dep_pr = gh.get_repo(repo).get_pull(long(pr_number))
if dep_pr.merged:
continue
if not dep_pr.mergeable:
print "ERROR: the pr of {0} is unmergeable.\n{1}".format(dep_pr_url, pr.mergeable_state)
self.__valid_pr_group = 0
sha = 'origin/pr/{0}/merge'.format(pr_number)
except Exception as error:
print "ERROR: the pr of {0} doesn't exist.\n{1}".format(dep_pr_url, error)
self.__valid_pr_group = 0
print "INFO: find one dependency pr, ({0}, {1}, {2})".format(repo, sha, pr_number)
related_prs.append((repo, sha, pr_number))
self.__pr_connectivity_map[base_repo][repo] = True
if not self.__pr_connectivity_map[repo].has_key(base_repo):
self.__pr_connectivity_map[repo][base_repo] = False
if repo not in self.__pr_list:
self.__pr_list.append(repo)
print "INFO: repo: {0}, pr_number: {1} parsing done, recursive parse may continue".format(base_repo, base_pr_number)
return related_prs
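# Illustrative comment strings recognised by parse_pr (the URL is hypothetical):
#   "Jenkins: ignore"
#   "Jenkins: depend on https://github.com/org/other-repo/pull/123"
# 'ignore' makes parse_pr() return None; 'depend on' adds each listed PR URL
# as (repo, 'origin/pr/<number>/merge', pr_number).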
def get_all_related_prs(self, repo, sha, pr_number):
"""
RECURSIVELY get ALL related prs information according to the base pr
:param repo: string, repo name associated with the pr to be parsed
:param sha: string, the merge_commit_sha associated with the pr to be parsed
:param pr_number: number in string, pull request number of the pr to be parsed
:return all_prs: list of tuples of strings: [(repo, sha, pr_number), ...],
the PRs associated with the base pr (including the base pr itself)
"""
#add base pr first
all_prs = []
base_pr = [(repo, sha, pr_number)]
all_prs.extend(base_pr)
#recursively find dependent pr
while base_pr:
_tmp_pr = []
for item in base_pr:
repo, _, pr_number = item
dependent_prs = self.parse_pr(repo, pr_number)
#find 'Jenkins: ignore'
if dependent_prs is None:
#find 'Jenkins: ignore' in root trigger pr
if len(all_prs) == 1:
all_prs = []
else:
continue
else:
_tmp_pr.extend(dependent_prs)
#avoid endless loop
if _tmp_pr:
_tmp_pr = [t for t in _tmp_pr if t not in all_prs]
all_prs.extend(_tmp_pr)
base_pr = _tmp_pr
return all_prs
def get_under_test_prs(self):
"""
According to self.__pr_connectivity_map and self.__pr_list,
find the under-test PRs.
under_test_pr:
1. The root PR (which triggered this build) is under test.
2. A PR that is disconnected from an under-test PR is under test too.
"""
# under_test_prs still to be parsed to find related under-test PRs
tmp_under_test_prs = [self.__pr_list[0]]
# the final filtered result: under-test PRs after parsing
under_test_prs = [self.__pr_list[0]]
# PRs that have already been parsed, to prevent an infinite loop
parsed_prs = []
while tmp_under_test_prs:
parsed_prs.extend(tmp_under_test_prs)
# the next tmp_under_test_prs
tmp_tmp_under_test_prs = []
for under_test_pr in tmp_under_test_prs:
# must be a standalone pr that depends on no other pr
if not self.__pr_connectivity_map.has_key(under_test_pr):
continue
for pr in self.__pr_list:
# disconnected with this pr
if not self.__pr_connectivity_map[under_test_pr].has_key(pr):
continue
# if disconnected, [under_test_pr][pr] and [pr][under_test_pr] both equal True
# if one-way connected, one is True and the other is False
if self.__pr_connectivity_map[under_test_pr][pr] and \
self.__pr_connectivity_map[pr][under_test_pr]:
# disconnected from under_test_pr means this pr is under test too
under_test_prs.append(pr)
if pr not in parsed_prs:
tmp_tmp_under_test_prs.append(pr)
tmp_under_test_prs = tmp_tmp_under_test_prs
return under_test_prs
def get_latest_commit(self, repo, branch):
"""
Get repo latest commit of the specific branch
"""
gh = self.__gh
branch = gh.get_repo(repo).get_branch(branch)
latest_commit = branch.commit.sha
return latest_commit
def wrap_manifest_file(self, file_path):
"""
Generated manifest file
"""
try:
all_prs = self.get_all_related_prs(self.__repo, self.__merge_commit_sha, self.__pr_number)
under_test_prs = self.get_under_test_prs()
# instance of manifest template
manifest = Manifest.instance_of_sample("manifest-pr-gate.json")
# wrap with pr
repo_url_list = [repo["repository"] for repo in manifest.repositories]
for pr in all_prs:
repo, sha1, _ = pr
repo_url = "https://github.com/{0}.git".format(repo)
# normalize the repo_url case so the URL exactly matches the repository entry in the manifest
repo_url = [url for url in repo_url_list if url.lower() == repo_url][0]
if repo in under_test_prs:
manifest.update_manifest(repo_url, "", sha1, True)
else:
manifest.update_manifest(repo_url, "", sha1, False)
# fill in blank commit with latest commit sha
for repo in manifest.repositories:
if 'commit-id' in repo and repo['commit-id'] == "":
repo_name = "/".join(repo["repository"][:-4].split("/")[3:])
latest_commit = self.get_latest_commit(repo_name, self.__target_branch)
repo["commit-id"] = latest_commit
manifest.validate_manifest()
manifest.dump_to_json_file(file_path)
except Exception as error:
print "ERROR occured in parse manifest: {0}".format(error)
sys.exit(1)
def parse_args(args):
"""
Take in values from the user.
target-branch, puller-ghtoken-pool and manifest-file-path are required; argparse exits if they are not given.
:return: Parsed args for assignment
"""
parser = argparse.ArgumentParser()
parser.add_argument("--change-url",
help="Url of the triggered pr",
action="store")
parser.add_argument("--target-branch",
help="The target branch of the pr for the named repo",
required=True,
action="store")
parser.add_argument("--puller-ghtoken-pool",
help="Github token pool that have basic pull permission.",
required=True,
action="store")
parser.add_argument("--manifest-file-path",
help="The file path of wanted manifest output, relevent or absolute",
required=True,
action="store")
parsed_args = parser.parse_args(args)
return parsed_args
def main():
parsed_args = parse_args(sys.argv[1:])
pr_parser = PrParser(parsed_args.change_url, parsed_args.target_branch, parsed_args.puller_ghtoken_pool)
pr_parser.wrap_manifest_file(parsed_args.manifest_file_path)
if not pr_parser.pr_group_is_valid():
print "There are unmergeable PRs!"
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Datetimerfc1123(object):
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_null(
self, custom_headers={}, raw=False, **operation_config):
"""
Get null datetime value
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: datetime or (datetime, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('rfc-1123', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
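# Note on the pattern shared by every operation in this generated class: when raw=True
# the call returns a msrest ClientRawResponse wrapping both the deserialized value and
# the underlying HTTP response; otherwise only the deserialized datetime (or None) is
# returned. For example (the client attribute name below is a hypothetical assumption):
#   value = client.datetimerfc1123.get_null()          # datetime or None
#   raw   = client.datetimerfc1123.get_null(raw=True)  # ClientRawResponse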
def get_invalid(
self, custom_headers={}, raw=False, **operation_config):
"""
Get invalid datetime value
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: datetime or (datetime, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('rfc-1123', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_overflow(
self, custom_headers={}, raw=False, **operation_config):
"""
Get overflow datetime value
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: datetime or (datetime, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/overflow'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('rfc-1123', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_underflow(
self, custom_headers={}, raw=False, **operation_config):
"""
Get underflow datetime value
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: datetime or (datetime, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/underflow'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('rfc-1123', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_utc_max_date_time(
self, datetime_body, custom_headers={}, raw=False, **operation_config):
"""
Put max datetime value Fri, 31 Dec 9999 23:59:59 GMT
:param datetime_body:
:type datetime_body: datetime
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/max'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(datetime_body, 'rfc-1123')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_utc_lowercase_max_date_time(
            self, custom_headers=None, raw=False, **operation_config):
"""
Get max datetime value fri, 31 dec 9999 23:59:59 gmt
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: datetime or (datetime, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/max/lowercase'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('rfc-1123', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_utc_uppercase_max_date_time(
            self, custom_headers=None, raw=False, **operation_config):
"""
Get max datetime value FRI, 31 DEC 9999 23:59:59 GMT
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: datetime or (datetime, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/max/uppercase'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('rfc-1123', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_utc_min_date_time(
            self, datetime_body, custom_headers=None, raw=False, **operation_config):
"""
Put min datetime value Mon, 1 Jan 0001 00:00:00 GMT
:param datetime_body:
:type datetime_body: datetime
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/min'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(datetime_body, 'rfc-1123')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_utc_min_date_time(
            self, custom_headers=None, raw=False, **operation_config):
"""
Get min datetime value Mon, 1 Jan 0001 00:00:00 GMT
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: datetime or (datetime, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/datetimerfc1123/min'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('rfc-1123', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
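# Illustrative usage sketch (not part of the generated client). The client class
# name and the `datetimerfc1123` attribute below are assumptions about how this
# AutoRest-generated operations group is typically exposed; only the method
# names defined above are taken from the source.
#
#   client = SomeGeneratedServiceClient(base_url="http://localhost:3000")   # hypothetical
#   value = client.datetimerfc1123.get_overflow()                 # -> datetime
#   raw = client.datetimerfc1123.get_utc_min_date_time(raw=True)  # -> ClientRawResponse
#   raw.response.status_code                                      # underlying HTTP response
#   client.datetimerfc1123.put_utc_max_date_time(
#       datetime.datetime(9999, 12, 31, 23, 59, 59))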
|
|
"""
raven.contrib.flask
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
try:
from flask_login import current_user
except ImportError:
has_flask_login = False
else:
has_flask_login = True
import sys
import os
import logging
from flask import request, current_app, g
from flask.signals import got_request_exception, request_finished
from raven.conf import setup_logging
from raven.base import Client
from raven.middleware import Sentry as SentryMiddleware
from raven.handlers.logging import SentryHandler
from raven.utils.compat import _urlparse
from raven.utils.wsgi import get_headers, get_environ
from werkzeug.exceptions import ClientDisconnected
def make_client(client_cls, app, dsn=None):
return client_cls(
dsn=dsn or app.config.get('SENTRY_DSN') or os.environ.get('SENTRY_DSN'),
include_paths=set(app.config.get('SENTRY_INCLUDE_PATHS', [])) | set([app.import_name]),
exclude_paths=app.config.get('SENTRY_EXCLUDE_PATHS'),
servers=app.config.get('SENTRY_SERVERS'),
name=app.config.get('SENTRY_NAME'),
public_key=app.config.get('SENTRY_PUBLIC_KEY'),
secret_key=app.config.get('SENTRY_SECRET_KEY'),
project=app.config.get('SENTRY_PROJECT'),
site=app.config.get('SENTRY_SITE_NAME'),
processors=app.config.get('SENTRY_PROCESSORS'),
string_max_length=app.config.get('SENTRY_MAX_LENGTH_STRING'),
list_max_length=app.config.get('SENTRY_MAX_LENGTH_LIST'),
auto_log_stacks=app.config.get('SENTRY_AUTO_LOG_STACKS'),
tags=app.config.get('SENTRY_TAGS'),
extra={
'app': app,
},
)
class Sentry(object):
"""
    Flask extension for Sentry.
Look up configuration from ``os.environ['SENTRY_DSN']``::
>>> sentry = Sentry(app)
Pass an arbitrary DSN::
>>> sentry = Sentry(app, dsn='http://public:secret@example.com/1')
Pass an explicit client::
>>> sentry = Sentry(app, client=client)
Automatically configure logging::
>>> sentry = Sentry(app, logging=True, level=logging.ERROR)
Capture an exception::
    >>> try:
    >>>     1 / 0
    >>> except ZeroDivisionError:
    >>>     sentry.captureException()
Capture a message::
>>> sentry.captureMessage('hello, world!')
By default, the Flask integration will do the following:
- Hook into the `got_request_exception` signal. This can be disabled by
passing `register_signal=False`.
- Wrap the WSGI application. This can be disabled by passing
`wrap_wsgi=False`.
- Capture information from Flask-Login (if available).
"""
# TODO(dcramer): the client isn't using local context and therefore
# gets shared by every app that does init on it
def __init__(self, app=None, client=None, client_cls=Client, dsn=None,
logging=False, level=logging.NOTSET, wrap_wsgi=True,
register_signal=True):
self.dsn = dsn
self.logging = logging
self.client_cls = client_cls
self.client = client
self.level = level
self.wrap_wsgi = wrap_wsgi
self.register_signal = register_signal
if app:
self.init_app(app)
@property
def last_event_id(self):
return getattr(self, '_last_event_id', None)
@last_event_id.setter
def last_event_id(self, value):
self._last_event_id = value
try:
g.sentry_event_id = value
except Exception:
pass
def handle_exception(self, *args, **kwargs):
if not self.client:
return
ignored_exc_type_list = current_app.config.get('RAVEN_IGNORE_EXCEPTIONS', [])
exc = sys.exc_info()[1]
if any((isinstance(exc, ignored_exc_type) for ignored_exc_type in ignored_exc_type_list)):
return
self.captureException(exc_info=kwargs.get('exc_info'))
def get_user_info(self, request):
"""
Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed
and setup
"""
if not has_flask_login:
return
if not hasattr(current_app, 'login_manager'):
return
try:
is_authenticated = current_user.is_authenticated()
except AttributeError:
            # HACK: catch the AttributeError raised when flask-login is not attached
# > current_user = LocalProxy(lambda: _request_ctx_stack.top.user)
# E AttributeError: 'RequestContext' object has no attribute 'user'
return {}
if is_authenticated:
user_info = {
'is_authenticated': True,
'is_anonymous': current_user.is_anonymous(),
'id': current_user.get_id(),
}
if 'SENTRY_USER_ATTRS' in current_app.config:
for attr in current_app.config['SENTRY_USER_ATTRS']:
if hasattr(current_user, attr):
user_info[attr] = getattr(current_user, attr)
else:
user_info = {
'is_authenticated': False,
'is_anonymous': current_user.is_anonymous(),
}
return user_info
def get_http_info(self, request):
urlparts = _urlparse.urlsplit(request.url)
try:
formdata = request.form
except ClientDisconnected:
formdata = {}
return {
'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': formdata,
'headers': dict(get_headers(request.environ)),
'env': dict(get_environ(request.environ)),
}
def before_request(self, *args, **kwargs):
self.last_event_id = None
self.client.http_context(self.get_http_info(request))
self.client.user_context(self.get_user_info(request))
def add_sentry_id_header(self, sender, response, *args, **kwargs):
response.headers['X-Sentry-ID'] = self.last_event_id
return response
def init_app(self, app, dsn=None, logging=None, level=None, wrap_wsgi=None,
register_signal=None):
if dsn is not None:
self.dsn = dsn
if level is not None:
self.level = level
if wrap_wsgi is not None:
self.wrap_wsgi = wrap_wsgi
if register_signal is not None:
self.register_signal = register_signal
if logging is not None:
self.logging = logging
if not self.client:
self.client = make_client(self.client_cls, app, self.dsn)
if self.logging:
setup_logging(SentryHandler(self.client, level=self.level))
if self.wrap_wsgi:
app.wsgi_app = SentryMiddleware(app.wsgi_app, self.client)
app.before_request(self.before_request)
if self.register_signal:
got_request_exception.connect(self.handle_exception, sender=app)
request_finished.connect(self.add_sentry_id_header, sender=app)
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['sentry'] = self
def captureException(self, *args, **kwargs):
assert self.client, 'captureException called before application configured'
result = self.client.captureException(*args, **kwargs)
if result:
self.last_event_id = self.client.get_ident(result)
else:
self.last_event_id = None
return result
def captureMessage(self, *args, **kwargs):
assert self.client, 'captureMessage called before application configured'
result = self.client.captureMessage(*args, **kwargs)
if result:
self.last_event_id = self.client.get_ident(result)
else:
self.last_event_id = None
return result
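# Illustrative sketch (not part of the original module): typical application-factory
# wiring for this extension. The config values are assumptions; only SENTRY_DSN is
# needed for events to be reported.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.config['SENTRY_DSN'] = 'http://public:secret@example.com/1'
    sentry = Sentry(logging=True, level=logging.ERROR)
    # Deferred initialisation: builds the client from app config, hooks the
    # got_request_exception signal, wraps the WSGI app and registers itself
    # under app.extensions['sentry'].
    sentry.init_app(app)
    return app, sentry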
|
|
"""
Contains rich-text related classes.
"""
import re
from django.utils.html import escape
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.rich_text import EMBED_HANDLERS
from wagtail.wagtailcore.whitelist import Whitelister
from wagtail.wagtaildocs.models import Document
from wagtailplus.wagtaillinks.models import Link
def expand_db_attributes_for_model(model, attrs, for_editor):
"""
Given a dictionary of attributes from the <a> tag, return
the real HTML representation.
:param model: the model class.
:param attrs: dictionary of database attributes.
:param for_editor: flag to display in editor or frontend.
:rtype: str.
"""
editor_attrs = ''
try:
obj = model.objects.get(id=attrs['id'])
if for_editor:
link_type = model._meta.model.__name__.lower()
editor_attrs = 'data-linktype="{0}" data-id="{1}"'
editor_attrs = editor_attrs.format(link_type, obj.id)
# Include title attribute for 508 compliance.
return '<a {0} href="{1}" title="{2}">'.format(
editor_attrs,
escape(obj.url),
obj.title
)
except model.DoesNotExist:
return '<a>'
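# Illustrative note (values assumed for clarity): with attrs={'id': '42'} and a
# Link whose url is '/contact/' and title is 'Contact', this returns roughly
#   frontend: <a  href="/contact/" title="Contact">
#   editor:   <a data-linktype="link" data-id="42" href="/contact/" title="Contact">
# and degrades to a bare '<a>' if the referenced object no longer exists.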
class BetterHandler(object):
"""
Base handler class for embedded links to instances.
"""
@staticmethod
def get_db_attributes(tag):
"""
Given an <a> tag that we've identified as a link embed, return a
dictionary of the attributes we should have on the resulting
<a> element.
"""
return {'id': tag['data-id']}
@classmethod
def expand_db_attributes(cls, attrs, for_editor):
"""
Given a dictionary of attributes from the <a> tag, return
the real HTML representation.
:param attrs: dictionary of database attributes.
:param for_editor: flag to display in editor or frontend.
:rtype: str.
"""
return expand_db_attributes_for_model(cls.model, attrs, for_editor)
class BetterDocumentLinkHandler(BetterHandler):
"""
BetterDocumentLinkHandler will be invoked whenever we encounter
an element in HTML content with an attribute of
data-embedtype="document". The resulting element in the database
representation will be:
<a data-linktype="document" data-id="42" href="[url]">.
"""
model = Document
class BetterPageLinkHandler(BetterHandler):
"""
BetterPageLinkHandler will be invoked whenever we encounter
an element in HTML content with an attribute of
data-embedtype="page". The resulting element in the database
representation will be:
<a data-linktype="page" data-id="42" href="[url]">.
"""
model = Page
class BetterLinkHandler(BetterHandler):
"""
BetterLinkHandler will be invoked whenever we encounter an element
in HTML content with an attribute of data-embedtype="link". The
resulting element in the database representation will be:
<a data-linktype="link" data-id="42" href="[url]">.
"""
model = Link
# Update link handlers.
LINK_HANDLERS = {
'document': BetterDocumentLinkHandler,
'page': BetterPageLinkHandler,
'link': BetterLinkHandler,
}
class FlexibleDbWhitelister(Whitelister):
"""
Prevents automatic replacement of 'DIV' tags with 'P' tags.
"""
has_loaded_custom_whitelist_rules = False
@classmethod
def clean(cls, html):
"""
Returns cleaned HTML.
:param html: the HTML to clean.
:rtype: str.
"""
if not cls.has_loaded_custom_whitelist_rules:
for fn in hooks.get_hooks('construct_whitelister_element_rules'):
cls.element_rules = cls.element_rules.copy()
cls.element_rules.update(fn())
cls.has_loaded_custom_whitelist_rules = True
return super(FlexibleDbWhitelister, cls).clean(html)
@classmethod
def clean_tag_node(cls, doc, tag):
"""
Cleans specified tag node.
:param doc: the document instance.
:param tag: the tag instance.
"""
if 'data-embedtype' in tag.attrs:
embed_type = tag['data-embedtype']
# Fetch the appropriate embed handler for this embedtype.
embed_handler = EMBED_HANDLERS[embed_type]
embed_attrs = embed_handler.get_db_attributes(tag)
embed_attrs['embedtype'] = embed_type
embed_tag = doc.new_tag('embed', **embed_attrs)
embed_tag.can_be_empty_element = True
tag.replace_with(embed_tag)
elif tag.name == 'a' and 'data-linktype' in tag.attrs:
# First, whitelist the contents of this tag.
for child in tag.contents:
cls.clean_node(doc, child)
link_type = tag['data-linktype']
            link_handler = LINK_HANDLERS[link_type]
link_attrs = link_handler.get_db_attributes(tag)
link_attrs['linktype'] = link_type
tag.attrs.clear()
tag.attrs.update(**link_attrs)
else:
super(FlexibleDbWhitelister, cls).clean_tag_node(doc, tag)
FIND_A_TAG = re.compile(r'<a(\b[^>]*)>')
FIND_EMBED_TAG = re.compile(r'<embed(\b[^>]*)/>')
FIND_ATTRS = re.compile(r'([\w-]+)="([^"]*)"')
def extract_attrs(attr_string):
"""
Helper method to extract tag attributes as a dictionary.
Does not escape HTML entities!
:param attr_string: string of attributes.
:rtype: dict.
"""
attributes = {}
for name, val in FIND_ATTRS.findall(attr_string):
attributes[name] = val
return attributes
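# Example (illustrative): extract_attrs('linktype="page" id="3"') returns
# {'linktype': 'page', 'id': '3'}; values come back verbatim and unescaped.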
def expand_db_html(html, for_editor=False):
"""
Expand database-representation HTML into proper HTML usable in either
templates or the rich-text editor.
:param html: the HTML to parse.
:param for_editor: flag to display in editor or frontend.
:rtype: str.
"""
def replace_a_tag(m):
attrs = extract_attrs(m.group(1))
if 'linktype' not in attrs:
# Return unchanged.
return m.group(0)
handler = LINK_HANDLERS[attrs['linktype']]
return handler.expand_db_attributes(attrs, for_editor)
def replace_embed_tag(m):
attrs = extract_attrs(m.group(1))
handler = EMBED_HANDLERS[attrs['embedtype']]
return handler.expand_db_attributes(attrs, for_editor)
html = FIND_A_TAG.sub(replace_a_tag, html)
html = FIND_EMBED_TAG.sub(replace_embed_tag, html)
return html
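# Illustrative sketch (assumption: a Page with pk=3 whose url is '/about/'):
#   expand_db_html('<a linktype="page" id="3">About</a>')
# rewrites only the opening tag, giving roughly
#   '<a  href="/about/" title="About">About</a>'
# while expand_db_html(..., for_editor=True) also restores the
# data-linktype/data-id attributes for the rich-text editor.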
|
|
#
# Some tests for NSDecimal (C type) and NSDecimalNumber (Objective-C class)
#
from PyObjCTools.TestSupport import *
from Foundation import *
import operator
import objc
import sys
try:
long
except NameError:
long = int
if 0:
class TestNSDecimal (TestCase):
def testConstants(self):
self.assertEqual(NSRoundPlain, 0)
self.assertEqual(NSRoundDown, 1)
self.assertEqual(NSRoundUp, 2)
self.assertEqual(NSRoundBankers, 3)
self.assertEqual(NSCalculationNoError, 0)
self.assertEqual(NSCalculationLossOfPrecision, 1)
self.assertEqual(NSCalculationUnderflow, 2)
self.assertEqual(NSCalculationOverflow, 3)
self.assertEqual(NSCalculationDivideByZero, 4)
self.assertEqual(NSDecimalMaxSize, 8)
self.assertEqual(NSDecimalNoScale, (2**15)-1)
def testCreation(self):
o = NSDecimal(b"1.25".decode('ascii'))
self.assert_(isinstance(o, NSDecimal))
self.assertEqual(str(o), b"1.25".decode('ascii'))
o = NSDecimal(12345, -2, objc.YES)
self.assert_(isinstance(o, NSDecimal))
self.assertEqual(str(o), b"-123.45".decode('ascii'))
o = NSDecimal()
self.assert_(isinstance(o, NSDecimal))
self.assert_(str(o) in (b"0".decode('ascii'), b"0.0".decode('ascii')))
o = NSDecimal(1234)
self.assert_(isinstance(o, NSDecimal))
self.assertEqual(str(o), b"1234".decode('ascii'))
o = NSDecimal(-1234)
self.assert_(isinstance(o, NSDecimal))
self.assertEqual(str(o), b"-1234".decode('ascii'))
o = NSDecimal(long(1234))
self.assert_(isinstance(o, NSDecimal))
self.assertEqual(str(o), b"1234".decode('ascii'))
o = NSDecimal(long(-1234))
self.assert_(isinstance(o, NSDecimal))
self.assertEqual(str(o), b"-1234".decode('ascii'))
o = NSDecimal(1 << 64 - 1)
# Explicit conversion is supported, but might not do
# what a naive user expects...
o = NSDecimal(1.1)
self.assert_(isinstance(o, NSDecimal))
self.assertEqual(str(o), repr(1.1))
self.assertRaises(OverflowError, NSDecimal, 1 << 128)
self.assertRaises(OverflowError, NSDecimal, -1 << 128)
def testFunction(self):
o = NSDecimal(b"1.5".decode('ascii'))
p = NSDecimal(12345, -2, objc.YES)
r = NSDecimal(b"-121.95".decode('ascii'))
q = NSDecimal()
NSDecimalAdd(q, o, p, NSRoundPlain)
self.assertEqual(str(q), str(r))
v = NSDecimalIsNotANumber(o)
self.assertIs(v, False)
v = NSDecimal()
NSDecimalCopy(v, o)
self.assertEqual(str(v), str(o))
NSDecimalCompact(v)
i = NSDecimalCompare(o, p)
self.assertIsInstance(i, (int, long))
NSDecimalRound(v, o, 0, NSRoundBankers)
self.assertEqual(str(v), '2')
t = NSDecimalNormalize(v, o, NSRoundBankers)
self.assertEqual(t, NSCalculationNoError)
self.assertEqual(str(v), '2.0')
t = NSDecimalPower(v, o, 3, NSRoundBankers)
self.assertEqual(t, NSCalculationNoError)
self.assertEqual(str(v), '3.375')
t = NSDecimalString(v, None)
self.assertEqual(t, b'3.375'.decode('ascii'))
def testCompare(self):
small = NSDecimal(b"1".decode('ascii'))
small2 = NSDecimal(b"1".decode('ascii'))
large = NSDecimal(b"42".decode('ascii'))
self.assert_(small == small2)
self.assert_(not (small == large))
self.assert_(not (small != small2))
self.assert_(small < large)
self.assert_(not(large < small))
self.assert_(not(small < small))
self.assert_(small <= large)
self.assert_(small <= small)
self.assert_(not(large <= small))
self.assert_(large > small)
self.assert_(not(small > large))
self.assert_(not(large > large))
self.assert_(large >= small)
self.assert_(large >= large)
self.assert_(not(small >= large))
def testConversion(self):
o = NSDecimal(b"1234.44".decode('ascii'))
self.assertEqual(o.as_int(), 1234)
o = NSDecimal(b"1.5".decode('ascii'))
self.assertEqual(o.as_float(), 1.5)
self.assertRaises(TypeError, int, o)
self.assertRaises(TypeError, float, o)
def testCreateFromFloat(self):
o = NSDecimal(1.1)
self.assertAlmostEquals(o.as_float(), 1.1)
if not hasattr(TestCase, 'assertAlmostEquals'):
def assertAlmostEquals(self, val1, val2, eta=0.000001):
self.assert_(abs(val1 - val2) < eta)
class TestNSDecimalNumber (TestCase):
def testCreation1(self):
o = NSDecimalNumber.decimalNumberWithString_(b"1.1234".decode('ascii'))
self.assertEqual(o.description(), b"1.1234".decode('ascii'))
p = o.decimalValue()
self.assert_(isinstance(p, NSDecimal))
self.assertEqual(str(p), b"1.1234".decode('ascii'))
def testCreation2(self):
p = NSDecimal(b"1.1234".decode('ascii'))
o = NSDecimalNumber.decimalNumberWithDecimal_(p)
self.assertEqual(o.description(), b"1.1234".decode('ascii'))
def testCreation3(self):
p = NSDecimal(b"1.1234".decode('ascii'))
o = NSDecimalNumber.alloc().initWithDecimal_(p)
self.assertEqual(o.description(), b"1.1234".decode('ascii'))
class NSDecimalOperators (TestCase):
def testCoerce(self):
r = NSDecimal(1)
v = coerce(r, r)
self.assertEqual(v, (r, r))
v = coerce(r, 2)
self.assertEqual(v, (r, NSDecimal(2)))
v = coerce(2, r)
self.assertEqual(v, (NSDecimal(2), r))
v = coerce(r, sys.maxint+2)
self.assertEqual(v, (r, NSDecimal(sys.maxint+2)))
v = coerce(sys.maxint+2, r)
self.assertEqual(v, (NSDecimal(sys.maxint+2), r))
t = NSDecimal(4).__pyobjc_object__
self.assert_(isinstance(t, NSObject))
v = coerce(t, r)
self.assertEqual(v, (NSDecimal(4), r))
v = coerce(r, t)
self.assertEqual(v, (r, NSDecimal(4)))
self.assertRaises(TypeError, coerce, 1.0, r)
self.assertRaises(TypeError, coerce, r, 1.0)
self.assertRaises(TypeError, coerce, "1.0", r)
self.assertRaises(TypeError, coerce, r, "1.0")
self.assertRaises(TypeError, coerce, b"1.0".decode('ascii'), r)
self.assertRaises(TypeError, coerce, r, b"1.0".decode('ascii'))
self.assertRaises(TypeError, coerce, (), r)
self.assertRaises(TypeError, coerce, r, ())
def testAddition(self):
r = NSDecimal()
o = NSDecimal(1)
p = NSDecimal(2)
O = o.__pyobjc_object__
P = p.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
self.assert_(isinstance(O, NSObject))
NSDecimalAdd(r, o, p, NSRoundPlain)
self.assertEqual(o+p, r)
self.assertEqual(o+P, r)
self.assertEqual(O+p, r)
self.assertEqual(o+2, r)
self.assertEqual(o+long(2), r)
self.assertEqual(p+1, r)
self.assertEqual(1+p, r)
self.assertRaises(TypeError, operator.add, o, 1.2)
self.assertRaises(TypeError, operator.add, 1.2, o)
self.assertRaises(TypeError, operator.add, o, "1.2")
self.assertRaises(TypeError, operator.add, "1.2", o)
self.assertRaises(TypeError, operator.add, o, b"1.2".decode('ascii'))
self.assertRaises(TypeError, operator.add, b"1.2".decode('ascii'), o)
self.assertRaises(TypeError, operator.add, o, [])
self.assertRaises(TypeError, operator.add, [], o)
def testSubtraction(self):
r = NSDecimal()
o = NSDecimal(1)
p = NSDecimal(2)
P = p.__pyobjc_object__
O = o.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
self.assert_(isinstance(O, NSObject))
NSDecimalSubtract(r, o, p, NSRoundPlain)
self.assertEqual(o-p, r)
self.assertEqual(O-p, r)
self.assertEqual(o-P, r)
self.assertEqual(o-2, r)
self.assertEqual(o-long(2), r)
self.assertEqual(1-p, r)
        self.assertEqual(long(1)-p, r)
self.assertRaises(TypeError, operator.sub, o, 1.2)
self.assertRaises(TypeError, operator.sub, 1.2, o)
self.assertRaises(TypeError, operator.sub, o, "1.2")
self.assertRaises(TypeError, operator.sub, "1.2", o)
self.assertRaises(TypeError, operator.sub, o, b"1.2".decode('ascii'))
self.assertRaises(TypeError, operator.sub, b"1.2".decode('ascii'), o)
self.assertRaises(TypeError, operator.sub, o, ())
self.assertRaises(TypeError, operator.sub, (), o)
def testMultiplication(self):
r = NSDecimal()
o = NSDecimal(2)
p = NSDecimal(3)
P = p.__pyobjc_object__
O = o.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
self.assert_(isinstance(O, NSObject))
NSDecimalMultiply(r, o, p, NSRoundPlain)
self.assertEqual(o*p, r)
self.assertEqual(O*p, r)
self.assertEqual(o*P, r)
self.assertEqual(o*3, r)
self.assertEqual(o*long(3), r)
self.assertEqual(2*p, r)
        self.assertEqual(long(2)*p, r)
self.assertRaises(TypeError, operator.mul, o, 1.2)
self.assertRaises(TypeError, operator.mul, 1.2, o)
NSDecimalMultiplyByPowerOf10(r, o, 4, NSRoundPlain)
self.assertEqual(r, NSDecimal(20000))
def testDivision(self):
r = NSDecimal()
o = NSDecimal(2)
p = NSDecimal(3)
P = p.__pyobjc_object__
O = o.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
self.assert_(isinstance(O, NSObject))
NSDecimalDivide(r, o, p, NSRoundPlain)
self.assertEqual(o/p, r)
self.assertEqual(O/p, r)
self.assertEqual(o/P, r)
self.assertEqual(o/3, r)
self.assertEqual(o/long(3), r)
self.assertEqual(2/p, r)
        self.assertEqual(long(2)/p, r)
self.assertRaises(TypeError, operator.div, o, 1.2)
self.assertRaises(TypeError, operator.div, 1.2, o)
def testPositive(self):
o = NSDecimal(2)
p = NSDecimal(-2)
self.assertEqual(+o, o)
self.assertEqual(+p, p)
def testNegative(self):
o = NSDecimal(2)
p = NSDecimal(-2)
self.assertEqual(-o, p)
self.assertEqual(-p, o)
def testAbs(self):
o = NSDecimal(2)
p = NSDecimal(-2)
self.assertEqual(abs(o), o)
self.assertEqual(abs(p), o)
def testBitwise(self):
o = NSDecimal(2)
p = NSDecimal(3)
self.assertRaises(TypeError, operator.and_, o, p)
self.assertRaises(TypeError, operator.or_, o, p)
self.assertRaises(TypeError, operator.not_, o, p)
def testPow(self):
o = NSDecimal(2)
p = NSDecimal(3)
self.assertRaises(TypeError, pow, o, p)
self.assertRaises(TypeError, pow, o, 2)
self.assertRaises(TypeError, pow, 2, o)
def testDivMod(self):
o = NSDecimal(2)
p = NSDecimal(3)
self.assertRaises(TypeError, divmod, o, p)
self.assertRaises(TypeError, divmod, o, 2)
self.assertRaises(TypeError, divmod, 2, o)
def testInplaceAddition(self):
r = NSDecimal()
o = NSDecimal(1)
p = NSDecimal(2)
P = p.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
NSDecimalAdd(r, o, p, NSRoundPlain)
o = NSDecimal(1)
o += p
self.assertEqual(o, r)
o = NSDecimal(1)
o += P
self.assertEqual(o, r)
o = NSDecimal(1)
o += 2
self.assertEqual(o, r)
o = NSDecimal(1)
o += long(2)
self.assertEqual(o, r)
o = 1
o += p
self.assertEqual(o, r)
o = long(1)
o += p
self.assertEqual(o, r)
try:
o = 1.2
o += p
self.fail()
except TypeError:
pass
try:
o = NSDecimal(1)
o += 1.2
self.fail()
except TypeError:
pass
def testInplaceSubtraction(self):
r = NSDecimal()
o = NSDecimal(1)
p = NSDecimal(2)
P = p.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
NSDecimalSubtract(r, o, p, NSRoundPlain)
o = NSDecimal(1)
o -= p
self.assertEqual(o, r)
o = NSDecimal(1)
o -= P
self.assertEqual(o, r)
o = NSDecimal(1)
o -= 2
self.assertEqual(o, r)
o = NSDecimal(1)
        o -= long(2)
self.assertEqual(o, r)
o = 1
o -= p
self.assertEqual(o, r)
        o = long(1)
o -= p
self.assertEqual(o, r)
def testInplaceMultiplication(self):
r = NSDecimal()
o = NSDecimal(2)
p = NSDecimal(3)
P = p.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
NSDecimalMultiply(r, o, p, NSRoundPlain)
o = NSDecimal(2)
o *= p
self.assertEqual(o, r)
o = NSDecimal(2)
o *= P
self.assertEqual(o, r)
o = NSDecimal(2)
o *= 3
self.assertEqual(o, r)
o = NSDecimal(2)
o *= long(3)
self.assertEqual(o, r)
o = 2
o *= p
self.assertEqual(o, r)
o = long(2)
o *= p
self.assertEqual(o, r)
def testInplaceDivision(self):
r = NSDecimal()
o = NSDecimal(2)
p = NSDecimal(3)
P = p.__pyobjc_object__
self.assert_(isinstance(P, NSObject))
NSDecimalDivide(r, o, p, NSRoundPlain)
o = NSDecimal(2)
o /= p
self.assertEqual(o, r)
o = NSDecimal(2)
o /= P
self.assertEqual(o, r)
o = NSDecimal(2)
o /= 3
self.assertEqual(o, r)
o = NSDecimal(2)
o /= long(3)
self.assertEqual(o, r)
o = 2
o /= p
self.assertEqual(o, r)
o = long(2)
o /= p
self.assertEqual(o, r)
class NSDecimalNumberOperators (TestCase):
def testAddition(self):
r = NSDecimal()
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(1))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
NSDecimalAdd(r, o.decimalValue(), p.decimalValue(), NSRoundPlain)
self.assertEqual((o+p), r)
self.assertEqual((o+2), r)
self.assertEqual((o+long(2)), r)
self.assertEqual((1+p), r)
        self.assertEqual((long(1)+p), r)
self.assertRaises(TypeError, operator.add, o, 1.2)
self.assertRaises(TypeError, operator.add, 1.2, o)
o = NSDecimalNumber.zero()
self.assertRaises(TypeError, operator.add, o, 1.2)
def testSubtraction(self):
r = NSDecimal()
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(1))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
NSDecimalSubtract(r, o.decimalValue(), p.decimalValue(), NSRoundPlain)
self.assertEqual((o-p), r)
self.assertEqual((o-2), r)
self.assertEqual((o-long(2)), r)
self.assertEqual((1-p), r)
        self.assertEqual((long(1)-p), r)
self.assertRaises(TypeError, operator.sub, o, 1.2)
self.assertRaises(TypeError, operator.sub, 1.2, o)
def testMultiplication(self):
r = NSDecimal()
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(3))
NSDecimalMultiply(r, o.decimalValue(), p.decimalValue(), NSRoundPlain)
self.assertEqual((o*p), r)
self.assertEqual((o*3), r)
self.assertEqual((o*long(3)), r)
self.assertEqual((2*p), r)
        self.assertEqual((long(2)*p), r)
self.assertRaises(TypeError, operator.mul, o, 1.2)
self.assertRaises(TypeError, operator.mul, 1.2, o)
def testDivision(self):
r = NSDecimal()
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(3))
NSDecimalDivide(r, o.decimalValue(), p.decimalValue(), NSRoundPlain)
self.assertEqual((o/p), r)
self.assertEqual((o/3), r)
self.assertEqual((o/long(3)), r)
self.assertEqual((2/p), r)
        self.assertEqual((long(2)/p), r)
self.assertRaises(TypeError, operator.div, o, 1.2)
self.assertRaises(TypeError, operator.div, 1.2, o)
def testPositive(self):
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(-2))
self.assertEqual((+o), o.decimalValue())
self.assertEqual((+p), p.decimalValue())
def testNegative(self):
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(-2))
self.assertEqual((-o), p.decimalValue())
self.assertEqual((-p), o.decimalValue())
def testAbs(self):
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(-2))
self.assertEqual(abs(o), o.decimalValue())
self.assertEqual(abs(p), o.decimalValue())
def testBitwise(self):
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(-2))
self.assertRaises(TypeError, operator.and_, o, p)
self.assertRaises(TypeError, operator.or_, o, p)
self.assertRaises(TypeError, operator.not_, o, p)
def testPow(self):
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(-2))
self.assertRaises(TypeError, pow, o, p)
self.assertRaises(TypeError, pow, o, 2)
self.assertRaises(TypeError, pow, 2, o)
def testDivMod(self):
o = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(2))
p = NSDecimalNumber.decimalNumberWithDecimal_(NSDecimal(-2))
self.assertRaises(TypeError, divmod, o, p)
self.assertRaises(TypeError, divmod, o, 2)
self.assertRaises(TypeError, divmod, 2, o)
if __name__ == "__main__":
main()
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import numpy as np
from jaxlib import xla_client
try:
from . import _hipblas
for _name, _value in _hipblas.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="ROCM")
except ImportError:
pass
try:
from . import _hipsolver
for _name, _value in _hipsolver.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="ROCM")
except ImportError:
pass
_ops = xla_client.ops
_Shape = xla_client.Shape
def _real_type(dtype):
"""Returns the real equivalent of 'dtype'."""
return np.finfo(dtype).dtype
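# For example, np.finfo(np.complex64).dtype is float32 and np.finfo(np.float64).dtype
# is float64, so eigenvalue/singular-value outputs below stay real for complex inputs.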
_prod = lambda xs: functools.reduce(operator.mul, xs, 1)
def trsm(c,
a,
b,
left_side=False,
lower=False,
trans_a=False,
conj_a=False,
diag=False):
"""Batched triangular solve.
XLA implements unbatched triangular solve directly, so we need only implement
the batched case."""
b_shape = c.get_shape(b)
dtype = b_shape.element_type()
dims = b_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
k = m if left_side else n
a_shape = c.get_shape(a)
if (batch_dims + (k, k) != a_shape.dimensions()
or a_shape.element_type() != dtype):
raise ValueError("Argument mismatch for trsm, got {} and {}".format(
a_shape, b_shape))
if conj_a and not trans_a:
raise NotImplementedError(
"Conjugation without transposition not supported")
lwork, opaque = _hipblas.build_trsm_batched_descriptor(
np.dtype(dtype), batch, m, n, left_side, lower, trans_a, conj_a, diag)
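  # The layout tuples below are XLA minor-to-major orderings for dims
  # batch_dims + (m, n): the row dimension comes first (column-major storage per
  # matrix, as the BLAS routine expects) with batch dimensions major-most,
  # e.g. num_bd=1 gives (1, 2, 0).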
layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
out = _ops.CustomCallWithLayout(
c,
b"hipblas_trsm_batched",
operands=(a, b),
shape_with_layout=_Shape.tuple_shape(
(_Shape.array_shape(dtype, b_shape.dimensions(), layout),
_Shape.array_shape(np.dtype(np.int8), (lwork, ), (0, )),
_Shape.array_shape(np.dtype(np.int8), (lwork, ), (0, )))),
operand_shapes_with_layout=(
_Shape.array_shape(dtype, a_shape.dimensions(), layout),
_Shape.array_shape(dtype, b_shape.dimensions(), layout),
),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
return _ops.GetTupleElement(out, 0)
def potrf(c, a, lower):
"""Cholesky decomposition."""
a_shape = c.get_shape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
m, n = dims[-2:]
assert m == n
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
lwork, opaque = _hipsolver.build_potrf_descriptor(np.dtype(dtype), lower,
batch, n)
kernel = b"hipsolver_potrf"
out = _ops.CustomCallWithLayout(
c,
kernel,
operands=(a, ),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (n, n), (num_bd, num_bd + 1) +
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(np.dtype(np.int32), batch_dims,
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(np.dtype(np.int8), (lwork, ), (0, )),
)),
operand_shapes_with_layout=(_Shape.array_shape(
dtype, batch_dims + (n, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))), ),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
return _ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1)
def getrf(c, a):
"""LU decomposition."""
a_shape = c.get_shape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
if batch > 1 and m == n and m // batch <= 128:
lwork, opaque = _hipblas.build_getrf_batched_descriptor(
np.dtype(dtype), batch, m)
workspace = _Shape.array_shape(np.dtype(np.int8), (lwork, ), (0, ))
kernel = b"hipblas_getrf_batched"
else:
lwork, opaque = _hipsolver.build_getrf_descriptor(np.dtype(dtype), batch,
m, n)
workspace = _Shape.array_shape(dtype, (lwork, ), (0, ))
kernel = b"hipsolver_getrf"
out = _ops.CustomCallWithLayout(
c,
kernel,
operands=(a, ),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), (num_bd, num_bd + 1) +
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(np.dtype(np.int32), batch_dims + (min(m, n), ),
tuple(range(num_bd, -1, -1))),
_Shape.array_shape(np.dtype(np.int32), batch_dims,
tuple(range(num_bd - 1, -1, -1))),
workspace,
)),
operand_shapes_with_layout=(_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))), ),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1),
_ops.GetTupleElement(out, 2))
def geqrf(c, a):
"""QR decomposition."""
a_shape = c.get_shape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
lwork, opaque = _hipsolver.build_geqrf_descriptor(np.dtype(dtype), batch, m,
n)
workspace = _Shape.array_shape(dtype, (lwork, ), (0, ))
kernel = b"hipsolver_geqrf"
out = _ops.CustomCallWithLayout(
c,
kernel,
operands=(a, ),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), (num_bd, num_bd + 1) +
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(dtype, batch_dims + (min(m, n), ),
tuple(range(num_bd, -1, -1))),
_Shape.array_shape(np.dtype(np.int32), batch_dims,
tuple(range(num_bd - 1, -1, -1))),
workspace,
)),
operand_shapes_with_layout=(_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))), ),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1),
_ops.GetTupleElement(out, 2))
def orgqr(c, a, tau):
"""Product of elementary Householder reflections."""
a_shape = c.get_shape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
tau_dims = c.get_shape(tau).dimensions()
assert tau_dims[:-1] == dims[:-2]
k = tau_dims[-1]
lwork, opaque = _hipsolver.build_orgqr_descriptor(np.dtype(dtype), batch, m,
n, k)
workspace = _Shape.array_shape(dtype, (lwork, ), (0, ))
kernel = b"hipsolver_orgqr"
out = _ops.CustomCallWithLayout(
c,
kernel,
operands=(a, tau),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), (num_bd, num_bd + 1) +
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(np.dtype(np.int32), batch_dims,
tuple(range(num_bd - 1, -1, -1))),
workspace,
)),
operand_shapes_with_layout=(
_Shape.array_shape(dtype, batch_dims + (m, n), (num_bd, num_bd + 1) +
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(dtype, batch_dims + (k, ),
tuple(range(num_bd, -1, -1))),
),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1))
def syevd(c, a, lower=False):
"""Symmetric (Hermitian) eigendecomposition."""
a_shape = c.get_shape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
assert m == n
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
  # TODO(rocm): ROCm does not support the Jacobi (syevj) solver.
kernel = b"hipsolver_syevd"
lwork, opaque = _hipsolver.build_syevd_descriptor(np.dtype(dtype), lower,
batch, n)
eigvals_type = _real_type(dtype)
out = _ops.CustomCallWithLayout(
c,
kernel,
operands=(a, ),
shape_with_layout=_Shape.tuple_shape(
(_Shape.array_shape(dtype, dims, layout),
_Shape.array_shape(np.dtype(eigvals_type), batch_dims + (n, ),
tuple(range(num_bd, -1, -1))),
_Shape.array_shape(np.dtype(np.int32), batch_dims,
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(dtype, (lwork, ), (0, )))),
operand_shapes_with_layout=(_Shape.array_shape(dtype, dims, layout), ),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1),
_ops.GetTupleElement(out, 2))
def gesvd(c, a, full_matrices=True, compute_uv=True):
"""Singular value decomposition."""
a_shape = c.get_shape(a)
dims = a_shape.dimensions()
dtype = a_shape.element_type()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
b = _prod(batch_dims)
singular_vals_dtype = np.dtype(_real_type(dtype))
  # TODO(rocm): ROCm does not support the Jacobi (gesvdj) solver.
  # For CUDA, jax uses the Jacobi method for small matrices.
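  # When m < n the descriptor below is built for the transposed problem (note the
  # swapped m/n arguments), and the U and V^T tuple elements from the custom call
  # are swapped back accordingly when unpacking.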
if m < n:
lwork, opaque = _hipsolver.build_gesvd_descriptor(np.dtype(dtype), b, n, m,
compute_uv,
full_matrices)
scalar_layout = tuple(range(num_bd - 1, -1, -1))
vector_layout = (num_bd, ) + scalar_layout
matrix_layout = (num_bd + 1, num_bd) + scalar_layout
out = _ops.CustomCallWithLayout(
c,
b"hipsolver_gesvd",
operands=(a, ),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
_Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n), ),
vector_layout),
_Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),
_Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),
_Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),
_Shape.array_shape(dtype, (lwork, ), (0, )),
)),
operand_shapes_with_layout=(_Shape.array_shape(dtype,
batch_dims + (m, n),
matrix_layout), ),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
s = _ops.GetTupleElement(out, 1)
vt = _ops.GetTupleElement(out, 2)
u = _ops.GetTupleElement(out, 3)
info = _ops.GetTupleElement(out, 4)
else:
lwork, opaque = _hipsolver.build_gesvd_descriptor(np.dtype(dtype), b, m, n,
compute_uv,
full_matrices)
scalar_layout = tuple(range(num_bd - 1, -1, -1))
vector_layout = (num_bd, ) + scalar_layout
matrix_layout = (num_bd, num_bd + 1) + scalar_layout
out = _ops.CustomCallWithLayout(
c,
b"hipsolver_gesvd",
operands=(a, ),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
_Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n), ),
vector_layout),
_Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),
_Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),
_Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),
_Shape.array_shape(dtype, (lwork, ), (0, )),
)),
operand_shapes_with_layout=(_Shape.array_shape(dtype,
batch_dims + (m, n),
matrix_layout), ),
opaque=opaque,
api_version=xla_client.ops.CustomCallApiVersion.
API_VERSION_STATUS_RETURNING)
s = _ops.GetTupleElement(out, 1)
u = _ops.GetTupleElement(out, 2)
vt = _ops.GetTupleElement(out, 3)
info = _ops.GetTupleElement(out, 4)
if not full_matrices:
u = _ops.Slice(u, (0, ) * len(dims), batch_dims + (m, min(m, n)),
(1, ) * len(dims))
vt = _ops.Slice(vt, (0, ) * len(dims), batch_dims + (min(m, n), n),
(1, ) * len(dims))
return s, u, vt, info
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The identities module is part of the nmeta suite
It provides an abstraction for participants (identities), using
a MongoDB database for storage and data retention maintenance.
Identities are identified via TBD....
There are methods (see class docstring) that provide harvesting
of identity metadata and various retrieval searches
"""
import sys
import struct
#*** For packet methods:
import socket
#*** Import dpkt for packet parsing:
import dpkt
#*** mongodb Database Import:
import pymongo
from pymongo import MongoClient
#*** For timestamps:
import datetime
#*** For logging configuration:
from baseclass import BaseClass
#*** For Regular Expression searches:
import re
#*** For hashing of identities:
import hashlib
#*** How long to cache ARP responses for (in seconds):
ARP_CACHE_TIME = 14400
#*** DHCP lease time to use if none present (in seconds):
DHCP_DEFAULT_LEASE_TIME = 3600
class Identities(BaseClass):
"""
An object that represents identity metadata
Main function used to harvest identity metadata:
(assumes class instantiated as an object called 'ident')
ident.harvest(pkt, flow.packet)
Passed a raw packet and packet metadata from flow object.
Check a packet_in event and harvest any relevant identity
indicators to metadata
Functions available for Classifiers:
(assumes class instantiated as an object called 'ident')
ident.findbymac(mac_address)
Look up identity object for a MAC address
ident.findbynode(host_name)
Look up identity object by host name (aka node)
Additionally, can set:
          regex=True Treat host_name as a regular expression
harvest_type= Specify what type of harvest (i.e. DHCP)
ident.findbyservice(service_name)
Look up identity object by service name
Additionally, can set:
regex=True Treat service_name as a regular expression
harvest_type= Specify what type of harvest (i.e. DNS_A)
ip_address= Look for specific IP address
See function docstrings for more information
"""
def __init__(self, config, policy):
"""
Initialise an instance of the Identities class
"""
self.policy = policy
#*** Required for BaseClass:
self.config = config
#*** Set up Logging with inherited base class method:
self.configure_logging(__name__, "identities_logging_level_s",
"identities_logging_level_c")
#*** Get parameters from config:
mongo_addr = config.get_value("mongo_addr")
mongo_port = config.get_value("mongo_port")
mongo_dbname = self.config.get_value("mongo_dbname")
#*** Max bytes of the identities capped collection:
identities_max_bytes = config.get_value("identities_max_bytes")
        #*** How far back in time to go looking for an identity:
self.identity_time_limit = datetime.timedelta \
(seconds=config.get_value("identity_time_limit"))
#*** Max bytes of the dhcp_messages capped collection:
dhcp_messages_max_bytes = config.get_value("dhcp_messages_max_bytes")
        #*** How far back in time to go looking for a DHCP message:
self.dhcp_messages_time_limit = datetime.timedelta \
(seconds=config.get_value("dhcp_messages_time_limit"))
#*** Start mongodb:
self.logger.info("Connecting to MongoDB database...")
mongo_client = MongoClient(mongo_addr, mongo_port)
#*** Connect to MongoDB nmeta database:
db_nmeta = mongo_client[mongo_dbname]
#*** Delete (drop) previous identities collection if it exists:
self.logger.debug("Deleting previous identities MongoDB collection...")
db_nmeta.identities.drop()
#*** Create the identities collection, specifying capped option
#*** with max size in bytes, so MongoDB handles data retention:
self.identities = db_nmeta.create_collection('identities', capped=True,
size=identities_max_bytes)
#*** Index to improve look-up performance:
self.identities.create_index([('valid_from', pymongo.DESCENDING),
('valid_to', pymongo.DESCENDING),
('ip_address', pymongo.ASCENDING),
('harvest_type', pymongo.ASCENDING)
],
unique=False)
#*** Index to improve MAC address look-up performance:
self.identities.create_index([('mac_address', pymongo.ASCENDING),
('valid_from', pymongo.DESCENDING)], unique=False)
#*** Index to improve Node (host_name) look-up performance:
self.identities.create_index([('host_name', pymongo.ASCENDING),
('valid_from', pymongo.DESCENDING)], unique=False)
#*** Index to improve Service look-up performance:
self.identities.create_index([('service_name', pymongo.ASCENDING),
('valid_from', pymongo.DESCENDING)], unique=False)
#*** Delete (drop) previous dhcp_messages collection if it exists:
self.logger.debug("Deleting previous dhcp_messages MongoDB "
"collection...")
db_nmeta.dhcp_messages.drop()
#*** Create the dhcp_messages collection, specifying capped option
#*** with max size in bytes, so MongoDB handles data retention:
self.dhcp_messages = db_nmeta.create_collection('dhcp_messages',
capped=True, size=dhcp_messages_max_bytes)
#*** Index dhcp_messages to improve look-up performance:
self.dhcp_messages.create_index([('ingest_time', pymongo.DESCENDING),
('transaction_id', pymongo.ASCENDING),
('message_type', pymongo.ASCENDING)
],
unique=False)
class Identity(object):
"""
An object that represents an individual Identity Indicator
"""
def __init__(self):
#*** Initialise identity variables:
self.dpid = 0
self.in_port = 0
self.mac_address = ""
self.ip_address = ""
self.harvest_type = 0
self.harvest_time = 0
self.host_name = ""
self.host_type = ""
self.host_os = ""
self.host_desc = ""
self.service_name = ""
self.service_alias = ""
self.user_id = ""
self.valid_from = ""
self.valid_to = ""
self.id_hash = ""
self.location_logical = ""
self.location_physical = ""
def dbdict(self):
"""
Return a dictionary object of identity metadata
parameters for storing in the database
"""
return self.__dict__
class DHCPMessage(object):
"""
An object that represents an individual DHCP message.
Used for storing DHCP state by recording DHCP events
"""
def __init__(self):
#*** Initialise identity variables:
self.dpid = 0
self.in_port = 0
self.ingest_time = 0
self.eth_src = 0
self.eth_dst = 0
self.ip_src = 0
self.ip_dst = 0
self.tp_src = 0
self.tp_dst = 0
self.transaction_id = 0
self.message_type = 0
self.host_name = ""
self.ip_assigned = 0
self.ip_dhcp_server = 0
self.lease_time = 0
def dbdict(self):
"""
Return a dictionary object of dhcp message
parameters for storing in the database
"""
return self.__dict__
def harvest(self, pkt, flow_pkt):
"""
Passed a raw packet and packet metadata from flow object.
Check a packet_in event and harvest any relevant identity
indicators to metadata
"""
#*** ARP:
if flow_pkt.eth_type == 2054:
self.harvest_arp(pkt, flow_pkt)
#*** DHCP:
elif flow_pkt.eth_type == 2048 and flow_pkt.proto == 17 and \
(flow_pkt.tp_dst == 67 or flow_pkt.tp_dst == 68):
self.harvest_dhcp(flow_pkt)
#*** LLDP:
elif flow_pkt.eth_type == 35020:
self.harvest_lldp(flow_pkt)
#*** DNS:
        elif (flow_pkt.proto == 6 or flow_pkt.proto == 17) and \
                flow_pkt.tp_src == 53:
self.harvest_dns(flow_pkt)
else:
#*** Not an identity indicator
return 0
def harvest_arp(self, pkt, flow_pkt):
"""
Harvest ARP identity metadata into database.
Passed packet-in metadata from flow object.
Check ARP reply and harvest identity
indicators to metadata
"""
self.logger.debug("Harvesting metadata from ARP request")
eth = dpkt.ethernet.Ethernet(pkt)
pkt_arp = eth.arp
if pkt_arp:
#*** It's an ARP, but is it a reply (opcode 2) for IPv4?:
if pkt_arp.op == 2 and pkt_arp.pro == 2048:
                #*** Instantiate an instance of the Identity class:
ident = self.Identity()
ident.dpid = flow_pkt.dpid
ident.in_port = flow_pkt.in_port
ident.mac_address = mac_addr(pkt_arp.sha)
ident.ip_address = socket.inet_ntoa(pkt_arp.spa)
ident.harvest_type = 'ARP'
ident.harvest_time = flow_pkt.timestamp
ident.valid_from = flow_pkt.timestamp
ident.valid_to = flow_pkt.timestamp + \
datetime.timedelta(0, ARP_CACHE_TIME)
ident.id_hash = self._hash_identity(ident)
ident.location_logical = self.policy.locations.get_location \
(ident.dpid, ident.in_port)
db_dict = ident.dbdict()
#*** Write ARP identity metadata to database collection:
self.logger.debug("writing db_dict=%s", db_dict)
self.identities.insert_one(db_dict)
return 1
def harvest_dhcp(self, flow_pkt):
"""
Harvest DHCP identity metadata into database.
Passed packet-in metadata from flow object.
        Check DHCP fields and harvest any relevant identity
indicators to metadata
"""
self.logger.debug("Harvesting metadata from DHCP request")
dhcp_hostname = ""
dhcp_leasetime = 0
#*** Use dpkt to parse UDP DHCP data:
try:
pkt_dhcp = dpkt.dhcp.DHCP(flow_pkt.payload)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("DHCP extraction failed "
"Exception %s, %s, %s",
exc_type, exc_value, exc_traceback)
return 0
#*** Turn DHCP options list of tuples into a dictionary:
dhcp_opts = dict(pkt_dhcp.opts)
self.logger.debug("dhcp_opts=%s", dhcp_opts)
#*** Get the type of the DHCP message:
try:
dhcp_type = ord(dhcp_opts[53])
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("DHCP type extraction failed "
"Exception %s, %s, %s",
exc_type, exc_value, exc_traceback)
return 0
#*** Do stuff based on the DHCP message type:
if dhcp_type == dpkt.dhcp.DHCPDISCOVER:
self.logger.debug("Matched DHCPDISCOVER, TBD - not handled")
elif dhcp_type == dpkt.dhcp.DHCPOFFER:
self.logger.debug("Matched DHCPOFFER, TBD - not handled")
elif dhcp_type == dpkt.dhcp.DHCPREQUEST:
self.logger.debug("Matched DHCPREQUEST")
if dpkt.dhcp.DHCP_OPT_HOSTNAME in dhcp_opts:
#*** Instantiate an instance of DHCP class:
self.dhcp_msg = self.DHCPMessage()
self.dhcp_msg.dpid = flow_pkt.dpid
self.dhcp_msg.in_port = flow_pkt.in_port
self.dhcp_msg.ingest_time = flow_pkt.timestamp
self.dhcp_msg.eth_src = flow_pkt.eth_src
self.dhcp_msg.eth_dst = flow_pkt.eth_dst
self.dhcp_msg.ip_src = flow_pkt.ip_src
self.dhcp_msg.ip_dst = flow_pkt.ip_dst
self.dhcp_msg.tp_src = flow_pkt.tp_src
self.dhcp_msg.tp_dst = flow_pkt.tp_dst
self.dhcp_msg.transaction_id = hex(pkt_dhcp.xid)
self.dhcp_msg.host_name = str(dhcp_opts
[dpkt.dhcp.DHCP_OPT_HOSTNAME])
self.dhcp_msg.message_type = 'DHCPREQUEST'
#*** Record DHCP event to db collection:
db_dict = self.dhcp_msg.dbdict()
#*** Write DHCP message to db collection:
self.logger.debug("writing dhcp_messages db_dict=%s", db_dict)
self.dhcp_messages.insert_one(db_dict)
return 1
elif dhcp_type == dpkt.dhcp.DHCPDECLINE:
self.logger.debug("Matched DHCPDECLINE, TBD - not handled")
elif dhcp_type == dpkt.dhcp.DHCPACK:
self.logger.debug("Matched DHCPACK")
xid = hex(pkt_dhcp.xid)
#*** Look up dhcp db collection for DHCPREQUEST:
db_data = {'transaction_id': xid,
'message_type': 'DHCPREQUEST'}
#*** Filter by documents that are still within 'best before' time:
db_data['ingest_time'] = {'$gte': datetime.datetime.now() -
self.dhcp_messages_time_limit}
#*** Run db search:
result = self.dhcp_messages.find(db_data).sort('ingest_time', -1) \
.limit(1)
if result.count():
result0 = list(result)[0]
self.logger.debug("Found DHCPREQUEST for DHCPACK")
#*** Found a DHCP Request for the ACK, record results:
#*** Instantiate an instance of DHCP class:
self.dhcp_msg = self.DHCPMessage()
self.dhcp_msg.dpid = flow_pkt.dpid
self.dhcp_msg.in_port = flow_pkt.in_port
self.dhcp_msg.ingest_time = flow_pkt.timestamp
self.dhcp_msg.eth_src = flow_pkt.eth_src
self.dhcp_msg.eth_dst = flow_pkt.eth_dst
self.dhcp_msg.ip_src = flow_pkt.ip_src
self.dhcp_msg.ip_dst = flow_pkt.ip_dst
self.dhcp_msg.tp_src = flow_pkt.tp_src
self.dhcp_msg.tp_dst = flow_pkt.tp_dst
self.dhcp_msg.transaction_id = hex(pkt_dhcp.xid)
self.dhcp_msg.ip_assigned = \
socket.inet_ntoa(struct.pack(">L", pkt_dhcp.yiaddr))
if dpkt.dhcp.DHCP_OPT_LEASE_SEC in dhcp_opts:
self.dhcp_msg.lease_time = struct.unpack('>L', dhcp_opts
[dpkt.dhcp.DHCP_OPT_LEASE_SEC])[0]
self.logger.debug("Found dhcp_leasetime=%s",
self.dhcp_msg.lease_time)
else:
self.dhcp_msg.lease_time = DHCP_DEFAULT_LEASE_TIME
self.logger.debug("Using default dhcp_leasetime=%s",
self.dhcp_msg.lease_time)
self.dhcp_msg.message_type = 'DHCPACK'
#*** Record DHCP event to db collection:
db_dict = self.dhcp_msg.dbdict()
#*** Write DHCP message to db collection:
self.logger.debug("writing dhcp_messages db_dict=%s", db_dict)
self.dhcp_messages.insert_one(db_dict)
#*** Instantiate an instance of Identity class:
ident = self.Identity()
ident.dpid = flow_pkt.dpid
ident.in_port = flow_pkt.in_port
ident.mac_address = flow_pkt.eth_dst
ident.ip_address = self.dhcp_msg.ip_assigned
ident.harvest_type = 'DHCP'
ident.host_name = result0['host_name']
ident.harvest_time = flow_pkt.timestamp
ident.valid_from = flow_pkt.timestamp
#*** Calculate validity:
ident.valid_to = flow_pkt.timestamp + \
datetime.timedelta(0, self.dhcp_msg.lease_time)
ident.id_hash = self._hash_identity(ident)
ident.location_logical = self.policy.locations.get_location \
(ident.dpid, ident.in_port)
db_dict = ident.dbdict()
#*** Write DHCP identity metadata to db collection:
self.logger.debug("writing db_dict=%s", db_dict)
self.identities.insert_one(db_dict)
return 1
else:
self.logger.debug("Prev DHCP host_name not found")
return 0
elif dhcp_type == dpkt.dhcp.DHCPNAK:
self.logger.debug("Matched DHCPNAK, TBD - not handled")
elif dhcp_type == dpkt.dhcp.DHCPRELEASE:
self.logger.debug("Matched DHCPRELEASE, TBD - not handled")
elif dhcp_type == dpkt.dhcp.DHCPINFORM:
self.logger.debug("Matched DHCPINFORM, TBD - not handled")
else:
self.logger.debug("Unknown DHCP option 53 value: %s", dhcp_type)
return 0
def harvest_lldp(self, flow_pkt):
"""
Harvest LLDP identity metadata into database.
Passed packet-in metadata from flow object.
Check LLDP TLV fields and harvest any relevant identity
indicators to metadata
"""
self.logger.debug("Checking LLDP for metadata")
payload = flow_pkt.payload
lldp_dict = self._parse_lldp_detail(payload)
if not len(lldp_dict):
self.logger.warning("Failed to parse LLDP")
return 0
self.logger.debug("LLDP parsed %s", lldp_dict)
#*** Instantiate an instance of Identity class:
ident = self.Identity()
if 'system_name' in lldp_dict:
ident.host_name = lldp_dict['system_name']
if 'system_desc' in lldp_dict:
ident.host_desc = lldp_dict['system_desc']
if 'TTL' in lldp_dict:
ttl = lldp_dict['TTL']
else:
#*** TBD, handle this better:
ttl = 60
ident.dpid = flow_pkt.dpid
ident.in_port = flow_pkt.in_port
ident.mac_address = flow_pkt.eth_src
ident.harvest_type = 'LLDP'
ident.harvest_time = flow_pkt.timestamp
ident.valid_from = flow_pkt.timestamp
#*** valid to based on LLDP TTL:
ident.valid_to = flow_pkt.timestamp + \
datetime.timedelta(0, ttl)
#*** Try looking up an IP for the LLDP source MAC:
ident2 = self.findbymac(ident.mac_address)
if 'ip_address' in ident2:
ident.ip_address = ident2['ip_address']
self.logger.debug("Found ip=%s for LLDP flow_hash=%s",
ident.ip_address, flow_pkt.flow_hash)
else:
self.logger.debug("Could not find IP for LLDP flow_hash=%s",
flow_pkt.flow_hash)
ident.id_hash = self._hash_identity(ident)
ident.location_logical = self.policy.locations.get_location \
(ident.dpid, ident.in_port)
#*** Write LLDP identity metadata to db collection:
db_dict = ident.dbdict()
self.logger.debug("writing db_dict=%s", db_dict)
self.identities.insert_one(db_dict)
return 1
def harvest_dns(self, flow_pkt):
"""
Harvest DNS identity metadata into database.
Passed packet-in metadata from flow object.
Check DNS answer(s) and harvest any relevant identity
indicators to metadata
"""
self.logger.debug("Checking DNS for metadata")
#*** Use dpkt to parse DNS:
try:
pkt_dns = dpkt.dns.DNS(flow_pkt.payload)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("DNS extraction failed "
"Exception %s, %s, %s",
exc_type, exc_value, exc_traceback)
return 0
answers = pkt_dns.an
for answer in answers:
if answer.type == 1:
#*** DNS A Record:
ident = self.Identity()
ident.dpid = flow_pkt.dpid
ident.in_port = flow_pkt.in_port
ident.harvest_type = 'DNS_A'
ident.ip_address = socket.inet_ntoa(answer.rdata)
ident.service_name = answer.name
ident.harvest_time = flow_pkt.timestamp
ident.valid_from = flow_pkt.timestamp
ident.valid_to = flow_pkt.timestamp + \
datetime.timedelta(0, answer.ttl)
ident.id_hash = self._hash_identity(ident)
ident.location_logical = self.policy.locations.get_location \
(ident.dpid, ident.in_port)
db_dict = ident.dbdict()
#*** Write DNS identity metadata to database collection:
self.logger.debug("writing db_dict=%s", db_dict)
self.identities.insert_one(db_dict)
elif answer.type == 5:
#*** DNS CNAME Record:
ident = self.Identity()
ident.dpid = flow_pkt.dpid
ident.in_port = flow_pkt.in_port
ident.harvest_type = 'DNS_CNAME'
ident.service_name = answer.name
ident.service_alias = answer.cname
ident.harvest_time = flow_pkt.timestamp
ident.valid_from = flow_pkt.timestamp
ident.valid_to = flow_pkt.timestamp + \
datetime.timedelta(0, answer.ttl)
ident.id_hash = self._hash_identity(ident)
ident.location_logical = self.policy.locations.get_location \
(ident.dpid, ident.in_port)
db_dict = ident.dbdict()
#*** Write DNS identity metadata to database collection:
self.logger.debug("writing db_dict=%s", db_dict)
self.identities.insert_one(db_dict)
else:
#*** Not a type that we handle yet
self.logger.debug("Unhandled DNS answer type=%s", answer.type)
def findbymac(self, mac_addr, test=0):
"""
Passed a MAC address. Searches the identities collection in reverse
chronological order and returns the first match as a dictionary
version of an Identity class, or an empty dictionary if not found
Setting test=1 returns database query execution statistics
"""
db_data = {'mac_address': mac_addr}
if not test:
result = self.identities.find(db_data).sort('valid_from', -1).limit(1)
else:
return self.identities.find(db_data).sort('valid_from', -1).limit(1).explain()
if result.count():
result0 = list(result)[0]
self.logger.debug("found result=%s len=%s", result0, len(result0))
return result0
else:
self.logger.debug("mac_addr=%s not found", mac_addr)
return {}
def findbynode(self, host_name, harvest_type='any', regex=False, test=0):
"""
Find by node name
Pass it the name of the node to search for. Additionally,
can set:
regex=True Treat host_name as a regular expression
harvest_type= Specify what type of harvest (i.e. DHCP)
Returns a dictionary version of an Identity class, or 0 if not found
Setting test=1 returns database query execution statistics
"""
db_data = {'host_name': host_name}
if harvest_type != 'any':
#*** Filter by harvest type:
db_data['harvest_type'] = harvest_type
if regex:
#*** Regular expression search on service name:
regx = re.compile(host_name)
db_data['host_name'] = regx
#*** Filter by documents that are still within 'best before' time:
db_data['valid_to'] = {'$gte': datetime.datetime.now()}
#*** Run db search:
if not test:
result = self.identities.find(db_data).sort('valid_from', -1).limit(1)
else:
return self.identities.find(db_data).sort('valid_from', -1).limit(1).explain()
if result.count():
result0 = list(result)[0]
self.logger.debug("found result=%s len=%s", result0, len(result0))
return result0
else:
self.logger.debug("host_name=%s not found", host_name)
return 0
def findbyservice(self, service_name, harvest_type='any', regex=False,
ip_address='any', test=0):
"""
Find by service name
Pass it the name of the service to search for. Additionally,
can set:
regex=True Treat service_name as a regular expression
harvest_type= Specify what type of harvest (i.e. DNS_A)
ip_address= Look for specific IP address
Returns a dictionary version of an Identity class, or 0 if not found
Setting test=1 returns database query execution statistics
"""
db_data = {'service_name': service_name}
if harvest_type != 'any':
#*** Filter by harvest type:
db_data['harvest_type'] = harvest_type
if ip_address != 'any':
#*** Filter by IP address:
db_data['ip_address'] = ip_address
if regex:
#*** Regular expression search on service name:
regx = re.compile(service_name)
db_data['service_name'] = regx
#*** Filter by documents that are still within 'best before' time:
db_data['valid_to'] = {'$gte': datetime.datetime.now()}
#*** Run db search:
if not test:
result = self.identities.find(db_data).sort('valid_from', -1).limit(1)
else:
return self.identities.find(db_data).sort('valid_from', -1).limit(1).explain()
if result.count():
result0 = list(result)[0]
self.logger.debug("found result=%s len=%s", result0, len(result0))
return result0
else:
self.logger.debug("service_name=%s not found", service_name)
return 0
def _hash_identity(self, ident):
"""
Generate a hash of the current identity used for deduplication
where the same identity is received periodically, or from multiple
sources.
"""
hash_result = hashlib.md5()
id_tuple = (ident.harvest_type,
ident.host_name,
ident.service_name,
ident.user_id)
id_tuple_as_string = str(id_tuple)
hash_result.update(id_tuple_as_string)
return hash_result.hexdigest()
#=================== PRIVATE ==============================================
def _parse_lldp_detail(self, lldpPayload):
"""
Parse basic LLDP parameters from an LLDP packet payload
"""
result = {}
while lldpPayload:
tlv_header = struct.unpack("!H", lldpPayload[:2])[0]
tlv_type = tlv_header >> 9
tlv_len = (tlv_header & 0x01ff)
lldpDU = lldpPayload[2:tlv_len + 2]
if tlv_type == 0:
#*** TLV type 0 is end of TLVs so break the while loop:
break
else:
tlv_subtype = struct.unpack("!B", lldpDU[0:1]) \
if tlv_type == 2 else ""
startbyte = 1 if tlv_type == 2 else 0
tlv_datafield = lldpDU[startbyte:tlv_len]
#*** Pull out values from specific TLVs:
if tlv_type == 3:
result['TTL'] = struct.unpack("!h", tlv_datafield)[0]
elif tlv_type == 4:
result['port_desc'] = tlv_datafield
elif tlv_type == 5:
result['system_name'] = tlv_datafield
elif tlv_type == 6:
result['system_desc'] = tlv_datafield
else:
pass
lldpPayload = lldpPayload[2 + tlv_len:]
return result
def mac_addr(address):
"""
Convert a MAC address to a readable/printable string
"""
return ':'.join('%02x' % ord(b) for b in address)
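#*** Minimal, self-contained sketch (illustration only, not used by the
#*** module above) of the LLDP TLV header layout that _parse_lldp_detail()
#*** decodes: the top 7 bits of the 16-bit header carry the TLV type and
#*** the low 9 bits carry the TLV length. The example TLV bytes below are
#*** invented purely for illustration.
if __name__ == '__main__':
    import struct as _struct
    #*** Build a System Name TLV (type 5) carrying the 4-byte value 'host':
    _example_tlv = _struct.pack('!H', (5 << 9) | 4) + b'host'
    _header = _struct.unpack('!H', _example_tlv[:2])[0]
    print('type=%s len=%s value=%s' % (_header >> 9, _header & 0x01ff,
                                       _example_tlv[2:2 + (_header & 0x01ff)]))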
|
|
"""Shared class to maintain Plex server instances."""
import logging
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from homeassistant.helpers.dispatcher import dispatcher_send
from .const import (
CONF_CLIENT_IDENTIFIER,
CONF_SERVER,
CONF_SHOW_ALL_CONTROLS,
CONF_USE_EPISODE_ART,
DEFAULT_VERIFY_SSL,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
_LOGGER = logging.getLogger(__name__)
# Set default headers sent by plexapi
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
class PlexServer:
"""Manages a single Plex server connection."""
def __init__(self, hass, server_config, options=None):
"""Initialize a Plex server instance."""
self._hass = hass
self._plex_server = None
self._known_clients = set()
self._known_idle = set()
self._url = server_config.get(CONF_URL)
self._token = server_config.get(CONF_TOKEN)
self._server_name = server_config.get(CONF_SERVER)
self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
self.options = options
self.server_choice = None
# Header conditionally added as it is not available in config entry v1
if CONF_CLIENT_IDENTIFIER in server_config:
plexapi.X_PLEX_IDENTIFIER = server_config[CONF_CLIENT_IDENTIFIER]
plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
def connect(self):
"""Connect to a Plex server directly, obtaining direct URL if necessary."""
def _connect_with_token():
account = plexapi.myplex.MyPlexAccount(token=self._token)
available_servers = [
(x.name, x.clientIdentifier)
for x in account.resources()
if "server" in x.provides
]
if not available_servers:
raise NoServersFound
if not self._server_name and len(available_servers) > 1:
raise ServerNotSpecified(available_servers)
self.server_choice = (
self._server_name if self._server_name else available_servers[0][0]
)
self._plex_server = account.resource(self.server_choice).connect()
def _connect_with_url():
session = None
if self._url.startswith("https") and not self._verify_ssl:
session = Session()
session.verify = False
self._plex_server = plexapi.server.PlexServer(
self._url, self._token, session
)
if self._url:
_connect_with_url()
else:
_connect_with_token()
def refresh_entity(self, machine_identifier, device, session):
"""Forward refresh dispatch to media_player."""
unique_id = f"{self.machine_identifier}:{machine_identifier}"
_LOGGER.debug("Refreshing %s", unique_id)
dispatcher_send(
self._hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(unique_id),
device,
session,
)
def update_platforms(self):
"""Update the platform entities."""
_LOGGER.debug("Updating devices")
available_clients = {}
new_clients = set()
try:
devices = self._plex_server.clients()
sessions = self._plex_server.sessions()
except plexapi.exceptions.BadRequest:
_LOGGER.exception("Error requesting Plex client data from server")
return
except requests.exceptions.RequestException as ex:
_LOGGER.warning(
"Could not connect to Plex server: %s (%s)", self.friendly_name, ex
)
return
for device in devices:
self._known_idle.discard(device.machineIdentifier)
available_clients[device.machineIdentifier] = {"device": device}
if device.machineIdentifier not in self._known_clients:
new_clients.add(device.machineIdentifier)
_LOGGER.debug("New device: %s", device.machineIdentifier)
for session in sessions:
for player in session.players:
self._known_idle.discard(player.machineIdentifier)
available_clients.setdefault(
player.machineIdentifier, {"device": player}
)
available_clients[player.machineIdentifier]["session"] = session
if player.machineIdentifier not in self._known_clients:
new_clients.add(player.machineIdentifier)
_LOGGER.debug("New session: %s", player.machineIdentifier)
new_entity_configs = []
for client_id, client_data in available_clients.items():
if client_id in new_clients:
new_entity_configs.append(client_data)
else:
self.refresh_entity(
client_id, client_data["device"], client_data.get("session")
)
self._known_clients.update(new_clients)
idle_clients = (self._known_clients - self._known_idle).difference(
available_clients
)
for client_id in idle_clients:
self.refresh_entity(client_id, None, None)
self._known_idle.add(client_id)
if new_entity_configs:
dispatcher_send(
self._hass,
PLEX_NEW_MP_SIGNAL.format(self.machine_identifier),
new_entity_configs,
)
dispatcher_send(
self._hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
sessions,
)
@property
def plex_server(self):
"""Return the plexapi PlexServer instance."""
return self._plex_server
@property
def friendly_name(self):
"""Return name of connected Plex server."""
return self._plex_server.friendlyName
@property
def machine_identifier(self):
"""Return unique identifier of connected Plex server."""
return self._plex_server.machineIdentifier
@property
def url_in_use(self):
"""Return URL used for connected Plex server."""
return self._plex_server._baseurl # pylint: disable=W0212
@property
def use_episode_art(self):
"""Return use_episode_art option."""
return self.options[MP_DOMAIN][CONF_USE_EPISODE_ART]
@property
def show_all_controls(self):
"""Return show_all_controls option."""
return self.options[MP_DOMAIN][CONF_SHOW_ALL_CONTROLS]
@property
def library(self):
"""Return library attribute from server object."""
return self._plex_server.library
def playlist(self, title):
"""Return playlist from server object."""
return self._plex_server.playlist(title)
def create_playqueue(self, media, **kwargs):
"""Create playqueue on Plex server."""
return plexapi.playqueue.PlayQueue.create(self._plex_server, media, **kwargs)
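# Minimal, self-contained sketch (illustration only, not used by the class
# above) of the set arithmetic update_platforms() applies: clients seen
# before that are not already marked idle and were not reported in the
# current poll are the ones to mark idle. The identifiers are made up.
if __name__ == "__main__":
    known_clients = {"client-a", "client-b", "client-c"}
    known_idle = {"client-c"}  # already marked idle
    available_clients = {"client-a"}  # reported this poll
    idle_clients = (known_clients - known_idle).difference(available_clients)
    print(idle_clients)  # {'client-b'}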
|
|
"""
Component wrappers are created by the ``start`` command after the associated
component's server has been started.
"""
import os
import sys
import time
import logging
import traceback
from functools import partial
from cStringIO import StringIO
import xml.etree.cElementTree as ElementTree
from xml.sax.saxutils import escape
try:
import resource
except ImportError: # pragma no cover
pass # Not available on Windows.
import openmdao.util.log
from analysis_server.varwrapper import _find_var_wrapper, _float2str
from analysis_server.monitor import FileMonitor
# import var wrappers so they get registered
import analysis_server.floatwrapper
import analysis_server.intwrapper
import analysis_server.strwrapper
import analysis_server.boolwrapper
import analysis_server.enumwrapper
import analysis_server.arrwrapper
import analysis_server.objwrapper
import analysis_server.listwrapper
from analysis_server.filewrapper import FileWrapper
class TreeNode(object):
def __init__(self):
self.children = {}
def add_child(self, parts):
if parts[0] not in self.children:
self.children[parts[0]] = TreeNode()
if len(parts) > 1:
self.children[parts[0]].add_child(parts[1:])
def visit(self, prenode, postnode, name=''):
prenode(name, self)
for cname, node in sorted(self.children.items()):
path = '.'.join((name, cname)) if name else cname
node.visit(prenode, postnode, path)
postnode(name, self)
def create_tree(names):
root = TreeNode()
for name in names:
root.add_child(name.split('.'))
return root
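# Minimal usage sketch (illustration only): create_tree() groups dotted
# variable names into a TreeNode hierarchy, and visit() wraps each node in
# pre/post callbacks in sorted order. The names below are made up.
if __name__ == '__main__':
    def _demo_pre(name, node):
        print('enter %s' % (name or '<root>'))
    def _demo_post(name, node):
        print('leave %s' % (name or '<root>'))
    create_tree(['comp.x', 'comp.y', 'other']).visit(_demo_pre, _demo_post)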
class ComponentWrapper(object):
"""
Component wrapper providing a ModelCenter AnalysisServer interface,
based on the protocol described in:
http://www.phoenix-int.com/~AnalysisServer/commands/index.html
Wraps component `comp`, named `name`, with configuration `cfg` on `server`.
`send_reply` and `send_exc` are used to communicate back to the client.
name: string
Instance name.
proxy: proxy
Proxy to remote component.
cfg: :class:`server._WrapperConfig`
Component configuration data.
manager: proxy
Proxy to remote manager hosting remote component.
send_reply: callable
Used to send a reply message back to client.
send_exc: callable
Used to send an exception message back to client.
logger: :class:`logging.Logger`
Used for progress, errors, etc.
"""
def __init__(self, name, proxy, cfg, manager, send_reply, send_exc):
self._name = name
self._comp = proxy
self._cfg = cfg
self._manager = manager
self._send_reply = send_reply
self._send_exc = send_exc
self._monitors = {} # Maps from monitor_id to monitor.
self._wrappers = {} # Maps from internal var path to var wrapper.
self._path_map = {} # Maps from external path to (var wrapper, attr).
self._start = None
self._rusage = None # For ps() on UNIX.
self._logger = logging.getLogger(name+'_wrapper')
def _get_var_wrapper(self, ext_path):
"""
Return '(wrapper, attr)' for `ext_path`.
ext_path: string
External reference for variable.
"""
try:
return self._path_map[ext_path]
except KeyError:
# Determine internal path to variable.
ext_attr = None
if ext_path in self._cfg.properties:
int_path = self._cfg.properties[ext_path]
epath = ext_path
else:
epath, _, ext_attr = ext_path.partition('.')
if ext_attr in self._cfg.properties:
int_path = self._cfg.properties[ext_attr]
else:
raise RuntimeError('no such property <%s>.' % ext_path)
try:
wrapper = self._wrappers[int_path]
except KeyError:
# Find variable.
val = self._comp.get(int_path)
wrapper_class = _find_var_wrapper(val)
if wrapper_class is None:
raise RuntimeError('%s: unsupported variable type %r.'
% (ext_path, type(val).__name__))
# Wrap it.
wrapper = wrapper_class(self._comp, int_path, epath, self._cfg)
if wrapper_class is FileWrapper:
wrapper.set_proxy(self._manager)
self._wrappers[int_path] = wrapper
attr = ext_attr or 'value'
map_value = (wrapper, attr)
self._path_map[ext_path] = map_value
return map_value
def pre_delete(self):
""" Prepare for deletion. """
for monitor in self._monitors.values():
monitor.stop()
self._comp.pre_delete()
def run(self, req_id):
"""
Runs a component instance.
req_id: string
'Raw' mode request identifier.
"""
try:
if sys.platform != 'win32':
self._rusage = resource.getrusage(resource.RUSAGE_SELF)
self._start = time.time()
try:
self._comp.run()
except Exception as exc:
self._logger.exception('run() failed:')
raise RuntimeError('%s' % exc)
else:
self._send_reply('%s completed.' % self._name, req_id)
finally:
self._start = None
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def get(self, path, req_id):
"""
Returns the value of a variable.
path: string
External variable reference.
req_id: string
'Raw' mode request identifier.
"""
try:
wrapper, attr = self._get_var_wrapper(path)
self._send_reply(wrapper.get(attr, path), req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def _pre_xml(self, lines, gzipped, name, node):
if node.children: # Group
if name:
lines.append('<Group name="%s">' % name.split('.')[-1])
else:
lines.append('<Group>')
else:
vwrapper, attr = self._get_var_wrapper(name)
try:
lines.append(vwrapper.get_as_xml(gzipped))
except Exception as exc:
raise type(exc)("Can't get %r: %s %s" % (name, vwrapper, exc))
def _post_xml(self, lines, gzipped, name, node):
if node.children:
lines.append('</Group>')
def get_hierarchy(self, req_id, gzipped):
"""
Return all inputs & outputs as XML.
req_id: string
'Raw' mode request identifier.
gzipped: bool
If True, file data is gzipped and then base64 encoded.
"""
try:
group = ''
lines = []
lines.append("<?xml version='1.0' encoding='utf-8'?>")
tree = create_tree(self._cfg.properties)
tree.visit(partial(self._pre_xml, lines, gzipped),
partial(self._post_xml, lines, gzipped))
self._send_reply('\n'.join(lines), req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def invoke(self, method, full, req_id):
"""
Invokes a method on a component instance.
method: string
External method reference.
full: bool
If True, return result as XML.
req_id: string
'Raw' mode request identifier.
"""
try:
try:
attr = self._cfg.methods[method]
except KeyError:
raise RuntimeError('no such method <%s>.' % method)
result = self._comp.invoke(attr)
if result is None:
reply = ''
elif isinstance(result, float):
reply = _float2str(result)
elif isinstance(result, basestring):
reply = result.encode('string_escape')
else:
reply = str(result)
# Setting 'download' True since we have no idea about side-effects.
if full:
reply = """\
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\
<response>\
<version>100.0</version>\
<download>true</download>\
<string>%s</string>\
</response>""" % escape(reply)
self._send_reply(reply, req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def list_array_values(self, path, req_id):
"""
Lists all the values of an array variable.
path: string
External reference to array.
req_id: string
'Raw' mode request identifier.
"""
try:
raise NotImplementedError('listArrayValues')
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def list_methods(self, full, req_id):
"""
Lists all methods available on a component instance.
full: bool
If True, include 'full/long' name.
req_id: string
'Raw' mode request identifier.
"""
try:
lines = ['']
for name in sorted(self._cfg.methods):
line = '%s()' % name
if full:
line += ' fullName="%s/%s"' % (self._cfg.classname, name)
lines.append(line)
lines[0] = '%d methods found:' % (len(lines)-1)
self._send_reply('\n'.join(lines), req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def list_monitors(self, req_id):
"""
Lists all available monitorable items on a component instance.
req_id: string
'Raw' mode request identifier.
"""
try:
root = self._comp.get_abs_directory()
text_files = self._comp.list_text_files()
lines = ['%d monitors:' % len(text_files)]
lines.extend(sorted(text_files))
self._send_reply('\n'.join(lines), req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def list_properties(self, path, req_id):
"""
Lists all available variables and their sub-properties on a component
instance or sub-variable.
path: string
External reference.
req_id: string
'Raw' mode request identifier.
"""
try:
self._send_reply(self._list_properties(path), req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def _list_properties(self, path):
"""
Lists all available variables and their sub-properties on a component
instance or sub-variable.
path: string
External reference.
"""
lines = ['']
try:
wrapper, attr = self._get_var_wrapper(path)
except RuntimeError:
# Must be a subsystem.
if path:
path += '.'
length = len(path)
groups = set()
for ext_path in sorted(self._cfg.properties):
if path and not ext_path.startswith(path):
continue
name = ext_path[length:]
# rest = ext_path[length:]
# name, _, rest = rest.partition('.')
# if rest:
# if name in groups:
# continue
# groups.add(name)
# typ = 'com.phoenix_int.aserver.PHXGroup'
# access = 'sg'
# else:
wrapper, attr = self._get_var_wrapper(ext_path)
typ = wrapper.phx_type
access = wrapper.phx_access
lines.append('%s (type=%s) (access=%s)' % (name, typ, access))
else:
lines.extend(wrapper.list_properties())
lines[0] = '%d properties found:' % (len(lines)-1)
return '\n'.join(lines)
def list_values(self, path, req_id):
"""
Lists all available variables and their sub-properties on a component
instance or sub-variable.
path: string
External reference.
req_id: string
'Raw' mode request identifier.
"""
try:
lines = []
# Get list of properties.
props = self._list_properties(path).split('\n')
lines.append(props[0])
if path:
path += '.'
# Collect detailed property information.
for line in props[1:]:
name, typ, access = line.split()
if typ == '(type=com.phoenix_int.aserver.PHXGroup)':
val = 'Group: %s' % name
lines.append('%s %s %s vLen=%d val=%s'
% (name, typ, access, len(val), val))
else:
ext_path = path + name
wrapper, attr = self._get_var_wrapper(ext_path)
val = wrapper.get('value', ext_path)
lines.append('%s %s %s vLen=%d val=%s'
% (name, typ, access, len(val), val))
if path:
continue # No sub_props.
sub_props = self._list_properties(ext_path).split('\n')
sub_props = sub_props[1:]
lines.append(' %d SubProps found:' % len(sub_props))
for line in sub_props:
name, typ, access = line.split()
if typ == '(type=com.phoenix_int.aserver.PHXGroup)':
val = 'Group: %s' % name
lines.append('%s %s %s vLen=%d val=%s'
% (name, typ, access, len(val), val))
else:
val = wrapper.get(name, ext_path)
lines.append('%s %s %s vLen=%d val=%s'
% (name, typ, access, len(val), val))
self._send_reply('\n'.join(lines), req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def list_values_url(self, path, req_id):
"""
Lists all available variables and their sub-properties on a component
instance or sub-variable. This version supplies a URL for file data
if DirectFileTransfer is supported.
path: string
External reference.
req_id: string
'Raw' mode request identifier.
"""
self.list_values(path, req_id)
def start_monitor(self, path, req_id):
"""
Starts a monitor on a raw output file or available monitor.
path: string
Monitor reference.
req_id: string
'Raw' mode request identifier.
"""
try:
path = os.path.join(self._comp.get_abs_directory(), path)
monitor = FileMonitor(self._comp, path, 'r',
req_id, self._send_reply)
monitor.start()
self._monitors[str(req_id)] = monitor # Monitor id is request id.
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def stop_monitor(self, monitor_id, req_id):
"""
Stops a monitor on a raw output file or available monitor.
monitor_id: string
Monitor identifier.
req_id: string
'Raw' mode request identifier.
"""
try:
monitor = self._monitors.pop(monitor_id)
# Invalid monitor_id intercepted by server.py
except KeyError: # pragma no cover
raise RuntimeError('No registered monitor for %r' % monitor_id)
else:
monitor.stop()
self._send_reply('', req_id)
def ps(self, req_id):
"""
Lists all running processes for a component instance.
req_id: string
'Raw' mode request identifier.
"""
try:
pid = os.getpid()
command = os.path.basename(sys.executable)
if self._start is None: # Component not running.
# Forcing PID to zero helps with testing.
reply = """\
<Processes length='1'>
<Process pid='0'>
<ParentPID>0</ParentPID>
<PercentCPU>0.0</PercentCPU>
<Memory>0</Memory>
<Time>0</Time>
<WallTime>0</WallTime>
<Command>%s</Command>
</Process>
</Processes>""" % escape(command)
else:
now = time.time()
walltime = now - self._start
if sys.platform == 'win32': # pragma no cover
reply = """\
<Processes length='1'>
<Process pid='%d'>
<ParentPID>0</ParentPID>
<PercentCPU>0.0</PercentCPU>
<Memory>0</Memory>
<Time>0</Time>
<WallTime>%.1f</WallTime>
<Command>%s</Command>
</Process>
</Processes>""" % (pid, walltime, escape(command))
else:
rusage = resource.getrusage(resource.RUSAGE_SELF)
cputime = (rusage.ru_utime + rusage.ru_stime) \
- (self._rusage.ru_utime + self._rusage.ru_stime)
if walltime > 0:
percent_cpu = cputime / walltime
else:
percent_cpu = 0.
# struct_rusage exposes ru_maxrss (there is no 'maxrss' attribute):
memory = rusage.ru_maxrss * resource.getpagesize()
reply = """\
<Processes length='1'>
<Process pid='%d'>
<ParentPID>%d</ParentPID>
<PercentCPU>%.1f</PercentCPU>
<Memory>%d</Memory>
<Time>%.1f</Time>
<WallTime>%.1f</WallTime>
<Command>%s</Command>
</Process>
</Processes>""" % (pid, os.getppid(), percent_cpu, memory, cputime, walltime,
escape(command))
self._send_reply(reply, req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def set(self, path, valstr, req_id):
"""
Sets the value of `path` to `valstr`.
path: string
External reference to variable.
valstr: string
Value to set.
req_id: string
'Raw' mode request identifier.
"""
# Quotes around the value are semi-optional.
if valstr.startswith('"') and valstr.endswith('"'):
valstr = valstr[1:-1]
try:
self._set(path, valstr)
self._send_reply('value set for <%s>' % path, req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
def _set(self, path, valstr, gzipped=False):
"""
Sets the value of `path` to `valstr`.
path: string
External reference to variable.
valstr: string
Value to set.
gzipped: bool
If True, file data is gzipped and then base64 encoded.
"""
wrapper, attr = self._get_var_wrapper(path)
wrapper.set(attr, path, valstr, gzipped)
def set_hierarchy(self, xml, req_id):
"""
Set hierarchy of variable values from `xml`.
xml: string
XML describing values to be set.
req_id: string
'Raw' mode request identifier.
"""
try:
#header, _, xml = xml.partition('\n')
logging.info("XML:\n%s" % xml)
strm = StringIO()
strm.write(xml)
strm.seek(0)
tree = ElementTree.ElementTree()
root = tree.parse(strm)
#root = ElementTree.fromstring(xml)
# for var in root.findall('Variable'):
# valstr = var.text or ''
# if var.get('gzipped', 'false') == 'true':
# gzipped = True
# else:
# gzipped = False
# try:
# self._set(var.attrib['name'], valstr, gzipped)
# except Exception as exc:
# self._logger.exception("Can't set %r", var.attrib['name'])
# raise type(exc)("Can't set %r from %r: %s"
# % (var.attrib['name'], valstr[:1000], exc))
for elem in tree.iter(tag='Variable'):
# ElementTree elements have no getpath(); use the 'name' attribute,
# as in the commented-out block above.
path = elem.get('name')
logging.info("PATH: %s" % path)
self._send_reply('values set', req_id)
except Exception:
self._send_exc(traceback.format_exc(), req_id)
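# Minimal, self-contained sketch (illustration only) of the <Variable>
# parsing pattern that set_hierarchy() is built around (see the
# commented-out block above): each Variable element names a path and
# carries the value to set, optionally flagged gzipped="true". The XML
# below is made up.
if __name__ == '__main__':
    import xml.etree.ElementTree as _ET
    _demo_root = _ET.fromstring(
        '<Variables>'
        '<Variable name="comp.x">1.5</Variable>'
        '<Variable name="comp.label" gzipped="false">hello</Variable>'
        '</Variables>')
    for _var in _demo_root.findall('Variable'):
        print('%s = %r (gzipped=%s)' % (_var.attrib['name'], _var.text or '',
                                        _var.get('gzipped', 'false')))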
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class DeploymentsOperations(object):
"""DeploymentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-02-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-02-01"
self.config = config
def delete(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Delete deployment.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to be deleted.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def check_existence(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Checks whether deployment exists.
:param resource_group_name: The name of the resource group to check.
The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = (response.status_code == 204)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, deployment_name, properties=None, custom_headers=None, raw=False, **operation_config):
"""Create a named template deployment using a template.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param properties: The deployment properties.
:type properties: :class:`DeploymentProperties
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentProperties>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.Deployment(properties=properties)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Deployment')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Get a deployment.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentExtended
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def cancel(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Cancel a currently running template deployment.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def validate(
self, resource_group_name, deployment_name, properties=None, custom_headers=None, raw=False, **operation_config):
"""Validate a deployment template.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param properties: The deployment properties.
:type properties: :class:`DeploymentProperties
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentProperties>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentValidateResult
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentValidateResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.Deployment(properties=properties)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Deployment')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 400]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def export_template(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Exports a deployment template.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentExportResult
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentExportResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExportResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, resource_group_name, filter=None, top=None, custom_headers=None, raw=False, **operation_config):
"""Get a list of deployments.
:param resource_group_name: The name of the resource group to filter
by. The name is case insensitive.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top: The number of results to return. If null is passed,
returns all deployments.
:type top: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentExtendedPaged
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentExtendedPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
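# Minimal sketch (illustration only, not part of the generated client) of
# the URL templating used by the operations above: each operation keeps a
# template with {placeholders} that are filled per request (the real
# client routes this through format_url() and the serializer). The
# identifiers below are made up.
if __name__ == '__main__':
    _template = ('/subscriptions/{subscriptionId}/resourcegroups/'
                 '{resourceGroupName}/providers/Microsoft.Resources/'
                 'deployments/{deploymentName}')
    print(_template.format(
        subscriptionId='00000000-0000-0000-0000-000000000000',
        resourceGroupName='example-rg',
        deploymentName='example-deployment'))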
|
|
from app.api.models.LXDModule import LXDModule
from app.lib.conf import MetaConf
from app.api.utils.firebaseAuthentication import firebaseLogin
from app import __metadata__ as meta
import logging
import requests
import subprocess
import shutil
import os
import yaml
import tarfile
logging = logging.getLogger(__name__)
class LXCImage(LXDModule):
def __init__(self, input):
self.data = {}
if not input.get('remoteHost'):
self.remoteHost = '127.0.0.1'
else:
self.remoteHost = input.get('remoteHost')
if not input.get('fingerprint'):
logging.error('Image fingerprint is required for any image operation')
raise ValueError('Missing image fingerprint.')
self.setFingerprint(input.get('fingerprint'))
if input.get('image'):
self.setImage(input.get('image'))
logging.info('Connecting to LXD')
super(LXCImage, self).__init__(remoteHost=self.remoteHost)
def setAlias(self, input):
logging.debug('Setting image alias to {}'.format(input))
self.data['alias'] = input
def setFingerprint(self, input):
logging.debug('Setting image fingerprint to {}'.format(input))
self.data['fingerprint'] = input
def setImage(self, input):
logging.debug('Setting image to {}'.format(input))
self.data['image'] = input
def getImage(self):
try:
logging.info('Reading image {} details'.format(self.data.get('fingerprint')))
return self.client.api.images[self.data.get('fingerprint')].get().json()['metadata']
except Exception as e:
logging.error('Failed to retrieve information for image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
def deleteImage(self):
try:
logging.info('Deleting image {}'.format(self.data.get('fingerprint')))
image = self.client.images.get(self.data.get('fingerprint'))
image.delete()
except Exception as e:
logging.error('Failed to delete the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
#TODO Refactor this part
def exportImage(self, input, logo=None):
try:
#Check if image exists & Update the fingerprint with the full fingerprint
self.data['fingerprint'] = self.client.images.get(self.data.get('fingerprint')).fingerprint
logging.info('Exporting image {}'.format(self.data.get('fingerprint')))
p2 = subprocess.Popen(["lxc", "image", "export", self.data.get('fingerprint')], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
#Make dir for the export
shutil.rmtree('tmp/images/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
os.makedirs('tmp/images/{}'.format(self.data.get('fingerprint')), exist_ok=True)
#Move the export - Check for the .tar.gz, .tar.xz and .squashfs extensions
if os.path.exists('{}.tar.gz'.format(self.data.get('fingerprint'))):
shutil.move('{}.tar.gz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.tar.gz'.format(self.data.get('fingerprint'))
if os.path.exists('{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('{}.tar.xz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.tar.xz'.format(self.data.get('fingerprint'))
if os.path.exists('{}.squashfs'.format(self.data.get('fingerprint'))):
shutil.move('{}.squashfs'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.squashfs'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.gz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.tar.gz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.tar.gz'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.tar.xz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.tar.xz'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.squashfs'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.squashfs'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.squashfs'.format(self.data.get('fingerprint'))
#Prepare & Move the yaml file
self.prepareImageYAML(input)
shutil.move('image.yaml', 'tmp/images/{}/'.format(self.data.get('fingerprint')))
#TODO Prepare README.md
file = open('tmp/images/{}/README.md'.format(self.data.get('fingerprint')), 'a')
file.write('#README\n')
file.write(input.get('documentation'))
file.close()
#TODO Prepare Logo
if logo:
logo.save('tmp/images/{}/{}'.format(self.data.get('fingerprint'), 'logo.png'))
return MetaConf().getConfRoot() + '/tmp/images/{}'.format(self.data.get('fingerprint'))
except Exception as e:
logging.error('Failed to export the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
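    # Build image.yaml by combining the user-supplied fields with the image metadata reported by the LXD API.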
def prepareImageYAML(self, input):
if input.get('metadata') is None: input['metadata'] = ''
data = {
'title': input.get('imageAlias', ''),
'description': input.get('imageDescription', ''),
'author': {
'name': input.get('authorName', ''),
'alias': '',
'email': input.get('authorEmail', '')
},
'license': input.get('license', ''),
'readme': 'README.md',
'tags': input.get('imageTags', '').split(','),
'logo': 'logo.png',
'image': input.get('image'),
'metadata': input.get('metadata'),
'fingerprint': self.data.get('fingerprint'),
'public': True
}
data.update(self.client.api.images[self.data.get('fingerprint')].get().json()['metadata'])
with open('image.yaml', 'w') as yamlFile:
yaml.dump(data, yamlFile, default_flow_style=False)
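    # Push a previously exported image to the image hub: log in, upload image.yaml, then upload each file the hub reports as required.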
def pushImage(self, input):
try:
#Login
result = firebaseLogin(input.get('username'), input.get('password'))
if result.ok:
token = result.json()['idToken']
else:
raise ValueError('Login failed: {}'.format(result.json()['error']['message']))
self.data['fingerprint'] = self.client.images.get(self.data.get('fingerprint')).fingerprint
if os.path.exists('tmp/images/{}'.format(self.data.get('fingerprint'))):
logging.info('Image exists. Ready for push.')
print ("Image exists. Ready for push.")
#Prepare the files for upload.
with open('tmp/images/{}/image.yaml'.format(self.data.get('fingerprint'))) as stream:
yamlData = yaml.load(stream)
files = {
'yaml': open('tmp/images/{}/image.yaml'.format(self.data.get('fingerprint')), 'rb')
}
headers = {'Authorization': token}
response = requests.post('{}/cliAddPackage'.format(meta.IMAGE_HUB), headers=headers, files=files, data={'id': self.data.get('fingerprint')})
if not response.ok:
    logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
    raise ValueError(response.json()['message'])
print("yaml uploaded successfully.")
print("Uploading:")
for file in response.json()['filesRequired']:
for key in file:
files = {}
if file[key] != '':
if os.path.exists('tmp/images/{}/{}'.format(self.data.get('fingerprint'), file[key])):
files['file'] = open('tmp/images/{}/{}'.format(self.data.get('fingerprint'), file[key]), 'rb')
requests.post('{}/cliAddFile'.format(meta.IMAGE_HUB), headers=headers, files=files, data={'id': self.data.get('fingerprint')}).json()
print('File {} uploaded successfully'.format(file[key]))
else:
print('File {} does not exist'.format(file[key]))
else:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
logging.exception('Image is not prepared. Please prepare the image using the command lxdui image prep <fingerprint>')
raise ValueError('Image is not prepared. Please prepare the image using the command: lxdui image prep <fingerprint>')
except Exception as e:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
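    # Download the packaged image from the hub, extract it, import it with 'lxc image import' using whichever metadata/rootfs pair is present, and alias it with the title from image.yaml.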
def importImage(self, input):
logging.info('Importing image {}'.format(self.data.get('fingerprint')))
shutil.rmtree('tmp/downloaded/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
os.makedirs('tmp/downloaded/{}'.format(self.data.get('fingerprint')), exist_ok=True)
# Download and extract the file
r = requests.get('{}/cliDownloadRepo/{}'.format(meta.IMAGE_HUB, self.data.get('fingerprint')), stream=True)
with open('tmp/downloaded/{}/package.tar.gz'.format(self.data.get('fingerprint')), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
tfile = tarfile.open('tmp/downloaded/{}/package.tar.gz'.format(self.data.get('fingerprint')), 'r:gz')
tfile.extractall('tmp/downloaded/{}/'.format(self.data.get('fingerprint')))
with open('tmp/downloaded/{}/image.yaml'.format(self.data.get('fingerprint'))) as stream:
yamlData = yaml.load(stream)
if os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.squashfs".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.squashfs".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
shutil.rmtree('tmp/downloaded/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
image = self.client.images.get(self.data.get('fingerprint'))
image.add_alias(yamlData['title'], yamlData['title'])
# self.client.images.create(image_data='tmp/images/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd.tar.xz',
# metadata='tmp/images/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd/meta-394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd.tar.xz')
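    # Return a plain-text table of the images published on the hub.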
def listHub(self, input):
try:
logging.info('Listing images')
output = "# | Title | Fingerprint | OS | Author\n"
result = requests.get('{}/cliListRepos'.format(meta.IMAGE_HUB))
i = 1
for r in result.json():
output += '{} | {} | {} | {} | {}\n'.format(i, r['title'], r['fingerprint'], r['properties'].get('name'), r['author']['name'])
i+=1
return output
except Exception as e:
logging.error('Failed to list images from kuti.io')
logging.exception(e)
raise ValueError(e)
|
|
import platform
import unittest
import os
from nose.plugins.attrib import attr
from parameterized.parameterized import parameterized
from conans.model.version import Version
from conans.client.build.cmake import CMake
from conans.test.utils.tools import TestClient
conanfile_py = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
build_policy="missing"
def package_info(self):
self.cpp_info.cppflags = ["MyFlag1", "MyFlag2"]
self.cpp_info.cflags = ["-load", "C:\some\path"]
self.cpp_info.defines = ['MY_DEF=My" \string', 'MY_DEF2=My${} other \string']
"""
chatconanfile_py = """
from conans import ConanFile
class ChatConan(ConanFile):
name = "Chat"
version = "0.1"
requires = "Hello/0.1@lasote/testing"
build_policy="missing"
def package_info(self):
self.cpp_info.cppflags = ["MyChatFlag1", "MyChatFlag2"]
"""
conanfile = """[requires]
Hello/0.1@lasote/testing
"""
cmake = """set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
message(STATUS "CMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}")
message(STATUS "CONAN_CXX_FLAGS=${CONAN_CXX_FLAGS}")
message(STATUS "CMAKE_C_FLAGS=${CMAKE_C_FLAGS}")
message(STATUS "CONAN_C_FLAGS=${CONAN_C_FLAGS}")
message(STATUS "HELLO_CXX_FLAGS=${HELLO_FLAGS}")
message(STATUS "CHAT_CXX_FLAGS=${CHAT_FLAGS}")
message(STATUS "CONAN_DEFINES_HELLO=${CONAN_DEFINES_HELLO}")
message(STATUS "HELLO_DEFINES=${HELLO_DEFINES}")
"""
@attr("slow")
class CMakeFlagsTest(unittest.TestCase):
def _get_line(self, text, begin):
lines = str(text).splitlines()
begin = "-- %s=" % begin
line = [l for l in lines if l.startswith(begin)][0]
flags = line[len(begin):].strip()
self.assertNotIn("'", flags)
self.assertNotIn('"', flags)
return flags
@parameterized.expand([(True, ), (False, )])
def build_app_test(self, targets):
client = TestClient()
conanfile_py = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
def package_info(self):
self.cpp_info.defines = [r'MY_DEF=My${} $string', r'MY_DEF2=My$ other string']
"""
client.save({"conanfile.py": conanfile_py})
client.run("create . lasote/testing")
consumer = """from conans import ConanFile, CMake
import os
class App(ConanFile):
settings = "os", "compiler", "arch", "build_type"
requires = "Hello/0.1@lasote/testing"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
self.run(os.sep.join([".", "bin", "myapp"]))
"""
cmake_app = """set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup(%s)
add_executable(myapp myapp.cpp)
conan_target_link_libraries(myapp)
""" % ("TARGETS" if targets else "")
myapp = r"""#include <iostream>
#define STRINGIFY(x) #x
#define STRINGIFYMACRO(y) STRINGIFY(y)
int main(){
std::cout << "Msg1: " << STRINGIFYMACRO(MY_DEF) << "\n";
std::cout << "Msg2: " << STRINGIFYMACRO(MY_DEF2) << "\n";
}"""
client.save({"conanfile.py": consumer,
"CMakeLists.txt": cmake_app,
"myapp.cpp": myapp
}, clean_first=True)
client.run("install .")
client.run("build .")
self.assertIn("Msg1: My${} $string", client.out)
self.assertIn("Msg2: My$ other string", client.out)
def flags_test(self):
client = TestClient()
client.save({"conanfile.py": conanfile_py})
client.run("export . lasote/testing")
client.save({"conanfile.txt": conanfile,
"CMakeLists.txt": cmake}, clean_first=True)
client.run('install . -g cmake')
client.runner("cmake .", cwd=client.current_folder)
cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
self.assertTrue(cmake_cxx_flags.endswith("MyFlag1 MyFlag2"))
self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2", client.out)
self.assertIn("CMAKE_C_FLAGS= -load C:\some\path", client.out)
self.assertIn("CONAN_C_FLAGS=-load C:\some\path ", client.out)
self.assertIn('CONAN_DEFINES_HELLO=-DMY_DEF=My" \string;-DMY_DEF2=My${} other \string',
client.out)
def transitive_flags_test(self):
client = TestClient()
client.save({"conanfile.py": conanfile_py})
client.run("export . lasote/testing")
client.save({"conanfile.py": chatconanfile_py}, clean_first=True)
client.run("export . lasote/testing")
client.save({"conanfile.txt": conanfile.replace("Hello", "Chat"),
"CMakeLists.txt": cmake}, clean_first=True)
client.run('install . -g cmake')
client.runner("cmake .", cwd=client.current_folder)
cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
self.assertTrue(cmake_cxx_flags.endswith("MyFlag1 MyFlag2 MyChatFlag1 MyChatFlag2"))
self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2 MyChatFlag1 MyChatFlag2",
client.user_io.out)
def targets_flags_test(self):
client = TestClient()
client.save({"conanfile.py": conanfile_py})
client.run("export . lasote/testing")
cmake_targets = cmake.replace("conan_basic_setup()",
"conan_basic_setup(TARGETS)\n"
"get_target_property(HELLO_FLAGS CONAN_PKG::Hello"
" INTERFACE_COMPILE_OPTIONS)\n"
"get_target_property(HELLO_DEFINES CONAN_PKG::Hello"
" INTERFACE_COMPILE_DEFINITIONS)")
client.save({"conanfile.txt": conanfile,
"CMakeLists.txt": cmake_targets},
clean_first=True)
client.run('install . -g cmake')
client.runner("cmake .", cwd=client.current_folder)
cmake_cxx_flags = self._get_line(client.out, "CMAKE_CXX_FLAGS")
self.assertNotIn("My", cmake_cxx_flags)
self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2", client.out)
self.assertIn("HELLO_CXX_FLAGS=-load;C:\some\path;MyFlag1;MyFlag2;"
"$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
"$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
self.assertIn('HELLO_DEFINES=MY_DEF=My" \string;MY_DEF2=My${} other \string;', client.out)
def targets_own_flags_test(self):
client = TestClient()
client.save({"conanfile.py": conanfile_py.replace('version = "0.1"',
'version = "0.1"\n'
' settings = "compiler"')})
client.run("export . lasote/testing")
cmake_targets = cmake.replace("conan_basic_setup()",
"conan_basic_setup(TARGETS)\n"
"get_target_property(HELLO_FLAGS CONAN_PKG::Hello"
" INTERFACE_COMPILE_OPTIONS)\n"
"get_target_property(HELLO_DEFINES CONAN_PKG::Hello"
" INTERFACE_COMPILE_DEFINITIONS)")
client.save({"conanfile.txt": conanfile,
"CMakeLists.txt": cmake_targets},
clean_first=True)
client.run('install . -g cmake')
client.runner("cmake . -DCONAN_CXX_FLAGS=CmdCXXFlag", cwd=client.current_folder)
cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
self.assertNotIn("My", cmake_cxx_flags)
self.assertIn("CmdCXXFlag", cmake_cxx_flags)
self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2 CmdCXXFlag", client.user_io.out)
self.assertIn("HELLO_CXX_FLAGS=-load;C:\some\path;MyFlag1;MyFlag2;"
"$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
"$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
self.assertIn('HELLO_DEFINES=MY_DEF=My" \string;MY_DEF2=My${} other \string;', client.out)
def transitive_targets_flags_test(self):
client = TestClient()
client.save({"conanfile.py": conanfile_py})
client.run("export . lasote/testing")
client.save({"conanfile.py": chatconanfile_py}, clean_first=True)
client.run("export . lasote/testing")
cmake_targets = cmake.replace("conan_basic_setup()",
"conan_basic_setup(TARGETS)\n"
"get_target_property(HELLO_FLAGS CONAN_PKG::Hello"
" INTERFACE_COMPILE_OPTIONS)\n"
"get_target_property(CHAT_FLAGS CONAN_PKG::Chat"
" INTERFACE_COMPILE_OPTIONS)\n"
"get_target_property(HELLO_DEFINES CONAN_PKG::Hello"
" INTERFACE_COMPILE_DEFINITIONS)")
client.save({"conanfile.txt": conanfile.replace("Hello", "Chat"),
"CMakeLists.txt": cmake_targets},
clean_first=True)
client.run('install . -g cmake')
client.runner("cmake .", cwd=client.current_folder)
cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
self.assertNotIn("My", cmake_cxx_flags)
self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2 MyChatFlag1 MyChatFlag2",
client.user_io.out)
self.assertIn("HELLO_CXX_FLAGS=-load;C:\some\path;MyFlag1;MyFlag2;"
"$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
"$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
self.assertIn("CHAT_CXX_FLAGS=MyChatFlag1;MyChatFlag2;"
"$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
"$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
self.assertIn('HELLO_DEFINES=MY_DEF=My" \string;MY_DEF2=My${} other \string;', client.out)
def cmake_test_needed_settings(self):
conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
name = "MyLib"
version = "0.1"
%s
def build(self):
cmake = CMake(self)
"""
for settings_line in ('', 'settings="arch"', 'settings="compiler"'):
client = TestClient()
client.save({"conanfile.py": conanfile % settings_line})
client.run("install .")
client.run("build .")
def cmake_shared_flag_test(self):
conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
name = "MyLib"
version = "0.1"
options = {"shared": [True, False]}
default_options= "shared=%s"
settings = "arch", "compiler"
def build(self):
cmake = CMake(self)
if self.options.shared:
assert(cmake.definitions["BUILD_SHARED_LIBS"] == "ON")
else:
assert(cmake.definitions["BUILD_SHARED_LIBS"] == "OFF")
"""
client = TestClient()
client.save({"conanfile.py": conanfile % "True"})
client.run("build .", ignore_error=True)
self.assertIn("conanbuildinfo.txt file not found", client.user_io.out)
client.run("install .")
client.run("build .")
client.save({"conanfile.py": conanfile % "False"}, clean_first=True)
client.run("install .")
client.run("build .")
def std_flag_applied_test(self):
conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
name = "MyLib"
version = "0.1"
settings = "arch", "compiler", "cppstd"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"mylib.cpp": "auto myfunc(){return 3;}", # c++14 feature
"CMakeLists.txt": """
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_library(mylib mylib.cpp)
target_link_libraries(mylib ${CONAN_LIBS})
"""})
if platform.system() != "Windows":
client.run("install . --install-folder=build -s cppstd=gnu98")
error = client.run("build . --build-folder=build", ignore_error=True)
self.assertTrue(error)
self.assertIn("Error in build()", client.out)
# Now specify c++14
client.run("install . --install-folder=build -s cppstd=gnu14")
client.run("build . --build-folder=build")
self.assertIn("CPP STANDARD: 14 WITH EXTENSIONS ON", client.out)
libname = "libmylib.a" if platform.system() != "Windows" else "mylib.lib"
libpath = os.path.join(client.current_folder, "build", "lib", libname)
self.assertTrue(os.path.exists(libpath))
client.run("install . --install-folder=build -s cppstd=14")
client.run("build . --build-folder=build")
self.assertIn("CPP STANDARD: 14 WITH EXTENSIONS OFF", client.out)
self.assertNotIn("Conan setting CXX_FLAGS flags", client.out)
libname = "libmylib.a" if platform.system() != "Windows" else "mylib.lib"
libpath = os.path.join(client.current_folder, "build", "lib", libname)
self.assertTrue(os.path.exists(libpath))
def standard_20_as_cxx_flag_test(self):
# CMake (as of 1-Jun-2018) does not support the value 20 in the CMAKE_CXX_STANDARD variable
conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
name = "MyLib"
version = "0.1"
settings = "arch", "compiler", "cppstd"
exports_sources = "CMakeLists.txt"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
"""
cmakelists = """
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_set_std()
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"CMakeLists.txt": cmakelists})
def conan_set_std_branch():
# Replicate logic from cmake_common definition of 'macro(conan_set_std)'
cmake_version = CMake.get_version()
return cmake_version < Version("3.12")
client.run("create . user/channel -s cppstd=gnu20 -s compiler=gcc -s compiler.version=8 "
"-s compiler.libcxx=libstdc++11")
if conan_set_std_branch():
self.assertIn("Conan setting CXX_FLAGS flags: -std=gnu++2a", client.out)
else:
self.assertIn("Conan setting CPP STANDARD: 20 WITH EXTENSIONS ON", client.out)
client.run("create . user/channel -s cppstd=20 -s compiler=gcc -s compiler.version=8 "
"-s compiler.libcxx=libstdc++11")
if conan_set_std_branch():
self.assertIn("Conan setting CXX_FLAGS flags: -std=c++2a", client.out)
else:
self.assertIn("Conan setting CPP STANDARD: 20 WITH EXTENSIONS OFF", client.out)
def fpic_applied_test(self):
conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
name = "MyLib"
version = "0.1"
settings = "arch", "compiler"
options = {"fPIC": [True, False]}
default_options = "fPIC=False"
generators = "cmake"
exports_sources = "CMakeLists.txt"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
"""
cmakelists = """
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"CMakeLists.txt": cmakelists})
client.run("create . user/channel -o MyLib:fPIC=True")
self.assertIn("Conan: Adjusting fPIC flag (ON)", client.out)
client.run("create . user/channel -o MyLib:fPIC=False")
self.assertIn("Conan: Adjusting fPIC flag (OFF)", client.out)
client.save({"conanfile.py": conanfile.replace("fPIC", "fpic")}, clean_first=False)
client.run("create . user/channel -o MyLib:fpic=True")
self.assertNotIn("Conan: Adjusting fPIC flag (ON)", client.out)
# Skip fPIC adjustments in basic setup
tmp = cmakelists.replace("conan_basic_setup()", "conan_basic_setup(SKIP_FPIC)")
client.save({"CMakeLists.txt": tmp, "conanfile.py": conanfile}, clean_first=True)
client.run("create . user/channel -o MyLib:fPIC=True")
self.assertNotIn("Conan: Adjusting fPIC flag (ON)", client.out)
|
|
from .finder.core import FinderFactory
from .parser import DoxygenParserFactory, CacheFactory
from .renderer.rst.doxygen import DoxygenToRstRendererFactoryCreatorConstructor, \
RstContentCreator, RenderContext
from .renderer.rst.doxygen.filter import FilterFactory, GlobFactory
from .renderer.rst.doxygen.target import TargetHandlerFactory
from .renderer.rst.doxygen.mask import MaskFactory, NullMaskFactory, NoParameterNamesMask
from .finder.doxygen.core import DoxygenItemFinderFactoryCreator
from .directive.base import BaseDirective, create_warning
from .directive.index import DoxygenIndexDirective, AutoDoxygenIndexDirective
from .directive.file import DoxygenFileDirective, AutoDoxygenFileDirective
from .process import AutoDoxygenProcessHandle
from .exception import BreatheError
from .project import ProjectInfoFactory, ProjectError
from docutils.parsers.rst.directives import unchanged_required, unchanged, flag
from docutils.statemachine import ViewList
from sphinx.domains import cpp, c, python
from sphinx.writers.text import TextWriter
from sphinx.builders.text import TextBuilder
import docutils.nodes
import sphinx.addnodes
import sphinx.ext.mathbase
import os
import fnmatch
import re
import textwrap
import collections
import subprocess
# Somewhat outrageously, reach in and fix a Sphinx regex
cpp._identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*)\b')
class NoMatchingFunctionError(BreatheError):
pass
class UnableToResolveFunctionError(BreatheError):
def __init__(self, signatures):
self.signatures = signatures
class NodeNotFoundError(BreatheError):
pass
class FakeDestination(object):
def write(self, output):
return output
class TextRenderer(object):
def __init__(self, app):
self.app = app
def render(self, nodes, document):
new_document = document.copy()
new_document.children = nodes
writer = TextWriter(TextBuilder(self.app))
output = writer.write(new_document, FakeDestination())
return output.strip()
# Directives
# ----------
class DoxygenFunctionDirective(BaseDirective):
required_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
final_argument_whitespace = True
def __init__(self, node_factory, text_renderer, *args, **kwargs):
BaseDirective.__init__(self, *args, **kwargs)
self.node_factory = node_factory
self.text_renderer = text_renderer
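    # Resolve the (possibly overloaded) function named in the directive argument and render it.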
def run(self):
# Separate possible arguments (delimited by a "(") from the namespace::name
match = re.match(r"([^(]*)(.*)", self.arguments[0])
namespaced_function, args = match.group(1), match.group(2)
# Split the namespace and the function name
try:
(namespace, function_name) = namespaced_function.rsplit("::", 1)
except ValueError:
(namespace, function_name) = "", namespaced_function
try:
project_info = self.project_info_factory.create_project_info(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno)
return warning.warn('doxygenfunction: %s' % e)
try:
finder = self.finder_factory.create_finder(project_info)
except MTimerError as e:
warning = create_warning(None, self.state, self.lineno)
return warning.warn('doxygenfunction: %s' % e)
# Extract arguments from the function name.
args = self.parse_args(args)
finder_filter = self.filter_factory.create_function_finder_filter(namespace, function_name)
matches = []
finder.filter_(finder_filter, matches)
# Create it ahead of time as it is cheap and it is ugly to declare it for both exception
# clauses below
warning = create_warning(
project_info,
self.state,
self.lineno,
namespace='%s::' % namespace if namespace else '',
function=function_name,
args=', '.join(args)
)
try:
node_stack = self.resolve_function(matches, args, project_info)
except NoMatchingFunctionError:
return warning.warn('doxygenfunction: Cannot find function "{namespace}{function}" '
'{tail}')
except UnableToResolveFunctionError as error:
message = 'doxygenfunction: Unable to resolve multiple matches for function ' \
'"{namespace}{function}" with arguments ({args}) {tail}.\n' \
'Potential matches:\n'
# We want to create a raw_text string for the console output and a set of docutils nodes
# for rendering into the final output. We handle the final output as a literal string
# with a txt based list of the options.
raw_text = message
literal_text = ''
# TODO: We're cheating here with the set() as signatures has repeating entries for some
# reason (failures in the matcher_stack code) so we consolidate them by shoving them in
# a set to remove duplicates. Should be fixed!
for i, entry in enumerate(set(error.signatures)):
if i:
literal_text += '\n'
# Replace new lines with a new line & enough spacing to reach the appropriate
# alignment for our simple plain text list
literal_text += '- %s' % entry.replace('\n', '\n ')
raw_text += ' - %s\n' % entry.replace('\n', '\n ')
block = self.node_factory.literal_block('', '', self.node_factory.Text(literal_text))
formatted_message = warning.format(message)
warning_nodes = [
self.node_factory.paragraph(
"", "",
self.node_factory.Text(formatted_message)
),
block
]
result = warning.warn(raw_text, warning_nodes)
return result
target_handler = self.target_handler_factory.create_target_handler(
self.options, project_info, self.state.document
)
filter_ = self.filter_factory.create_outline_filter(self.options)
return self.render(node_stack, project_info, self.options, filter_, target_handler,
NullMaskFactory())
def parse_args(self, function_description):
# Strip off trailing qualifiers
pattern = re.compile(r'''(?<= \)) \s*
(?: const)? \s*
(?: volatile)? \s*
(?: = \s* 0)? \s* $ ''',
re.VERBOSE)
function_description = re.sub(pattern,
'',
function_description)
paren_index = function_description.find('(')
if paren_index == -1:
return []
# If the parentheses are empty, return an empty list so that empty parentheses coming
# from the xml file match the user's function when the user doesn't provide parentheses,
# i.e. when there are no args anyway
elif function_description == '()':
return []
else:
# Parse the function name string, eg. f(int, float) to
# extract the types so we can use them for matching
args = []
num_open_brackets = -1
start = paren_index + 1
for i in range(paren_index, len(function_description)):
c = function_description[i]
if c == '(' or c == '<':
num_open_brackets += 1
elif c == ')' or c == '>':
num_open_brackets -= 1
elif c == ',' and num_open_brackets == 0:
args.append(function_description[start:i].strip())
start = i + 1
args.append(function_description[start:-1].strip())
return args
def resolve_function(self, matches, args, project_info):
if not matches:
raise NoMatchingFunctionError()
if len(matches) == 1:
return matches[0]
node_stack = None
signatures = []
# Iterate over the potential matches
for entry in matches:
text_options = {'no-link': u'', 'outline': u''}
# Render the matches to docutils nodes
target_handler = self.target_handler_factory.create_target_handler(
{'no-link': u''}, project_info, self.state.document
)
filter_ = self.filter_factory.create_outline_filter(text_options)
mask_factory = MaskFactory({'param': NoParameterNamesMask})
nodes = self.render(entry, project_info, text_options, filter_, target_handler,
mask_factory)
# Render the nodes to text
signature = self.text_renderer.render(nodes, self.state.document)
signatures.append(signature)
match = re.match(r"([^(]*)(.*)", signature)
match_args = match.group(2)
# Parse the text to find the arguments
match_args = self.parse_args(match_args)
# Match them against the arg spec
if args == match_args:
node_stack = entry
break
if not node_stack:
raise UnableToResolveFunctionError(signatures)
return node_stack
class DoxygenClassLikeDirective(BaseDirective):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"members": unchanged,
"protected-members": flag,
"private-members": flag,
"undoc-members": flag,
"show": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
name = self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno, kind=self.kind)
return warning.warn('doxygen{kind}: %s' % e)
try:
finder = self.finder_factory.create_finder(project_info)
except MTimerError as e:
warning = create_warning(None, self.state, self.lineno, kind=self.kind)
return warning.warn('doxygen{kind}: %s' % e)
finder_filter = self.filter_factory.create_compound_finder_filter(name, self.kind)
matches = []
finder.filter_(finder_filter, matches)
if len(matches) == 0:
warning = create_warning(project_info, self.state, self.lineno, name=name,
kind=self.kind)
return warning.warn('doxygen{kind}: Cannot find class "{name}" {tail}')
target_handler = self.target_handler_factory.create_target_handler(
self.options, project_info, self.state.document
)
filter_ = self.filter_factory.create_class_filter(name, self.options)
mask_factory = NullMaskFactory()
return self.render(matches[0], project_info, self.options, filter_, target_handler,
mask_factory)
class DoxygenClassDirective(DoxygenClassLikeDirective):
kind = "class"
class DoxygenStructDirective(DoxygenClassLikeDirective):
kind = "struct"
class DoxygenContentBlockDirective(BaseDirective):
"""Base class for namespace and group directives which have very similar behaviours"""
required_arguments = 1
optional_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"content-only": flag,
"outline": flag,
"members": flag,
"protected-members": flag,
"private-members": flag,
"undoc-members": flag,
"no-link": flag
}
has_content = False
def run(self):
name = self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno, kind=self.kind)
return warning.warn('doxygen{kind}: %s' % e)
try:
finder = self.finder_factory.create_finder(project_info)
except MTimerError as e:
warning = create_warning(None, self.state, self.lineno, kind=self.kind)
return warning.warn('doxygen{kind}: %s' % e)
finder_filter = self.filter_factory.create_finder_filter(self.kind, name)
matches = []
finder.filter_(finder_filter, matches)
# It shouldn't be possible to have too many matches as namespaces & groups in their nature
# are merged together if there are multiple declarations, so we only check for no matches
if not matches:
warning = create_warning(project_info, self.state, self.lineno, name=name,
kind=self.kind)
return warning.warn('doxygen{kind}: Cannot find namespace "{name}" {tail}')
if 'content-only' in self.options:
# Unpack the single entry in the matches list
(node_stack,) = matches
filter_ = self.filter_factory.create_content_filter(self.kind, self.options)
# Having found the compound node for the namespace or group in the index we want to grab
# the contents of it which match the filter
contents_finder = self.finder_factory.create_finder_from_root(node_stack[0],
project_info)
contents = []
contents_finder.filter_(filter_, contents)
# Replaces matches with our new starting points
matches = contents
target_handler = self.target_handler_factory.create_target_handler(
self.options, project_info, self.state.document
)
filter_ = self.filter_factory.create_render_filter(self.kind, self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
target_handler
)
node_list = []
for node_stack in matches:
renderer_factory = renderer_factory_creator.create_factory(
node_stack,
self.state,
self.state.document,
filter_,
target_handler,
)
mask_factory = NullMaskFactory()
context = RenderContext(node_stack, mask_factory, self.directive_args)
object_renderer = renderer_factory.create_renderer(context)
node_list.extend(object_renderer.render())
return node_list
class DoxygenNamespaceDirective(DoxygenContentBlockDirective):
kind = "namespace"
class DoxygenGroupDirective(DoxygenContentBlockDirective):
kind = "group"
# This class was the same as the DoxygenBaseDirective above, except that it
# wraps the output in a definition_list before passing it back. This should be
# abstracted in a far nicer way to avoid repeating so much code
#
# Now we've removed the definition_list wrap so we really need to refactor this!
class DoxygenBaseItemDirective(BaseDirective):
required_arguments = 1
optional_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def create_finder_filter(self, namespace, name):
"""Creates a filter to find the node corresponding to this item."""
return self.filter_factory.create_member_finder_filter(
namespace, name, self.kind)
def run(self):
try:
namespace, name = self.arguments[0].rsplit("::", 1)
except ValueError:
namespace, name = "", self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno, kind=self.kind)
return warning.warn('doxygen{kind}: %s' % e)
try:
finder = self.finder_factory.create_finder(project_info)
except MTimerError as e:
warning = create_warning(None, self.state, self.lineno, kind=self.kind)
return warning.warn('doxygen{kind}: %s' % e)
finder_filter = self.create_finder_filter(namespace, name)
matches = []
finder.filter_(finder_filter, matches)
if len(matches) == 0:
display_name = "%s::%s" % (namespace, name) if namespace else name
warning = create_warning(project_info, self.state, self.lineno, kind=self.kind,
display_name=display_name)
return warning.warn('doxygen{kind}: Cannot find {kind} "{display_name}" {tail}')
target_handler = self.target_handler_factory.create_target_handler(
self.options, project_info, self.state.document
)
filter_ = self.filter_factory.create_outline_filter(self.options)
node_stack = matches[0]
mask_factory = NullMaskFactory()
return self.render(node_stack, project_info, self.options, filter_, target_handler,
mask_factory)
class DoxygenVariableDirective(DoxygenBaseItemDirective):
kind = "variable"
def render(self, node_stack, project_info, options, filter_, target_handler, mask_factory):
# Remove 'extern' keyword as Sphinx doesn't support it.
definition = node_stack[0].definition
extern = 'extern '
if definition.startswith(extern):
definition = definition[len(extern):]
self.directive_args[1] = [definition]
return DoxygenBaseItemDirective.render(self, node_stack, project_info, options, filter_,
target_handler, mask_factory)
class DoxygenDefineDirective(DoxygenBaseItemDirective):
kind = "define"
class DoxygenEnumDirective(DoxygenBaseItemDirective):
kind = "enum"
class DoxygenEnumValueDirective(DoxygenBaseItemDirective):
kind = "enumvalue"
def create_finder_filter(self, namespace, name):
return self.filter_factory.create_enumvalue_finder_filter(name)
class DoxygenTypedefDirective(DoxygenBaseItemDirective):
kind = "typedef"
class DoxygenUnionDirective(DoxygenBaseItemDirective):
kind = "union"
def create_finder_filter(self, namespace, name):
# Unions are stored in the xml file with their fully namespaced name.
# We're assuming C++ namespaces here; it might be better to make this
# dependent on the file type.
xml_name = "%s::%s" % (namespace, name) if namespace else name
return self.filter_factory.create_compound_finder_filter(xml_name, 'union')
# Setup Administration
# --------------------
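# Bundles a directive class with the extra constructor arguments it needs, so that Sphinx can instantiate it with only the standard directive arguments.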
class DirectiveContainer(object):
def __init__(self, directive, *args):
self.directive = directive
self.args = args
# Required for sphinx to inspect
self.required_arguments = directive.required_arguments
self.optional_arguments = directive.optional_arguments
self.option_spec = directive.option_spec
self.has_content = directive.has_content
self.final_argument_whitespace = directive.final_argument_whitespace
def __call__(self, *args):
call_args = []
call_args.extend(self.args)
call_args.extend(args)
return self.directive(*call_args)
class DoxygenDirectiveFactory(object):
directives = {
"doxygenindex": DoxygenIndexDirective,
"autodoxygenindex": AutoDoxygenIndexDirective,
"doxygenfunction": DoxygenFunctionDirective,
"doxygenstruct": DoxygenStructDirective,
"doxygenclass": DoxygenClassDirective,
"doxygenvariable": DoxygenVariableDirective,
"doxygendefine": DoxygenDefineDirective,
"doxygenenum": DoxygenEnumDirective,
"doxygenenumvalue": DoxygenEnumValueDirective,
"doxygentypedef": DoxygenTypedefDirective,
"doxygenunion": DoxygenUnionDirective,
"doxygennamespace": DoxygenNamespaceDirective,
"doxygengroup": DoxygenGroupDirective,
"doxygenfile": DoxygenFileDirective,
"autodoxygenfile": AutoDoxygenFileDirective,
}
def __init__(self, node_factory, text_renderer, root_data_object,
renderer_factory_creator_constructor, finder_factory,
project_info_factory, filter_factory, target_handler_factory, parser_factory):
self.node_factory = node_factory
self.text_renderer = text_renderer
self.root_data_object = root_data_object
self.renderer_factory_creator_constructor = renderer_factory_creator_constructor
self.finder_factory = finder_factory
self.project_info_factory = project_info_factory
self.filter_factory = filter_factory
self.target_handler_factory = target_handler_factory
self.parser_factory = parser_factory
def create_function_directive_container(self):
# Pass text_renderer to the function directive
return DirectiveContainer(
self.directives["doxygenfunction"],
self.node_factory,
self.text_renderer,
self.root_data_object,
self.renderer_factory_creator_constructor,
self.finder_factory,
self.project_info_factory,
self.filter_factory,
self.target_handler_factory,
self.parser_factory
)
def create_directive_container(self, type_):
return DirectiveContainer(
self.directives[type_],
self.root_data_object,
self.renderer_factory_creator_constructor,
self.finder_factory,
self.project_info_factory,
self.filter_factory,
self.target_handler_factory,
self.parser_factory
)
def get_config_values(self, app):
# All DirectiveContainers maintain references to this project info factory
# so we can update this to update them
self.project_info_factory.update(
app.config.breathe_projects,
app.config.breathe_default_project,
app.config.breathe_domain_by_extension,
app.config.breathe_domain_by_file_pattern,
app.config.breathe_projects_source,
app.config.breathe_build_directory
)
class NodeFactory(object):
def __init__(self, *args):
self.sources = args
def __getattr__(self, node_name):
for source in self.sources:
try:
return getattr(source, node_name)
except AttributeError:
pass
raise NodeNotFoundError(node_name)
class RootDataObject(object):
node_type = "root"
class PathHandler(object):
def __init__(self, config_directory, sep, basename, join):
self.config_directory = config_directory
self.sep = sep
self.basename = basename
self.join = join
def includes_directory(self, file_path):
# Check for backslash or forward slash as we don't know what platform we're on and sometimes
# the doxygen paths will have forward slash even on Windows.
return bool(file_path.count('\\')) or bool(file_path.count('/'))
def resolve_path(self, directory, filename):
"""Returns a full path to the filename in the given directory assuming that if the directory
path is relative, then it is relative to the conf.py directory.
"""
# os.path.join does the appropriate handling if the directory path is absolute
return self.join(self.config_directory, directory, filename)
def write_file(directory, filename, content):
# Check the directory exists
if not os.path.exists(directory):
os.makedirs(directory)
# Write the file with the provided contents
with open(os.path.join(directory, filename), "w") as f:
f.write(content)
class MTimerError(Exception):
pass
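# Thin wrapper around os.path.getmtime that turns a missing file into an MTimerError.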
class MTimer(object):
def __init__(self, getmtime):
self.getmtime = getmtime
def get_mtime(self, filename):
try:
return self.getmtime(filename)
except OSError:
raise MTimerError('Cannot find file: %s' % os.path.realpath(filename))
class FileStateCache(object):
"""
Stores the modified time of the various doxygen xml files against the
reStructuredText file that they are referenced from so that we know which
reStructuredText files to rebuild if the doxygen xml is modified.
We store the information in the environment object so that it is pickled
down and stored between builds as Sphinx is designed to do.
"""
def __init__(self, mtimer, app):
self.app = app
self.mtimer = mtimer
def update(self, source_file):
if not hasattr(self.app.env, "breathe_file_state"):
self.app.env.breathe_file_state = {}
new_mtime = self.mtimer.get_mtime(source_file)
mtime, docnames = self.app.env.breathe_file_state.setdefault(
source_file, (new_mtime, set())
)
docnames.add(self.app.env.docname)
self.app.env.breathe_file_state[source_file] = (new_mtime, docnames)
def get_outdated(self, app, env, added, changed, removed):
if not hasattr(self.app.env, "breathe_file_state"):
return []
stale = []
for filename, info in self.app.env.breathe_file_state.items():
old_mtime, docnames = info
if self.mtimer.get_mtime(filename) > old_mtime:
stale.extend(docnames)
return list(set(stale).difference(removed))
def purge_doc(self, app, env, docname):
if not hasattr(self.app.env, "breathe_file_state"):
return
toremove = []
for filename, info in self.app.env.breathe_file_state.items():
_, docnames = info
docnames.discard(docname)
if not docnames:
toremove.append(filename)
for filename in toremove:
del self.app.env.breathe_file_state[filename]
class DomainDirectiveFactory(object):
# A mapping from node kinds to cpp domain classes and directive names.
cpp_classes = {
'class': (cpp.CPPClassObject, 'class'),
'struct': (cpp.CPPClassObject, 'class'),
'function': (cpp.CPPFunctionObject, 'function'),
'friend': (cpp.CPPFunctionObject, 'function'),
'slot': (cpp.CPPFunctionObject, 'function'),
'enum': (cpp.CPPTypeObject, 'type'),
'typedef': (cpp.CPPTypeObject, 'type'),
'union': (cpp.CPPTypeObject, 'type'),
'namespace': (cpp.CPPTypeObject, 'type'),
# Use CPPClassObject for enum values as the cpp domain doesn't have a directive for
# enum values and CPPMemberObject requires a type.
'enumvalue': (cpp.CPPClassObject, 'member'),
'define': (c.CObject, 'macro')
}
python_classes = {
'function': (python.PyModulelevel, 'function'),
'variable': (python.PyClassmember, 'attribute')
}
@staticmethod
def fix_python_signature(sig):
def_ = 'def '
if sig.startswith(def_):
sig = sig[len(def_):]
# Doxygen uses an invalid separator ('::') in Python signatures. Replace them with '.'.
return sig.replace('::', '.')
@staticmethod
def create(domain, args):
if domain == 'c':
return c.CObject(*args)
if domain == 'py':
cls, name = DomainDirectiveFactory.python_classes.get(
args[0], (python.PyClasslike, 'class'))
args[1] = [DomainDirectiveFactory.fix_python_signature(n) for n in args[1]]
else:
cls, name = DomainDirectiveFactory.cpp_classes.get(
args[0], (cpp.CPPMemberObject, 'member'))
# Replace the directive name because domain directives don't know how to handle
# Breathe's "doxygen" directives.
args = [name] + args[1:]
return cls(*args)
# Setup
# -----
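# Sphinx entry point: wire up the parser, finder and renderer factories, register the breathe directives and config values, and hook in the auto-doxygen and cache-invalidation callbacks.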
def setup(app):
cache_factory = CacheFactory()
cache = cache_factory.create_cache()
path_handler = PathHandler(app.confdir, os.sep, os.path.basename, os.path.join)
mtimer = MTimer(os.path.getmtime)
file_state_cache = FileStateCache(mtimer, app)
parser_factory = DoxygenParserFactory(cache, path_handler, file_state_cache)
glob_factory = GlobFactory(fnmatch.fnmatch)
filter_factory = FilterFactory(glob_factory, path_handler)
item_finder_factory_creator = DoxygenItemFinderFactoryCreator(parser_factory, filter_factory)
index_parser = parser_factory.create_index_parser()
finder_factory = FinderFactory(index_parser, item_finder_factory_creator)
# Create a math_nodes object with a displaymath member for the displaymath
# node so that we can treat it in the same way as the nodes & addnodes
# modules in the NodeFactory
math_nodes = collections.namedtuple("MathNodes", ["displaymath"])
math_nodes.displaymath = sphinx.ext.mathbase.displaymath
node_factory = NodeFactory(docutils.nodes, sphinx.addnodes, math_nodes)
rst_content_creator = RstContentCreator(ViewList, textwrap.dedent)
renderer_factory_creator_constructor = DoxygenToRstRendererFactoryCreatorConstructor(
node_factory,
parser_factory,
DomainDirectiveFactory,
rst_content_creator
)
# Assume general build directory is the doctree directory without the last component. We strip
# off any trailing slashes so that dirname correctly drops the last part. This can be overridden
# with the breathe_build_directory config variable
build_dir = os.path.dirname(app.doctreedir.rstrip(os.sep))
project_info_factory = ProjectInfoFactory(app.srcdir, build_dir, app.confdir, fnmatch.fnmatch)
target_handler_factory = TargetHandlerFactory(node_factory)
root_data_object = RootDataObject()
text_renderer = TextRenderer(app)
directive_factory = DoxygenDirectiveFactory(
node_factory,
text_renderer,
root_data_object,
renderer_factory_creator_constructor,
finder_factory,
project_info_factory,
filter_factory,
target_handler_factory,
parser_factory
)
DoxygenFunctionDirective.app = app
def add_directive(name):
app.add_directive(name, directive_factory.create_directive_container(name))
add_directive('doxygenindex')
add_directive('doxygenstruct')
add_directive('doxygenenum')
add_directive('doxygenenumvalue')
add_directive('doxygentypedef')
add_directive('doxygenunion')
add_directive('doxygenclass')
add_directive('doxygenfile')
add_directive('doxygennamespace')
add_directive('doxygengroup')
add_directive('doxygenvariable')
add_directive('doxygendefine')
add_directive('autodoxygenindex')
add_directive('autodoxygenfile')
app.add_directive(
"doxygenfunction",
directive_factory.create_function_directive_container(),
)
app.add_config_value("breathe_projects", {}, True)
app.add_config_value("breathe_default_project", "", True)
# Provide reasonable defaults for domain_by_extension mapping. Can be overridden by users.
app.add_config_value("breathe_domain_by_extension", {'py': 'py'}, True)
app.add_config_value("breathe_domain_by_file_pattern", {}, True)
app.add_config_value("breathe_projects_source", {}, True)
app.add_config_value("breathe_build_directory", '', True)
app.add_config_value("breathe_default_members", (), True)
app.add_config_value("breathe_implementation_filename_extensions", ['.c', '.cc', '.cpp'], True)
app.add_config_value("breathe_doxygen_config_options", {}, True)
breathe_css = "breathe.css"
if (os.path.exists(os.path.join(app.confdir, "_static", breathe_css))):
app.add_stylesheet(breathe_css)
doxygen_handle = AutoDoxygenProcessHandle(
path_handler,
subprocess.check_call,
write_file,
project_info_factory
)
def doxygen_hook(app):
doxygen_handle.generate_xml(
app.config.breathe_projects_source,
app.config.breathe_doxygen_config_options
)
app.connect("builder-inited", doxygen_hook)
app.connect("builder-inited", directive_factory.get_config_values)
app.connect("builder-inited", filter_factory.get_config_values)
app.connect("env-get-outdated", file_state_cache.get_outdated)
app.connect("env-purge-doc", file_state_cache.purge_doc)
|
|
import argparse
import os
import re
from pathlib import Path
from typing import Optional
import py.path
import pytest
from _pytest.config import ExitCode
from _pytest.config import UsageError
from _pytest.main import resolve_collection_argument
from _pytest.main import validate_basetemp
from _pytest.pytester import Testdir
@pytest.mark.parametrize(
"ret_exc",
(
pytest.param((None, ValueError)),
pytest.param((42, SystemExit)),
pytest.param((False, SystemExit)),
),
)
def test_wrap_session_notify_exception(ret_exc, testdir):
returncode, exc = ret_exc
c1 = testdir.makeconftest(
"""
import pytest
def pytest_sessionstart():
raise {exc}("boom")
def pytest_internalerror(excrepr, excinfo):
returncode = {returncode!r}
if returncode is not False:
pytest.exit("exiting after %s..." % excinfo.typename, returncode={returncode!r})
""".format(
returncode=returncode, exc=exc.__name__
)
)
result = testdir.runpytest()
if returncode:
assert result.ret == returncode
else:
assert result.ret == ExitCode.INTERNAL_ERROR
assert result.stdout.lines[0] == "INTERNALERROR> Traceback (most recent call last):"
if exc == SystemExit:
assert result.stdout.lines[-3:] == [
f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
'INTERNALERROR> raise SystemExit("boom")',
"INTERNALERROR> SystemExit: boom",
]
else:
assert result.stdout.lines[-3:] == [
f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
'INTERNALERROR> raise ValueError("boom")',
"INTERNALERROR> ValueError: boom",
]
if returncode is False:
assert result.stderr.lines == ["mainloop: caught unexpected SystemExit!"]
else:
assert result.stderr.lines == [f"Exit: exiting after {exc.__name__}..."]
@pytest.mark.parametrize("returncode", (None, 42))
def test_wrap_session_exit_sessionfinish(
returncode: Optional[int], testdir: Testdir
) -> None:
testdir.makeconftest(
"""
import pytest
def pytest_sessionfinish():
pytest.exit(msg="exit_pytest_sessionfinish", returncode={returncode})
""".format(
returncode=returncode
)
)
result = testdir.runpytest()
if returncode:
assert result.ret == returncode
else:
assert result.ret == ExitCode.NO_TESTS_COLLECTED
assert result.stdout.lines[-1] == "collected 0 items"
assert result.stderr.lines == ["Exit: exit_pytest_sessionfinish"]
@pytest.mark.parametrize("basetemp", ["foo", "foo/bar"])
def test_validate_basetemp_ok(tmp_path, basetemp, monkeypatch):
monkeypatch.chdir(str(tmp_path))
validate_basetemp(tmp_path / basetemp)
@pytest.mark.parametrize("basetemp", ["", ".", ".."])
def test_validate_basetemp_fails(tmp_path, basetemp, monkeypatch):
monkeypatch.chdir(str(tmp_path))
msg = "basetemp must not be empty, the current working directory or any parent directory of it"
with pytest.raises(argparse.ArgumentTypeError, match=msg):
if basetemp:
basetemp = tmp_path / basetemp
validate_basetemp(basetemp)
def test_validate_basetemp_integration(testdir):
result = testdir.runpytest("--basetemp=.")
result.stderr.fnmatch_lines("*basetemp must not be*")
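# Tests for resolve_collection_argument(), which splits a command line argument such as "path::name::parts" into a filesystem path plus the remaining selection parts.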
class TestResolveCollectionArgument:
@pytest.fixture
def invocation_dir(self, testdir: Testdir) -> py.path.local:
testdir.syspathinsert(str(testdir.tmpdir / "src"))
testdir.chdir()
pkg = testdir.tmpdir.join("src/pkg").ensure_dir()
pkg.join("__init__.py").ensure()
pkg.join("test.py").ensure()
return testdir.tmpdir
@pytest.fixture
def invocation_path(self, invocation_dir: py.path.local) -> Path:
return Path(str(invocation_dir))
def test_file(self, invocation_dir: py.path.local, invocation_path: Path) -> None:
"""File and parts."""
assert resolve_collection_argument(invocation_path, "src/pkg/test.py") == (
invocation_dir / "src/pkg/test.py",
[],
)
assert resolve_collection_argument(invocation_path, "src/pkg/test.py::") == (
invocation_dir / "src/pkg/test.py",
[""],
)
assert resolve_collection_argument(
invocation_path, "src/pkg/test.py::foo::bar"
) == (invocation_dir / "src/pkg/test.py", ["foo", "bar"])
assert resolve_collection_argument(
invocation_path, "src/pkg/test.py::foo::bar::"
) == (invocation_dir / "src/pkg/test.py", ["foo", "bar", ""])
def test_dir(self, invocation_dir: py.path.local, invocation_path: Path) -> None:
"""Directory and parts."""
assert resolve_collection_argument(invocation_path, "src/pkg") == (
invocation_dir / "src/pkg",
[],
)
with pytest.raises(
UsageError, match=r"directory argument cannot contain :: selection parts"
):
resolve_collection_argument(invocation_path, "src/pkg::")
with pytest.raises(
UsageError, match=r"directory argument cannot contain :: selection parts"
):
resolve_collection_argument(invocation_path, "src/pkg::foo::bar")
def test_pypath(self, invocation_dir: py.path.local, invocation_path: Path) -> None:
"""Dotted name and parts."""
assert resolve_collection_argument(
invocation_path, "pkg.test", as_pypath=True
) == (invocation_dir / "src/pkg/test.py", [])
assert resolve_collection_argument(
invocation_path, "pkg.test::foo::bar", as_pypath=True
) == (invocation_dir / "src/pkg/test.py", ["foo", "bar"])
assert resolve_collection_argument(invocation_path, "pkg", as_pypath=True) == (
invocation_dir / "src/pkg",
[],
)
with pytest.raises(
UsageError, match=r"package argument cannot contain :: selection parts"
):
resolve_collection_argument(
invocation_path, "pkg::foo::bar", as_pypath=True
)
def test_does_not_exist(self, invocation_path: Path) -> None:
"""Given a file/module that does not exist raises UsageError."""
with pytest.raises(
UsageError, match=re.escape("file or directory not found: foobar")
):
resolve_collection_argument(invocation_path, "foobar")
with pytest.raises(
UsageError,
match=re.escape(
"module or package not found: foobar (missing __init__.py?)"
),
):
resolve_collection_argument(invocation_path, "foobar", as_pypath=True)
def test_absolute_paths_are_resolved_correctly(
self, invocation_dir: py.path.local, invocation_path: Path
) -> None:
"""Absolute paths resolve back to absolute paths."""
full_path = str(invocation_dir / "src")
assert resolve_collection_argument(invocation_path, full_path) == (
py.path.local(os.path.abspath("src")),
[],
)
# ensure full paths given in the command-line without the drive letter resolve
# to the full path correctly (#7628)
drive, full_path_without_drive = os.path.splitdrive(full_path)
assert resolve_collection_argument(
invocation_path, full_path_without_drive
) == (py.path.local(os.path.abspath("src")), [])
def test_module_full_path_without_drive(testdir):
"""Collect and run test using full path except for the drive letter (#7628).
Passing a full path without a drive letter would trigger a bug in py.path.local
where it would keep the full path without the drive letter around, instead of resolving
to the full path, resulting in fixtures node ids not matching against test node ids correctly.
"""
testdir.makepyfile(
**{
"project/conftest.py": """
import pytest
@pytest.fixture
def fix(): return 1
""",
}
)
testdir.makepyfile(
**{
"project/tests/dummy_test.py": """
def test(fix):
assert fix == 1
"""
}
)
fn = testdir.tmpdir.join("project/tests/dummy_test.py")
assert fn.isfile()
drive, path = os.path.splitdrive(str(fn))
result = testdir.runpytest(path, "-v")
result.stdout.fnmatch_lines(
[
os.path.join("project", "tests", "dummy_test.py") + "::test PASSED *",
"* 1 passed in *",
]
)
|
|
from __future__ import absolute_import
import random
import requests
from typing import Any, Dict, List, Optional, SupportsInt, Text, Union, Type
from version import ZULIP_VERSION
from zerver.models import PushDeviceToken, Message, Recipient, UserProfile, \
UserMessage, get_display_recipient, receives_offline_notifications, \
receives_online_notifications
from zerver.models import get_user_profile_by_id
from zerver.lib.avatar import avatar_url
from zerver.lib.request import JsonableError
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.decorator import statsd_increment
from zerver.lib.utils import generate_random_token
from zerver.lib.redis_utils import get_redis_client
from apns import APNs, Frame, Payload, SENT_BUFFER_QTY
from gcm import GCM
from django.conf import settings
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from six.moves import urllib
import base64
import binascii
import logging
import os
import time
import ujson
from functools import partial
if settings.ZILENCER_ENABLED:
from zilencer.models import RemotePushDeviceToken
else: # nocoverage -- Not convenient to add test for this.
from mock import Mock
RemotePushDeviceToken = Mock() # type: ignore # https://github.com/JukkaL/mypy/issues/1188
DeviceToken = Union[PushDeviceToken, RemotePushDeviceToken]
# APNS error codes
ERROR_CODES = {
1: 'Processing error',
2: 'Missing device token', # looks like the token was empty?
3: 'Missing topic', # the topic is encoded in the certificate, so the certificate is probably wrong. bail out.
4: 'Missing payload', # bail out, our message looks empty
5: 'Invalid token size', # the current token has the wrong size, skip it and retry
6: 'Invalid topic size', # cannot happen, we do not send a topic, it is part of the certificate. bail out.
7: 'Invalid payload size', # our payload is probably too big. bail out.
8: 'Invalid token', # our device token is broken, skip it and retry
10: 'Shutdown', # server went into maintenance mode. reported token is the last success, skip it and retry.
None: 'Unknown', # unknown error, we will certainly retry, but the user should limit the number of retries
}
redis_client = get_redis_client()
# Maintain a long-lived Session object to avoid having to re-SSL-handshake
# for each request
connection = None
# `APNS_SANDBOX` should be a bool
assert isinstance(settings.APNS_SANDBOX, bool)
def uses_notification_bouncer():
# type: () -> bool
return settings.PUSH_NOTIFICATION_BOUNCER_URL is not None
def get_apns_key(identifier):
# type: (SupportsInt) -> str
return 'apns:' + str(identifier)
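# For example, get_apns_key(42) returns 'apns:42'; the key is used to stash
# per-notification metadata in redis so the APNs error listener can look it up.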
class APNsMessage(object):
def __init__(self, user_id, tokens, alert=None, badge=None, sound=None,
category=None, **kwargs):
# type: (int, List[Text], Text, int, Text, Text, **Any) -> None
self.frame = Frame()
self.tokens = tokens
expiry = int(time.time() + 24 * 3600)
priority = 10
payload = Payload(alert=alert, badge=badge, sound=sound,
category=category, custom=kwargs)
for token in tokens:
data = {'token': token, 'user_id': user_id}
identifier = random.getrandbits(32)
key = get_apns_key(identifier)
redis_client.hmset(key, data)
redis_client.expire(key, expiry)
self.frame.add_item(token, payload, identifier, expiry, priority)
def get_frame(self):
# type: () -> Frame
return self.frame
def response_listener(error_response):
# type: (Dict[str, SupportsInt]) -> None
identifier = error_response['identifier']
key = get_apns_key(identifier)
if not redis_client.exists(key):
logging.warn("APNs key, {}, doesn't not exist.".format(key))
return
code = error_response['status']
assert isinstance(code, int)
errmsg = ERROR_CODES[code]
data = redis_client.hgetall(key)
token = data['token']
user = get_user_profile_by_id(int(data['user_id']))
b64_token = hex_to_b64(token)
logging.warn("APNS: Failed to deliver APNS notification to %s, reason: %s" % (b64_token, errmsg))
if code == 8:
# Invalid Token, remove from our database
logging.warn("APNS: Removing token from database due to above failure")
try:
PushDeviceToken.objects.get(user=user, token=b64_token).delete()
return # No need to check RemotePushDeviceToken
except PushDeviceToken.DoesNotExist:
pass
if settings.ZILENCER_ENABLED:
# Trying to delete from both models is a bit less efficient than
# deleting from only one model, but it keeps this method simple.
try:
RemotePushDeviceToken.objects.get(user_id=user.id,
token=b64_token).delete()
except RemotePushDeviceToken.DoesNotExist:
pass
def get_connection(cert_file, key_file):
# type: (str, str) -> APNs
connection = APNs(use_sandbox=settings.APNS_SANDBOX,
cert_file=cert_file,
key_file=key_file,
enhanced=True)
connection.gateway_server.register_response_listener(response_listener)
return connection
if settings.APNS_CERT_FILE is not None and os.path.exists(settings.APNS_CERT_FILE): # nocoverage
connection = get_connection(settings.APNS_CERT_FILE,
settings.APNS_KEY_FILE)
def num_push_devices_for_user(user_profile, kind = None):
# type: (UserProfile, Optional[int]) -> int
if kind is None:
return PushDeviceToken.objects.filter(user=user_profile).count()
else:
return PushDeviceToken.objects.filter(user=user_profile, kind=kind).count()
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data):
# type: (bytes) -> Text
return binascii.hexlify(base64.b64decode(data)).decode('utf-8')
def hex_to_b64(data):
# type: (Text) -> bytes
return base64.b64encode(binascii.unhexlify(data.encode('utf-8')))
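# Illustrative round trip (hypothetical token bytes, not real data):
# b64_to_hex(b'q83v') returns 'abcdef', and hex_to_b64('abcdef') returns b'q83v',
# so the two helpers are inverses of each other.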
def _do_push_to_apns_service(user_id, message, apns_connection):
# type: (int, APNsMessage, APNs) -> None
if not apns_connection: # nocoverage
logging.info("Not delivering APNS message %s to user %s due to missing connection" % (message, user_id))
return
frame = message.get_frame()
apns_connection.gateway_server.send_notification_multiple(frame)
def send_apple_push_notification_to_user(user, alert, **extra_data):
# type: (UserProfile, Text, **Any) -> None
devices = PushDeviceToken.objects.filter(user=user, kind=PushDeviceToken.APNS)
send_apple_push_notification(user.id, devices, zulip=dict(alert=alert),
**extra_data)
# Send a push notification to the desired clients
# extra_data is a dict that will be passed to the
# mobile app
@statsd_increment("apple_push_notification")
def send_apple_push_notification(user_id, devices, **extra_data):
# type: (int, List[DeviceToken], **Any) -> None
if not connection:
logging.warning("Attempting to send push notification, but no connection was found. "
"This may be because we could not find the APNS Certificate file.")
return
# Plain b64 token kept for debugging purposes
tokens = [(b64_to_hex(device.token), device.ios_app_id, device.token)
for device in devices]
valid_devices = [device for device in tokens if device[1] in [settings.ZULIP_IOS_APP_ID, None]]
valid_tokens = [device[0] for device in valid_devices]
if valid_tokens:
logging.info("APNS: Sending apple push notification "
"to devices: %s" % (valid_devices,))
zulip_message = APNsMessage(user_id, valid_tokens,
alert=extra_data['zulip']['alert'],
**extra_data)
_do_push_to_apns_service(user_id, zulip_message, connection)
else: # nocoverage
logging.warn("APNS: Not sending notification because "
"tokens didn't match devices: %s/%s" % (tokens, settings.ZULIP_IOS_APP_ID,))
# NOTE: This is used by the check_apns_tokens manage.py command. Do not call it otherwise, as the
# feedback() call can take up to 15s
def check_apns_feedback():
# type: () -> None
feedback_connection = APNs(use_sandbox=settings.APNS_SANDBOX,
cert_file=settings.APNS_CERT_FILE,
key_file=settings.APNS_KEY_FILE)
for token, since in feedback_connection.feedback_server.items():
since_date = timestamp_to_datetime(since)
logging.info("Found unavailable token %s, unavailable since %s" % (token, since_date))
PushDeviceToken.objects.filter(token=hex_to_b64(token), last_updated__lt=since_date,
kind=PushDeviceToken.APNS).delete()
logging.info("Finished checking feedback for stale tokens")
if settings.ANDROID_GCM_API_KEY: # nocoverage
gcm = GCM(settings.ANDROID_GCM_API_KEY)
else:
gcm = None
def send_android_push_notification_to_user(user_profile, data):
# type: (UserProfile, Dict[str, Any]) -> None
devices = list(PushDeviceToken.objects.filter(user=user_profile,
kind=PushDeviceToken.GCM))
send_android_push_notification(devices, data)
@statsd_increment("android_push_notification")
def send_android_push_notification(devices, data, remote=False):
# type: (List[DeviceToken], Dict[str, Any], bool) -> None
if not gcm:
logging.warning("Attempting to send a GCM push notification, but no API key was configured")
return
reg_ids = [device.token for device in devices]
if remote:
DeviceTokenClass = RemotePushDeviceToken
else:
DeviceTokenClass = PushDeviceToken
try:
res = gcm.json_request(registration_ids=reg_ids, data=data, retries=10)
except IOError as e:
logging.warning(str(e))
return
if res and 'success' in res:
for reg_id, msg_id in res['success'].items():
logging.info("GCM: Sent %s as %s" % (reg_id, msg_id))
# res.canonical will contain results when there are duplicate registrations for the same
# device. The "canonical" registration is the latest registration made by the device.
# Ref: http://developer.android.com/google/gcm/adv.html#canonical
if 'canonical' in res:
for reg_id, new_reg_id in res['canonical'].items():
if reg_id == new_reg_id:
# I'm not sure if this should happen. In any case, not really actionable.
logging.warning("GCM: Got canonical ref but it already matches our ID %s!" % (reg_id,))
elif not DeviceTokenClass.objects.filter(token=new_reg_id,
kind=DeviceTokenClass.GCM).count():
# This case shouldn't happen; any time we get a canonical ref it should have been
# previously registered in our system.
#
# That said, recovery is easy: just update the current PDT object to use the new ID.
logging.warning(
"GCM: Got canonical ref %s replacing %s but new ID not registered! Updating." %
(new_reg_id, reg_id))
DeviceTokenClass.objects.filter(
token=reg_id, kind=DeviceTokenClass.GCM).update(token=new_reg_id)
else:
# Since we know the new ID is registered in our system we can just drop the old one.
logging.info("GCM: Got canonical ref %s, dropping %s" % (new_reg_id, reg_id))
DeviceTokenClass.objects.filter(token=reg_id, kind=DeviceTokenClass.GCM).delete()
if 'errors' in res:
for error, reg_ids in res['errors'].items():
if error in ['NotRegistered', 'InvalidRegistration']:
for reg_id in reg_ids:
logging.info("GCM: Removing %s" % (reg_id,))
device = DeviceTokenClass.objects.get(token=reg_id, kind=DeviceTokenClass.GCM)
device.delete()
else:
for reg_id in reg_ids:
logging.warning("GCM: Delivery to %s failed: %s" % (reg_id, error))
# python-gcm handles retrying of the unsent messages.
# Ref: https://github.com/geeknam/python-gcm/blob/master/gcm/gcm.py#L497
def get_alert_from_message(message):
# type: (Message) -> Text
"""
Determine what alert string to display based on the missed messages.
"""
sender_str = message.sender.full_name
if message.recipient.type == Recipient.HUDDLE:
return "New private group message from %s" % (sender_str,)
elif message.recipient.type == Recipient.PERSONAL:
return "New private message from %s" % (sender_str,)
elif message.recipient.type == Recipient.STREAM:
return "New mention from %s" % (sender_str,)
else:
return "New Zulip mentions and private messages from %s" % (sender_str,)
def get_apns_payload(message):
# type: (Message) -> Dict[str, Any]
return {
'alert': get_alert_from_message(message),
'message_ids': [message.id],
}
def get_gcm_payload(user_profile, message):
# type: (UserProfile, Message) -> Dict[str, Any]
content = message.content
content_truncated = (len(content) > 200)
if content_truncated:
content = content[:200] + "..."
android_data = {
'user': user_profile.email,
'event': 'message',
'alert': get_alert_from_message(message),
'zulip_message_id': message.id, # message_id is reserved for CCS
'time': datetime_to_timestamp(message.pub_date),
'content': content,
'content_truncated': content_truncated,
'sender_email': message.sender.email,
'sender_full_name': message.sender.full_name,
'sender_avatar_url': avatar_url(message.sender),
}
if message.recipient.type == Recipient.STREAM:
android_data['recipient_type'] = "stream"
android_data['stream'] = get_display_recipient(message.recipient)
android_data['topic'] = message.subject
elif message.recipient.type in (Recipient.HUDDLE, Recipient.PERSONAL):
android_data['recipient_type'] = "private"
return android_data
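# For a hypothetical stream message the resulting payload looks roughly like
# {'user': '<recipient email>', 'event': 'message', 'alert': 'New mention from ...',
#  'recipient_type': 'stream', 'stream': '<stream name>', 'topic': '<topic>', ...}
# plus the sender/content fields set above; private messages get
# recipient_type 'private' and no stream/topic keys.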
@statsd_increment("push_notifications")
def handle_push_notification(user_profile_id, missed_message):
# type: (int, Dict[str, Any]) -> None
try:
user_profile = get_user_profile_by_id(user_profile_id)
if not (receives_offline_notifications(user_profile) or receives_online_notifications(user_profile)):
return
umessage = UserMessage.objects.get(user_profile=user_profile,
message__id=missed_message['message_id'])
message = umessage.message
if umessage.flags.read:
return
apns_payload = get_apns_payload(message)
gcm_payload = get_gcm_payload(user_profile, message)
if uses_notification_bouncer():
send_notifications_to_bouncer(user_profile_id,
apns_payload,
gcm_payload)
return
android_devices = list(PushDeviceToken.objects.filter(user=user_profile,
kind=PushDeviceToken.GCM))
apple_devices = list(PushDeviceToken.objects.filter(user=user_profile,
kind=PushDeviceToken.APNS))
# TODO: set badge count in a better way
if apple_devices:
send_apple_push_notification(user_profile.id, apple_devices,
badge=1, zulip=apns_payload)
if android_devices:
send_android_push_notification(android_devices, gcm_payload)
except UserMessage.DoesNotExist:
logging.error("Could not find UserMessage with message_id %s" % (missed_message['message_id'],))
def send_notifications_to_bouncer(user_profile_id, apns_payload, gcm_payload):
# type: (int, Dict[str, Any], Dict[str, Any]) -> None
post_data = {
'user_id': user_profile_id,
'apns_payload': apns_payload,
'gcm_payload': gcm_payload,
}
send_json_to_push_bouncer('POST', 'notify', post_data)
def add_push_device_token(user_profile, token_str, kind, ios_app_id=None):
# type: (UserProfile, str, int, Optional[str]) -> None
# If we're sending things to the push notification bouncer
# register this user with them here
if uses_notification_bouncer():
post_data = {
'server_uuid': settings.ZULIP_ORG_ID,
'user_id': user_profile.id,
'token': token_str,
'token_kind': kind,
}
if kind == PushDeviceToken.APNS:
post_data['ios_app_id'] = ios_app_id
send_to_push_bouncer('POST', 'register', post_data)
return
# If another user was previously logged in on the same device and didn't
# properly log out, the token will still be registered to the wrong account
PushDeviceToken.objects.filter(token=token_str).exclude(user=user_profile).delete()
# Overwrite with the latest value
token, created = PushDeviceToken.objects.get_or_create(user=user_profile,
token=token_str,
defaults=dict(
kind=kind,
ios_app_id=ios_app_id))
if not created:
token.last_updated = timezone_now()
token.save(update_fields=['last_updated'])
def remove_push_device_token(user_profile, token_str, kind):
# type: (UserProfile, str, int) -> None
# If we're sending things to the push notification bouncer,
# unregister this token with them here
if uses_notification_bouncer():
# TODO: Make this a remove item
post_data = {
'server_uuid': settings.ZULIP_ORG_ID,
'user_id': user_profile.id,
'token': token_str,
'token_kind': kind,
}
send_to_push_bouncer("POST", "unregister", post_data)
return
try:
token = PushDeviceToken.objects.get(token=token_str, kind=kind)
token.delete()
except PushDeviceToken.DoesNotExist:
raise JsonableError(_("Token does not exist"))
def send_json_to_push_bouncer(method, endpoint, post_data):
# type: (str, str, Dict[str, Any]) -> None
send_to_push_bouncer(
method,
endpoint,
ujson.dumps(post_data),
extra_headers={"Content-type": "application/json"},
)
def send_to_push_bouncer(method, endpoint, post_data, extra_headers=None):
# type: (str, str, Union[Text, Dict[str, Any]], Optional[Dict[str, Any]]) -> None
url = urllib.parse.urljoin(settings.PUSH_NOTIFICATION_BOUNCER_URL,
'/api/v1/remotes/push/' + endpoint)
api_auth = requests.auth.HTTPBasicAuth(settings.ZULIP_ORG_ID,
settings.ZULIP_ORG_KEY)
headers = {"User-agent": "ZulipServer/%s" % (ZULIP_VERSION,)}
if extra_headers is not None:
headers.update(extra_headers)
res = requests.request(method,
url,
data=post_data,
auth=api_auth,
timeout=30,
verify=True,
headers=headers)
# TODO: Think more carefully about how this error handling should work.
if res.status_code >= 500:
raise JsonableError(_("Error received from push notification bouncer"))
elif res.status_code >= 400:
try:
msg = ujson.loads(res.content)['msg']
except Exception:
raise JsonableError(_("Error received from push notification bouncer"))
raise JsonableError(msg)
elif res.status_code != 200:
raise JsonableError(_("Error received from push notification bouncer"))
# If we don't throw an exception, it's a successful bounce!
|
|
############################################################
#
# Written by Alexander Liptak (Summer Student 2017)
# Date: August 2017
# E-Mail: Alexander.Liptak.2015@live.rhul.ac.uk
# Phone: +44 7901 595107
#
# Tested with McStas 2.4
#
############################################################
import os
import sys
from multiprocessing import cpu_count
import numpy as np
from sympy import *
import matplotlib.pyplot as plt
from subprocess import Popen, CREATE_NEW_CONSOLE, check_call
from datetime import datetime
from time import sleep
from glob import glob
from colorama import init, Fore
from shutil import rmtree
from pickle import dump, load
############################################################
# Introduction
############################################################
print("==================================================")
print(" SWINE ")
print("==================================================")
print(" Slit Width Influence on Neutron flux Estimates ")
print("==================================================")
############################################################
# Load ANSI support for coloured text
#
# Colour meaning:
# RED - Error
# YELLOW - Warning
# GREEN - Success
# MAGENTA - Input
############################################################
init(autoreset=True)
############################################################
# Make sure I am running in Windows
############################################################
print("Checking OS...")
if os.name != 'nt':
print(Fore.RED + "This script only works on Windows!")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "You are running a compaible Windows-based OS")
############################################################
# Make sure I am running in Python 3 or higher
# (no longer necessary as running embedded python)
############################################################
print("Checking Python version...")
if sys.version_info[0] < 3:
print(Fore.RED + "This script only works on Python 3!")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Compatible embedded Python "+sys.version.split(" ")[0])
############################################################
# Checking the amount of cores system has for running
# multiple simulations without slowing each sim down
############################################################
print("Checking system...")
cores = cpu_count()
print(Fore.GREEN + "Found [" + str(cores) + "] cores!")
############################################################
# Check if mcstas, mcrun and mclib are in their default dir
############################################################
print("Checking McStas...")
try:
mcrun = glob('C:\\mcstas*\\bin\\mcrun.bat')[0]
mcstas = glob('C:\\mcstas*\\bin\\mcstas.exe')[0]
mclib = glob(glob('C:\\mcstas*\\lib')[0]+'\\*')
gcc = glob('C:\\mcstas-*\\miniconda*\\Library\\mingw-w64\\bin\\')[0]
pydir = glob('C:\\mcstas-*\\miniconda*\\')[0]
except:
print("McStas is not installed in the default directory!")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Using version: " + mcrun.split('\\')[1])
############################################################
# Set temporary environment variables for McStas and GCC
############################################################
os.environ['PATH']=gcc+';'+pydir
############################################################
# Ask user whether to retrieve interactive plot or run sim
# Included end='' in print statement as a hack for colorama
# incompatibility with non-ANSI input()
# GitHub colorama issue #103
############################################################
print("==================================================")
while True:
print(Fore.MAGENTA + "Would like to run a simulation (S), simulate with debug mode (D), or load a previous plot (L)? [S/D/L] ", end='')
load_or_sim = str(input()).upper()
if load_or_sim == 'L' or load_or_sim == 'S' or load_or_sim == 'D':
if load_or_sim == 'L':
unpickle = True
debug = False
if load_or_sim == 'S':
unpickle = False
debug = False
if load_or_sim == 'D':
unpickle = False
debug = True
break
else:
print(Fore.YELLOW + "That is not a recongnised option!")
############################################################
# If user decided to load previous plot, begin unpickling
# For some reason, all unpickled figures default to tkagg,
# so the appropriate maximise commands are used
# Shows plot and exits
############################################################
if unpickle == True:
print(Fore.MAGENTA + "Drag and drop your .swine file here: ", end='')
pickledplot = input()
print("Loading plot...")
fig = load(open(pickledplot, 'rb'))
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
print("Exitting...")
sys.exit()
############################################################
# Opens file for debugging, all external output will be
# piped here
############################################################
if debug == True:
debugfile = open('debug.log', 'a')
debugfile.write("==================================================\n")
############################################################
# Ask user whether to use the default OffSpec-based .instr
# file for this simulation or use their own
############################################################
print("==================================================")
while True:
print(Fore.MAGENTA + "Would like to run from deafult (OffSpec-based) instrument file? [Y/N] ", end='')
default_instr = str(input()).upper()
if default_instr == 'Y' or default_instr == 'N':
break
else:
print(Fore.YELLOW + "That is not a recongnised option!")
############################################################
# If user selected using the default instrument file, slit
# and sample parameter names are set automatically, and
# the user is given choice whether to use the default
# positions or set their own. Then the values for slit and
# sample postions are entered, or defaults are used.
# If the user wants to use their own instrument file, the
# parameters that control McStas slit and sample widths
# and positions need to be entered manually, as do their
# values.
############################################################
cwd = os.getcwd()
if default_instr == "Y":
instr = cwd+'\\resources\\default.instr'
s1w_param = 'slit1_width'
s2w_param = 'slit2_width'
s1p_param = 'slit1_pos'
s2p_param = 'slit2_pos'
sap_param = 'sample_pos'
out_param = 'sample_psd'
print("Enter slit and sample positons after bender (leave empty for default):")
print(Fore.MAGENTA + "McStas position of slit 1 [8.58](m): ", end='')
slit1Pos = float(input() or (8.58))
print(Fore.MAGENTA + "McStas position of slit 2 [13.63](m): ", end='')
slit2Pos = float(input() or (13.63))
print(Fore.MAGENTA + "McStas position of sample [14.03](m): ", end='')
sampPos = float(input() or (14.03))
if default_instr == "N":
print("Make sure your .instr file is formatted as set out in the README!")
print(Fore.MAGENTA + "Drag and drop your .instr file here: ", end='')
instr = input()
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 1 width: ", end='')
s1w_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 2 width: ", end='')
s2w_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 1 position: ", end='')
s1p_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 2 position: ", end='')
s2p_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls sample position: ", end='')
sap_param = str(input())
print(Fore.MAGENTA + "Enter McStas component name of your PSD_monitor: ", end='')
out_param = str(input())
while True:
try:
print("Enter slit and sample positons for your McStas instrument:")
print(Fore.MAGENTA + "McStas position of slit 1 (m): ", end='')
slit1Pos = float(input())
print(Fore.MAGENTA + "McStas position of slit 2 (m): ", end='')
slit2Pos = float(input())
print(Fore.MAGENTA + "McStas position of sample (m): ", end='')
sampPos = float(input())
break
except:
print(Fore.YELLOW + "Blank and non-numeric input is not allowed, try again!")
############################################################
# Only if using custom instrument file, checks whether
# specified parameters that were entered actually exist
# in the file
############################################################
if default_instr == "N":
if (s1w_param not in open(instr).read() or s1w_param == ''
or s2w_param not in open(instr).read() or s2w_param == ''
or s1p_param not in open(instr).read() or s1p_param == ''
or s2p_param not in open(instr).read() or s2p_param == ''
or sap_param not in open(instr).read() or sap_param == ''
or out_param not in open(instr).read() or out_param == ''):
print(Fore.RED + "The selected instrument file does not use these parameters!")
print(Fore.RED + "Edit your instrument file or re-run this script and try again.")
print(Fore.RED + "Exitting...")
sys.exit()
############################################################
# Compile instrument into C using McStas
# Required to CD to the folder containing the instrument file
# to get around McStas GitHub Issue #532
############################################################
print("==================================================")
print("Compiling instrument file into C...")
INSTRtoC = mcstas, '-I', ' -I '.join(mclib), '-t', os.path.split(instr)[1]
try:
os.chdir(os.path.split(instr)[0])
if debug == False:
check_call(' '.join(INSTRtoC), creationflags=CREATE_NEW_CONSOLE)
if debug == True:
check_call(' '.join(INSTRtoC), stdout=debugfile, stderr=debugfile)
os.chdir(cwd)
except:
print(Fore.RED + "An unknown error has occured while compiling to C...")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Compiled to C successfully!")
############################################################
# Compile C code into binary
############################################################
print("Compiling C file into binary...")
CtoEXE = 'gcc', '-o', os.path.splitext(instr)[0]+'.exe', os.path.splitext(instr)[0]+'.c', '-g', '-O2','-lm'
try:
if debug == False:
check_call(' '.join(CtoEXE), creationflags=CREATE_NEW_CONSOLE)
if debug == True:
check_call(' '.join(CtoEXE), stdout=debugfile, stderr=debugfile)
except:
print(Fore.RED + "An unknown error has occured while compiling to binary...")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Compiled to binary successfully!")
############################################################
# Data collection that supports default values
############################################################
print("==================================================")
print("Please input the required values or press the return key for defaults.")
print("Default values are in square brackets and required units are in parentheses.")
print(Fore.MAGENTA + "Angle of sample [1.2](degrees): ", end='')
angle = np.deg2rad(float(input() or (1.2)))
print(Fore.MAGENTA + "Maximum allowed penumbra [80](mm): ", end='')
maxPenumbra = float(input() or (80))
print(Fore.MAGENTA + "Number of steps per slit (higer-finer, lower-faster) [50]: ", end='')
steps1 = int(input() or (50))
print(Fore.MAGENTA + "Number of steps per resolution (higer-finer, lower-faster) [50]: ", end='')
steps2 = int(input() or (50))
print(Fore.MAGENTA + "No of neutrons per simulation [1000000]: ", end='')
neutrons = int(input() or (1000000))
print(Fore.MAGENTA + "Plot description (appended to graph title): ", end='')
description = str(input() or (''))
############################################################
# Define necessary values, variables and equations that
# will have to be solved later
# Make sure all distances are in mm
# penumbra is the sympy equation for calculating the
# penumbra of the footprint with respect to slit widths
# and their separation, as well as the angle of the
# sample
# dQQ is a sympy formula that calculates the resolution
# from slit widths, their positions, and the angle of
# the sample
############################################################
s1s2Sep = (slit2Pos-slit1Pos)*1000
s2SampSep = (sampPos-slit2Pos)*1000
s1 = symbols('s1')
s2 = symbols('s2')
penumbra = (2*((((s1s2Sep+s2SampSep)*(s1+s2))/(2*s1s2Sep))-(s1/2)))/(sin(angle))
dQQ = ((atan((s1+s2)/(s1s2Sep)))/(2*tan(angle)))*100
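############################################################
# Sanity check (illustrative only, not used by the script):
# with the default geometry above (slits at 8.58 m and
# 13.63 m, sample at 14.03 m) and the default 1.2 degree
# sample angle, s1s2Sep is 5050 mm and s2SampSep is 400 mm,
# so 1 mm openings on both slits give a penumbra of roughly
# 55 mm and a dQ/Q of just under 1%.
############################################################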
############################################################
# Set both slit minima to 0, solve penumbra equation for
# maximum allowed slit opening
############################################################
slit1min = 0.0
slit2min = 0.0
slit1max = float(next(iter(solveset(Eq(penumbra.subs(s2,0),maxPenumbra),s1))))
slit2max = float(next(iter(solveset(Eq(penumbra.subs(s1,0),maxPenumbra),s2))))
############################################################
# Create and fill array with all the slit width values
# that will be tested (Simulation 1 only)
############################################################
slit1vals = np.array([])
slit2vals = np.array([])
for i in range(steps1+1):
slit1vals = np.append(slit1vals, slit1min+(i*((slit1max - slit1min)/steps1)))
slit2vals = np.append(slit2vals, slit2min+(i*((slit2max - slit2min)/steps1)))
############################################################
# Create two arrays, correctly sized and filled with
# zeros
# Later, the values that satisfy the constraints will be
# tested and their results will be added to this array
# while those values that do not satisfy the constrains
# will remain as zero
############################################################
intensity = np.zeros((steps1+1,steps1+1))
quality = np.zeros((steps1+1,steps1+1))
############################################################
# Create output directory, if there is some error, closes
############################################################
swinedir = 'SWINE{:[%Y-%m-%d][%H-%M-%S]}'.format(datetime.now())
try:
os.mkdir(swinedir)
except:
print(Fore.RED + "You do not appear to have write permission in this folder!")
print(Fore.RED + "Exitting...")
sys.exit()
############################################################
# Everything ready to start, give user final instructions
############################################################
print("==================================================")
print("The script is now ready to run!")
print("Depending on your settings, this may take over a few hours to complete.")
print("It is recommended to not use the computer while this script is running.")
print(Fore.MAGENTA + "Press any key to continue...", end='')
input()
print("==================================================")
############################################################
# Simulation 1
# Create an empty list that will contain every call to be
# made to McStas
# Create an empty list that will contain debugging
# information
# Solve the penumbra and resolution equations for the
# current combination of slits, and if it satisfies the
# constraints, call and debug info are appended to their
# respective lists
# Zero slit width simulations are also skipped due to
# an issue with the definition of a slit in McStas
# (GitHub Issue #522 in McCode)
############################################################
calls1 = []
debug1 = []
for index1, item1 in enumerate(slit1vals):
for index2, item2 in enumerate(slit2vals):
penumbraCurrent = penumbra.subs([(s1,item1),(s2,item2)])
qualityCurrent = dQQ.subs([(s1,item1),(s2,item2)])
quality[index1,index2] = qualityCurrent
if ((penumbraCurrent <= maxPenumbra) \
and (item1 != 0.0 and item2 != 0.0)):
calls1.append([mcrun, instr,
'-d', swinedir+'/A['+str(index1)+']['+str(index2)+']',
'-n', str(neutrons),
s1p_param+'='+str(slit1Pos), s2p_param+'='+str(slit2Pos),
sap_param+'='+str(sampPos),
s1w_param+'='+str(item1/1000), s2w_param+'='+str(item2/1000)])
debug1.append([item1, item2, penumbraCurrent, qualityCurrent])
############################################################
# Simulation 2
# Like previously, two lists are created that will contain
# the calls and debugging information
# The values for minimum and maximum resolution are obtained
# by taking the ceiling and floor functions of the minimum
# and maximum possible resolutions from the previous
# simulations, plus or minus one (respectively)
# For every resolution to be found, the range of s2 values
# that satisfy the maximum penumbra are found, as well as
# the corresponding s1 values. A check is made that both
# of these values are positive, and a call list is
# generated, along with debugging information
# The final data matrix should be of the format:
# [resolution, [slit 2 widths], [intensities]]
# where the data for the intensity sublist will be
# collected after the simulations complete
############################################################
calls2 = []
debug2 = []
minQ = int(np.ceil(np.amin(quality)))+1
maxQ = int(np.floor(np.amax(quality)))-1
data2 = []
for index, item in enumerate(list(range(minQ, maxQ+1))):
data2.append([])
data2[index].append(item)
s2range = np.delete(np.linspace(0, float(next(iter(solveset(Eq(solveset(Eq(penumbra,maxPenumbra), symbol=s1),solveset(Eq(dQQ,item), symbol=s1)),symbol=s2)))), steps2), 0)
s1range = [float(next(iter(solveset(Eq(dQQ,item), symbol=s1).subs(s2, item)))) for element in s2range]
templist = []
for index2, item2 in enumerate(s2range):
if float(s2range[index2]) > 0 and float(s1range[index2]) > 0:
calls2.append([mcrun, instr,
'-d', swinedir+'/B['+str(item)+']['+str(item2)+']',
'-n', str(neutrons*10),
s1p_param+'='+str(slit1Pos), s2p_param+'='+str(slit2Pos),
sap_param+'='+str(sampPos),
s1w_param+'='+str(s1range[index2]/1000), s2w_param+'='+str(s2range[index2]/1000)])
debug2.append([item, s1range[index2], item2])
templist.append(s2range[index2])
data2[index].append(templist)
data2[index].append([])
############################################################
# Simulation 1
# Runs as many simulations at a time as there are cores
# Keeps count of how many calls have been made so that
# we run them all and none are missed
# Print debugging information
############################################################
calls1_done = 0
while calls1_done < len(calls1):
running_calls = []
for core in range(0, cores):
if calls1_done < len(calls1):
print('| Sim1',
'|',format(int((calls1_done+1)/len(calls1)*100), '03.0f')+'%',
'| Core:',str(core),
'| S1W:',format(debug1[calls1_done][0], '03.2f'),
'| S2W:',format(debug1[calls1_done][1], '03.2f'),
'| PU:',format(float(debug1[calls1_done][2]), '03.2f'),
'| Res:',format(float(debug1[calls1_done][3]), '03.2f'), '|')
if debug == False:
sim = Popen(calls1[calls1_done], creationflags=CREATE_NEW_CONSOLE)
if debug == True:
sim = Popen(calls1[calls1_done], stdout=debugfile, stderr=debugfile)
running_calls.append(sim)
calls1_done = calls1_done + 1
print("--------------------------------------------------")
for call in running_calls:
call.wait()
sleep(cores)
############################################################
# Same thing as above but for second set of simulations
############################################################
calls2_done = 0
while calls2_done < len(calls2):
running_calls = []
for core in range(0, cores):
if calls2_done < len(calls2):
print('| Sim2',
'|',format(int((calls2_done+1)/len(calls2)*100), '03.0f')+'%',
'| Core:',str(core),
'| Res:',str(int(debug2[calls2_done][0])),
'| S1W:',format(debug2[calls2_done][1], '03.2f'),
'| S2W:',format(debug2[calls2_done][2], '03.2f'), '|')
if debug == False:
sim = Popen(calls2[calls2_done], creationflags=CREATE_NEW_CONSOLE)
if debug == True:
sim = Popen(calls2[calls2_done], stdout=debugfile, stderr=debugfile)
running_calls.append(sim)
calls2_done = calls2_done + 1
print("--------------------------------------------------")
for call in running_calls:
call.wait()
sleep(cores)
############################################################
# Reads the specified McRun output file from every subfolder
# If the subfolder is labeled A (sim 1), then the intensity
# scraped from this file is used to update the intensity
# matrix
# If the subfolder is labeled B (sim 2), then the value is
# appended to the correct sublist in the data matrix
############################################################
print("Collecting data...")
os.chdir(swinedir)
sleep(1)
for folder in os.listdir():
dim1 = str(folder).split('][')[0][2:]
dim2 = str(folder).split('][')[1][:-1]
with open(str(folder)+'/'+str(out_param)+'.dat', 'r') as file:
for line in file:
if 'values:' in line:
if str(folder)[0] == 'A':
intensity[int(dim1), int(dim2)] = line.split(' ')[2]
if str(folder)[0] == 'B':
for item in data2:
if int(dim1) == item[0]:
item[2].append(line.split(' ')[2])
break
############################################################
# Delete the swinedir folder to save space; all needed data
# has been collected already
############################################################
print("Cleaning up...")
os.chdir(cwd)
rmtree(swinedir)
os.remove(os.path.basename(instr))
############################################################
# Creates a blank figure that will hold two subplots
# Subplot 1 is created, and on it is plotted the heatmap
# generated from the intensity matrix. A colourbar for
# this data is also generated. Resolution contour lines
# are then obtained from the resolution matrix and plotted
# on the same subplot. The title and axis labels are made
# and the tick values are regenerated.
# Subplot 2 is created, and the data matrix is looped over
# so that a line for every resolution is drawn.
# The legend, title and axis labels are also drawn.
############################################################
print("Plotting data...")
fig = plt.figure()
plt.subplot(121)
heatmap = plt.imshow(intensity, cmap='hot', interpolation='nearest')
contour = plt.contour(quality, antialiased=True)
plt.clabel(contour, inline=1, fontsize=10)
plt.colorbar(heatmap)
plt.title('Neutron intensity at varying slit widths | '+description)
plt.xlabel('Slit 2 width (mm)')
plt.ylabel('Slit 1 width (mm)')
plt.xticks(np.linspace(0, len(slit2vals)-1, num=6), np.linspace(round(slit2min, 2), round(slit2max, 2), num=6))
plt.yticks(np.linspace(0, len(slit1vals)-1, num=6), np.linspace(round(slit1min, 2), round(slit1max, 2), num=6))
plt.subplot(122)
for item in data2:
plt.plot(item[1], item[2], '-', label='dQ/Q = '+str(item[0]))
plt.legend()
plt.title('Intensity against slit 2 width at constant resolution | '+description)
plt.xlabel('Slit 2 width (mm)')
plt.ylabel('Intensity')
############################################################
# The window needs to be maximised as the default view
# makes reading the plots impossible.
############################################################
try:
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
except:
print(Fore.YELLOW + "Error maximising window, please maximise windows manually!")
############################################################
# Experimental pickle support means it is possible to store
# the entire plot in a file and recover it later, still interactive
# Also show the figure and exit
############################################################
print("Saving figure...")
dump(fig, open(swinedir+'.swine', 'wb'))
print("Opening plot...")
plt.show()
print("Exitting...")
sys.exit()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Feature extraction routines."""
import numpy as np
import scipy.signal
import librosa.core
import librosa.util
from . import cache
# -- Chroma -- #
@cache
def logfsgram(y=None, sr=22050, S=None, n_fft=4096, hop_length=512, **kwargs):
'''Compute a log-frequency spectrogram (piano roll) using a fixed-window STFT.
:usage:
>>> # From time-series input
>>> S_log = librosa.logfsgram(y=y, sr=sr)
>>> # Or from power spectrogram input
>>> S = np.abs(librosa.stft(y))**2
>>> S_log = librosa.logfsgram(S=S, sr=sr)
>>> # Convert to chroma
>>> chroma_map = librosa.filters.cq_to_chroma(S_log.shape[0])
>>> C = chroma_map.dot(S_log)
:parameters:
- y : np.ndarray [shape=(n,)] or None
audio time series
- sr : int > 0 [scalar]
audio sampling rate of ``y``
- S : np.ndarray [shape=(d, t)] or None
(optional) power spectrogram
- n_fft : int > 0 [scalar]
FFT window size
- hop_length : int > 0 [scalar]
hop length for STFT. See :func:`librosa.core.stft` for details.
- bins_per_octave : int > 0 [scalar]
Number of bins per octave. Defaults to 12.
- tuning : float in ``[-0.5, 0.5)`` [scalar]
Deviation (in fractions of a bin) from A440 tuning.
If not provided, it will be automatically estimated.
- *kwargs*
Additional keyword arguments.
See :func:`librosa.filters.logfrequency()`
:returns:
- P : np.ndarray [shape=(n_pitches, t)]
P(f, t) contains the energy at pitch bin f, frame t.
.. note:: One of either ``S`` or ``y`` must be provided.
If ``y`` is provided, the power spectrogram is computed
automatically given the parameters ``n_fft`` and ``hop_length``.
If ``S`` is provided, it is used as the input spectrogram, and
``n_fft`` is inferred from its shape.
'''
# If we don't have a spectrogram, build one
if S is None:
# By default, use a power spectrogram
S = np.abs(librosa.stft(y, n_fft=n_fft, hop_length=hop_length))**2
else:
n_fft = (S.shape[0] - 1) * 2
# If we don't have tuning already, grab it from S
if 'tuning' not in kwargs:
bins_per_oct = kwargs.get('bins_per_octave', 12)
kwargs['tuning'] = estimate_tuning(S=S, sr=sr,
bins_per_octave=bins_per_oct)
# Build the CQ basis
cq_basis = librosa.filters.logfrequency(sr, n_fft=n_fft, **kwargs)
return cq_basis.dot(S)
@cache
def chromagram(y=None, sr=22050, S=None, norm=np.inf, n_fft=2048,
hop_length=512, tuning=None, **kwargs):
"""Compute a chromagram from a spectrogram or waveform
:usage:
>>> C = librosa.chromagram(y, sr)
>>> # Use a pre-computed spectrogram
>>> S = np.abs(librosa.stft(y, n_fft=4096))
>>> C = librosa.chromagram(S=S)
:parameters:
- y : np.ndarray [shape=(n,)] or None
audio time series
- sr : int > 0 [scalar]
sampling rate of ``y``
- S : np.ndarray [shape=(d, t)] or None
power spectrogram
- norm : float or None
Column-wise normalization.
See :func:`librosa.util.normalize` for details.
If ``None``, no normalization is performed.
- n_fft : int > 0 [scalar]
FFT window size if provided ``y, sr`` instead of ``S``
- hop_length : int > 0 [scalar]
hop length if provided ``y, sr`` instead of ``S``
- tuning : float in ``[-0.5, 0.5)`` [scalar] or None.
Deviation from A440 tuning in fractional bins (cents).
If ``None``, it is automatically estimated.
- *kwargs*
Additional keyword arguments to parameterize chroma filters.
See :func:`librosa.filters.chroma()` for details.
.. note:: One of either ``S`` or ``y`` must be provided.
If ``y`` is provided, the magnitude spectrogram is computed
automatically given the parameters ``n_fft`` and ``hop_length``.
If ``S`` is provided, it is used as the input spectrogram, and
``n_fft`` is inferred from its shape.
:returns:
- chromagram : np.ndarray [shape=(n_chroma, t)]
Normalized energy for each chroma bin at each frame.
:raises:
- ValueError
if an improper value is supplied for norm
"""
n_chroma = kwargs.get('n_chroma', 12)
# Build the power spectrogram if unspecified
if S is None:
S = np.abs(librosa.stft(y, n_fft=n_fft, hop_length=hop_length))**2
else:
n_fft = (S.shape[0] - 1) * 2
if tuning is None:
tuning = estimate_tuning(S=S, sr=sr, bins_per_octave=n_chroma)
# Get the filter bank
if 'A440' not in kwargs:
kwargs['A440'] = 440.0 * 2.0**(float(tuning) / n_chroma)
chromafb = librosa.filters.chroma(sr, n_fft, **kwargs)
# Compute raw chroma
raw_chroma = np.dot(chromafb, S)
# Compute normalization factor for each frame
if norm is None:
return raw_chroma
return librosa.util.normalize(raw_chroma, norm=norm, axis=0)
# -- Pitch and tuning -- #
@cache
def estimate_tuning(resolution=0.01, bins_per_octave=12, **kwargs):
'''Estimate the tuning of an audio time series or spectrogram input.
:usage:
>>> # With time-series input
>>> print(estimate_tuning(y=y, sr=sr))
>>> # In tenths of a cent
>>> print(estimate_tuning(y=y, sr=sr, resolution=1e-3))
>>> # Using spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> print(estimate_tuning(S=S, sr=sr))
>>> # Using pass-through arguments to ``librosa.feature.piptrack``
>>> print(estimate_tuning(y=y, sr=sr, n_fft=8192,
fmax=librosa.midi_to_hz(128)))
:parameters:
- resolution : float in ``(0, 1)``
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to cents.
- bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
- *kwargs*
Additional keyword arguments. See :func:`librosa.feature.piptrack`
:returns:
- tuning: float in ``[-0.5, 0.5)``
estimated tuning deviation (fractions of a bin)
'''
pitch, mag = librosa.feature.piptrack(**kwargs)
# Only count magnitude where frequency is > 0
pitch_mask = pitch > 0
if pitch_mask.any():
threshold = np.median(mag[pitch_mask])
else:
threshold = 0.0
return librosa.feature.pitch_tuning(pitch[(mag > threshold) & pitch_mask],
resolution=resolution,
bins_per_octave=bins_per_octave)
@cache
def pitch_tuning(frequencies, resolution=0.01, bins_per_octave=12):
'''Given a collection of pitches, estimate its tuning offset
(in fractions of a bin) relative to A440=440.0Hz.
:usage:
>>> # Generate notes at +25 cents
>>> freqs = librosa.cqt_frequencies(24, 55, tuning=0.25)
>>> librosa.feature.pitch_tuning(freqs)
0.25
>>> # Track frequencies from a real spectrogram
>>> pitches, magnitudes, stft = librosa.feature.ifptrack(y, sr)
>>> # Select out pitches with high energy
>>> pitches = pitches[magnitudes > np.median(magnitudes)]
>>> librosa.feature.pitch_tuning(pitches)
:parameters:
- frequencies : array-like, float
A collection of frequencies detected in the signal.
See :func:`librosa.feature.piptrack`
- resolution : float in ``(0, 1)``
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to cents.
- bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
:returns:
- tuning: float in ``[-0.5, 0.5)``
estimated tuning deviation (fractions of a bin)
.. seealso::
- :func:`librosa.feature.estimate_tuning`
For estimating tuning from time-series or spectrogram input
'''
frequencies = np.asarray([frequencies], dtype=float).flatten()
# Trim out any DC components
frequencies = frequencies[frequencies > 0]
# Compute the residual relative to the number of bins
residual = np.mod(bins_per_octave * librosa.core.hz_to_octs(frequencies),
1.0)
# Are we on the wrong side of the semitone?
# A residual of 0.95 is more likely to be a deviation of -0.05
# from the next tone up.
residual[residual >= 0.5] -= 1.0
bins = np.linspace(-0.5, 0.5, int(np.ceil(1. / resolution)), endpoint=False)
counts, tuning = np.histogram(residual, bins)
# return the histogram peak
return tuning[np.argmax(counts)]
@cache
def ifptrack(y, sr=22050, n_fft=4096, hop_length=None, fmin=None,
fmax=None, threshold=0.75):
'''Instantaneous pitch frequency tracking.
:usage:
>>> pitches, magnitudes, D = librosa.feature.ifptrack(y, sr)
:parameters:
- y: np.ndarray [shape=(n,)]
audio signal
- sr : int > 0 [scalar]
audio sampling rate of ``y``
- n_fft: int > 0 [scalar]
FFT window size
- hop_length : int > 0 [scalar] or None
Hop size for STFT. Defaults to ``n_fft / 4``.
See :func:`librosa.core.stft()` for details.
- threshold : float in ``(0, 1)``
Maximum fraction of expected frequency increment to tolerate
- fmin : float or tuple of float
Ramp parameter for lower frequency cutoff.
If scalar, the ramp has 0 width.
If tuple, a linear ramp is applied from ``fmin[0]`` to ``fmin[1]``
Default: (150.0, 300.0)
- fmax : float or tuple of float
Ramp parameter for upper frequency cutoff.
If scalar, the ramp has 0 width.
If tuple, a linear ramp is applied from ``fmax[0]`` to ``fmax[1]``
Default: (2000.0, 4000.0)
:returns:
- pitches : np.ndarray [shape=(d, t)]
- magnitudes : np.ndarray [shape=(d, t)]
Where ``d`` is the subset of FFT bins within ``fmin`` and ``fmax``.
``pitches[i, t]`` contains instantaneous frequencies at time ``t``
``magnitudes[i, t]`` contains their magnitudes.
- D : np.ndarray [shape=(d, t), dtype=complex]
STFT matrix
'''
if fmin is None:
fmin = (150.0, 300.0)
if fmax is None:
fmax = (2000.0, 4000.0)
fmin = np.asarray([fmin]).squeeze()
fmax = np.asarray([fmax]).squeeze()
# Truncate to feasible region
fmin = np.maximum(0, fmin)
fmax = np.minimum(fmax, float(sr) / 2)
# What's our DFT bin resolution?
fft_res = float(sr) / n_fft
# Only look at bins up to 2 kHz
max_bin = int(round(fmax[-1] / fft_res))
if hop_length is None:
hop_length = int(n_fft / 4)
# Calculate the inst freq gram
if_gram, D = librosa.core.ifgram(y, sr=sr,
n_fft=n_fft,
win_length=int(n_fft/2),
hop_length=hop_length)
# Find plateaus in ifgram - stretches where delta IF is < thr:
# ie, places where the same frequency is spread across adjacent bins
idx_above = list(range(1, max_bin)) + [max_bin - 1]
idx_below = [0] + list(range(0, max_bin - 1))
# expected increment per bin = sr/w, threshold at 3/4 that
matches = (abs(if_gram[idx_above] - if_gram[idx_below])
< (threshold * fft_res))
# mask out any singleton bins (where both above and below are zero)
matches = matches * ((matches[idx_above] > 0) | (matches[idx_below] > 0))
pitches = np.zeros_like(matches, dtype=float)
magnitudes = np.zeros_like(matches, dtype=float)
# For each frame, extract all harmonic freqs & magnitudes
for t in range(matches.shape[1]):
# find nonzero regions in this vector
# The mask selects out constant regions + active borders
mask = ~np.pad(matches[:, t], 1, mode='constant')
starts = np.argwhere(matches[:, t] & mask[:-2]).astype(int)
ends = 1 + np.argwhere(matches[:, t] & mask[2:]).astype(int)
# Set up inner loop
frqs = np.zeros_like(starts, dtype=float)
mags = np.zeros_like(starts, dtype=float)
for i, (start_i, end_i) in enumerate(zip(starts, ends)):
start_i = np.asscalar(start_i)
end_i = np.asscalar(end_i)
# Weight frequencies by energy
weights = np.abs(D[start_i:end_i, t])
mags[i] = weights.sum()
# Compute the weighted average frequency.
# FIXME: is this the right thing to do?
# These are frequencies... shouldn't this be a
# weighted geometric average?
frqs[i] = weights.dot(if_gram[start_i:end_i, t])
if mags[i] > 0:
frqs[i] /= mags[i]
# Clip outside the ramp zones
idx = (fmax[-1] < frqs) | (frqs < fmin[0])
mags[idx] = 0
frqs[idx] = 0
# Ramp down at the high end
idx = (fmax[-1] > frqs) & (frqs > fmax[0])
mags[idx] *= (fmax[-1] - frqs[idx]) / (fmax[-1] - fmax[0])
# Ramp up from the bottom end
idx = (fmin[-1] > frqs) & (frqs > fmin[0])
mags[idx] *= (frqs[idx] - fmin[0]) / (fmin[-1] - fmin[0])
# Assign pitch and magnitude to their center bin
bins = (starts + ends) // 2
pitches[bins, t] = frqs
magnitudes[bins, t] = mags
return pitches, magnitudes, D
@cache
def piptrack(y=None, sr=22050, S=None, n_fft=4096, fmin=150.0,
fmax=4000.0, threshold=.1):
'''Pitch tracking on thresholded parabolically-interpolated STFT
:usage:
>>> pitches, magnitudes = librosa.feature.piptrack(y=y, sr=sr)
:parameters:
- y: np.ndarray [shape=(n,)] or None
audio signal
- sr : int > 0 [scalar]
audio sampling rate of ``y``
- S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
- n_fft : int > 0 [scalar] or None
number of fft bins to use, if ``y`` is provided.
- threshold : float in ``(0, 1)``
A bin in spectrum X is considered a pitch when it is greater than
``threshold*X.max()``
- fmin : float > 0 [scalar]
lower frequency cutoff.
- fmax : float > 0 [scalar]
upper frequency cutoff.
.. note::
One of ``S`` or ``y`` must be provided.
If ``S`` is not given, it is computed from ``y`` using
the default parameters of ``stft``.
:returns:
- pitches : np.ndarray [shape=(d, t)]
- magnitudes : np.ndarray [shape=(d,t)]
Where ``d`` is the subset of FFT bins within ``fmin`` and ``fmax``.
``pitches[f, t]`` contains instantaneous frequency at bin
``f``, time ``t``
``magnitudes[f, t]`` contains the corresponding magnitudes.
.. note:: Both ``pitches`` and ``magnitudes`` take value 0 at bins
of non-maximal magnitude.
.. note::
https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
'''
# Check that we received an audio time series or STFT
if S is None:
if y is None:
raise ValueError('Either "y" or "S" must be provided')
S = np.abs(librosa.core.stft(y, n_fft=n_fft))
# Truncate to feasible region
fmin = np.maximum(fmin, 0)
fmax = np.minimum(fmax, float(sr) / 2)
# Pre-compute FFT frequencies
n_fft = 2 * (S.shape[0] - 1)
fft_freqs = librosa.core.fft_frequencies(sr=sr, n_fft=n_fft)
# Do the parabolic interpolation everywhere,
# then figure out where the peaks are
# then restrict to the feasible range (fmin:fmax)
avg = 0.5 * (S[2:] - S[:-2])
shift = 2 * S[1:-1] - S[2:] - S[:-2]
# Suppress divide-by-zeros.
# Points where shift == 0 will never be selected by localmax anyway
shift = avg / (shift + (shift == 0))
# Pad back up to the same shape as S
avg = np.pad(avg, ([1, 1], [0, 0]), mode='constant')
shift = np.pad(shift, ([1, 1], [0, 0]), mode='constant')
dskew = 0.5 * avg * shift
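# dskew is the parabolic-interpolation correction that gets added to the
# peak magnitude when a bin is selected below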
# Pre-allocate output
pitches = np.zeros_like(S)
mags = np.zeros_like(S)
# Clip to the viable frequency range
freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))
# Compute the column-wise local max of S after thresholding
# Find the argmax coordinates
idx = np.argwhere(freq_mask &
librosa.core.localmax(S * (S > (threshold
* S.max(axis=0)))))
# Store pitch and magnitude
pitches[idx[:, 0], idx[:, 1]] = ((idx[:, 0] + shift[idx[:, 0], idx[:, 1]])
* float(sr) / n_fft)
mags[idx[:, 0], idx[:, 1]] = (S[idx[:, 0], idx[:, 1]]
+ dskew[idx[:, 0], idx[:, 1]])
return pitches, mags
# -- Mel spectrogram and MFCCs -- #
@cache
def mfcc(y=None, sr=22050, S=None, n_mfcc=20, **kwargs):
"""Mel-frequency cepstral coefficients
:usage:
>>> # Generate mfccs from a time series
>>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
>>> # Use a pre-computed log-power Mel spectrogram
>>> S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,
fmax=8000)
>>> mfccs = librosa.feature.mfcc(S=librosa.logamplitude(S))
>>> # Get more components
>>> mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
:parameters:
- y : np.ndarray [shape=(n,)] or None
audio time series
- sr : int > 0 [scalar]
sampling rate of ``y``
- S : np.ndarray [shape=(d, t)] or None
log-power Mel spectrogram
- n_mfcc: int > 0 [scalar]
number of MFCCs to return
- *kwargs*
Additional keyword arguments for
:func:`librosa.feature.melspectrogram`, if operating on time series
.. note::
One of ``S`` or ``y, sr`` must be provided.
If ``S`` is not given, it is computed from ``y, sr`` using
the default parameters of ``melspectrogram``.
:returns:
- M : np.ndarray [shape=(n_mfcc, t)]
MFCC sequence
"""
if S is None:
S = librosa.logamplitude(melspectrogram(y=y, sr=sr, **kwargs))
return np.dot(librosa.filters.dct(n_mfcc, S.shape[0]), S)
@cache
def melspectrogram(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
**kwargs):
"""Compute a Mel-scaled power spectrogram.
:usage:
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> # Using a pre-computed power spectrogram
>>> D = np.abs(librosa.stft(y))**2
>>> S = librosa.feature.melspectrogram(S=D)
>>> # Passing through arguments to the Mel filters
>>> S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,
fmax=8000)
:parameters:
- y : np.ndarray [shape=(n,)] or None
audio time-series
- sr : int > 0 [scalar]
sampling rate of ``y``
- S : np.ndarray [shape=(d, t)]
magnitude or power spectrogram
- n_fft : int > 0 [scalar]
length of the FFT window
- hop_length : int > 0 [scalar]
number of samples between successive frames.
See :func:`librosa.core.stft()`
- *kwargs*
Additional keyword arguments for mel filterbank parameters.
See :func:`librosa.filters.mel()` for details.
.. note:: One of either ``S`` or ``y, sr`` must be provided.
If the pair ``y, sr`` is provided, the power spectrogram is computed.
If ``S`` is provided, it is used as the spectrogram, and the
parameters ``y, n_fft, hop_length`` are ignored.
:returns:
- S : np.ndarray [shape=(n_mels, t)]
Mel power spectrogram
"""
# Compute the STFT
if S is None:
S = np.abs(librosa.core.stft(y, n_fft=n_fft, hop_length=hop_length))**2
else:
n_fft = 2 * (S.shape[0] - 1)
# Build a Mel filter
mel_basis = librosa.filters.mel(sr, n_fft, **kwargs)
return np.dot(mel_basis, S)
# -- miscellaneous utilities -- #
@cache
def delta(data, width=9, order=1, axis=-1, trim=True):
'''Compute delta features.
:usage:
>>> # Compute MFCC deltas, delta-deltas
>>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
>>> delta_mfcc = librosa.feature.delta(mfccs)
>>> delta2_mfcc = librosa.feature.delta(mfccs, order=2)
:parameters:
- data : np.ndarray [shape=(d, T)]
      the input data matrix (e.g., a spectrogram)
- width : int > 0, odd [scalar]
Number of frames over which to compute the delta feature
- order : int > 0 [scalar]
the order of the difference operator.
1 for first derivative, 2 for second, etc.
- axis : int [scalar]
the axis along which to compute deltas.
Default is -1 (columns).
- trim : bool
set to True to trim the output matrix to the original size.
:returns:
- delta_data : np.ndarray [shape=(d, t) or (d, t + window)]
delta matrix of ``data``.
'''
half_length = 1 + int(np.floor(width / 2.0))
window = np.arange(half_length - 1, -half_length, -1)
# Pad out the data by repeating the border values (delta=0)
padding = [(0, 0)] * data.ndim
padding[axis] = (half_length, half_length)
delta_x = np.pad(data, padding, mode='edge')
for _ in range(order):
delta_x = scipy.signal.lfilter(window, 1, delta_x, axis=axis)
if trim:
idx = [Ellipsis] * delta_x.ndim
idx[axis] = slice(half_length, -half_length)
delta_x = delta_x[idx]
return delta_x
@cache
def stack_memory(data, n_steps=2, delay=1, **kwargs):
"""Short-term history embedding: vertically concatenate a data
vector or matrix with delayed copies of itself.
Each column ``data[:, i]`` is mapped to::
data[:, i] -> [ data[:, i], ...
data[:, i - delay], ...
...
data[:, i - (n_steps-1)*delay], ...
]
    For columns ``i < (n_steps - 1) * delay``, the data will be padded.
By default, the data is padded with zeros, but this behavior can be
overridden by supplying additional keyword arguments which are passed
to ``np.pad()``.
:usage:
>>> # Generate a data vector
>>> data = np.arange(-3, 3)
>>> # Keep two steps (current and previous)
>>> librosa.feature.stack_memory(data)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1]])
>>> # Or three steps
>>> librosa.feature.stack_memory(data, n_steps=3)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1],
[ 0, 0, -3, -2, -1, 0]])
>>> # Use reflection padding instead of zero-padding
>>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect')
array([[-3, -2, -1, 0, 1, 2],
[-2, -3, -2, -1, 0, 1],
[-1, -2, -3, -2, -1, 0]])
>>> # Or pad with edge-values, and delay by 2
>>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge')
array([[-3, -2, -1, 0, 1, 2],
[-3, -3, -3, -2, -1, 0],
[-3, -3, -3, -3, -3, -2]])
:parameters:
- data : np.ndarray [shape=(t,) or (d, t)]
Input data matrix. If ``data`` is a vector (``data.ndim == 1``),
it will be interpreted as a row matrix and reshaped to ``(1, t)``.
- n_steps : int > 0 [scalar]
embedding dimension, the number of steps back in time to stack
- delay : int > 0 [scalar]
the number of columns to step
- *kwargs*
Additional arguments to pass to ``np.pad``.
:returns:
- data_history : np.ndarray [shape=(m * d, t)]
data augmented with lagged copies of itself,
where ``m == n_steps - 1``.
"""
# If we're given a vector, interpret as a matrix
if data.ndim == 1:
data = data.reshape((1, -1))
t = data.shape[1]
kwargs.setdefault('mode', 'constant')
if kwargs['mode'] == 'constant':
kwargs.setdefault('constant_values', [0])
# Pad the end with zeros, which will roll to the front below
data = np.pad(data, [(0, 0), ((n_steps - 1) * delay, 0)], **kwargs)
history = data
for i in range(1, n_steps):
history = np.vstack([np.roll(data, -i * delay, axis=1), history])
# Trim to original width
history = history[:, :t]
# Make contiguous
return np.ascontiguousarray(history.T).T
@cache
def sync(data, frames, aggregate=None):
"""Synchronous aggregation of a feature matrix
:usage:
>>> # Beat-synchronous MFCCs
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> S = librosa.feature.melspectrogram(y=y, sr=sr,
hop_length=64)
>>> mfcc = librosa.feature.mfcc(S=S)
>>> mfcc_sync = librosa.feature.sync(mfcc, beats)
>>> # Use median-aggregation instead of mean
>>> mfcc_sync = librosa.feature.sync(mfcc, beats,
aggregate=np.median)
>>> # Or max aggregation
>>> mfcc_sync = librosa.feature.sync(mfcc, beats,
aggregate=np.max)
:parameters:
- data : np.ndarray [shape=(d, T)]
matrix of features
- frames : np.ndarray [shape=(m,)]
ordered array of frame segment boundaries
- aggregate : function
      aggregation function (default: ``np.mean``)
:returns:
- Y : ndarray [shape=(d, M)]
``Y[:, i] = aggregate(data[:, F[i-1]:F[i]], axis=1)``
.. note::
In order to ensure total coverage, boundary points may be added
to ``frames``.
If synchronizing a feature matrix against beat tracker output, ensure
that frame numbers are properly aligned and use the same hop length.
"""
if data.ndim < 2:
data = np.asarray([data])
elif data.ndim > 2:
raise ValueError('Synchronized data has ndim={:d},'
' must be 1 or 2.'.format(data.ndim))
if aggregate is None:
aggregate = np.mean
(dimension, n_frames) = data.shape
frames = np.unique(np.concatenate(([0], frames, [n_frames])))
frames = frames.astype(int)
if min(frames) < 0:
raise ValueError('Negative frame index.')
elif max(frames) > n_frames:
raise ValueError('Frame index exceeds data length.')
data_agg = np.empty((dimension, len(frames)-1), order='F')
start = frames[0]
for (i, end) in enumerate(frames[1:]):
data_agg[:, i] = aggregate(data[:, start:end], axis=1)
start = end
return data_agg
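# A minimal end-to-end sketch tying the functions above together.  Illustrative
# only: 'example.wav' is a placeholder path, and the chain simply mirrors the
# usage examples given in the docstrings above.
if __name__ == '__main__':
    y, sr = librosa.load('example.wav')
    mfccs = mfcc(y=y, sr=sr, n_mfcc=13)          # MFCC matrix, shape (13, t)
    delta_mfccs = delta(mfccs)                   # first-order deltas
    stacked = stack_memory(mfccs, n_steps=3)     # history embedding, shape (39, t)
    _, beats = librosa.beat.beat_track(y=y, sr=sr)
    mfccs_sync = sync(mfccs, beats, aggregate=np.median)  # beat-synchronous MFCCs
    print(mfccs.shape, delta_mfccs.shape, stacked.shape, mfccs_sync.shape)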
|
|
# Databricks notebook source
# MAGIC %md
# MAGIC ## Problem formulation
# MAGIC A common problem in computer vision is estimating the fundamental matrix based on an image pair. The fundamental matrix relates corresponding points in stereo geometry, and is useful as a pre-processing step, for example when one wants to perform a reconstruction of the captured scene. In this small project we use a scalable distributed algorithm to compute fundamental matrices between pairs of images in a large image set.
# MAGIC
# MAGIC #### Short theory section
# MAGIC Assume that we want to link points in some image taken by camera <img src="https://latex.codecogs.com/svg.latex?&space;P_1" /> to points in an image taken by another camera <img src="https://latex.codecogs.com/svg.latex?&space;P_2" />. Let <img src="https://latex.codecogs.com/svg.latex?&space;x_i" /> and <img src="https://latex.codecogs.com/svg.latex?&space;x_i'" /> denote the projections of global point <img src="https://latex.codecogs.com/svg.latex?&space;X_i" /> onto the cameras <img src="https://latex.codecogs.com/svg.latex?&space;P_1" /> and <img src="https://latex.codecogs.com/svg.latex?&space;P_2" />, respectively. Then the points are related as follows
# MAGIC
# MAGIC <img src="https://latex.codecogs.com/svg.latex?&space;\begin{cases}\lambda_i x_i = P_1X_i \\ \lambda_i' x_i' = P_2X_i
# MAGIC \end{cases} \Leftrightarrow \quad \begin{cases}\lambda_i x_i = P_1HH^{-1}X_i \\ \lambda_i' x_i' = P_2HH^{-1}X_i
# MAGIC \end{cases} \Leftrightarrow \quad \begin{cases}\lambda_i x_i = \tilde{P_1}\tilde{X_i} \\ \lambda_i' x_i' = \tilde{P_2}\tilde{X_i}
# MAGIC \end{cases}" />
# MAGIC
# MAGIC where <img src="https://latex.codecogs.com/svg.latex?&space;\lambda, \lambda'" /> are scale factors. Since we always can apply a projective transformation <img src="https://latex.codecogs.com/svg.latex?&space;H" /> to set one of the cameras to <img src="https://latex.codecogs.com/svg.latex?&space;P_1 = [I \quad 0]" /> and the other to some <img src="https://latex.codecogs.com/svg.latex?&space;P_2 = [A \quad t]" /> we can parametrize the global point <img src="https://latex.codecogs.com/svg.latex?&space;X_i" /> by
# MAGIC <img src="https://latex.codecogs.com/svg.latex?&space;X_i(\lambda) = [\lambda x_i \quad 1]^T" />. Thus the projected point onto camera <img src="https://latex.codecogs.com/svg.latex?&space;P_2" /> is represented by the line <img src="https://latex.codecogs.com/svg.latex?&space;P_2X_i(\lambda) = \lambda Ax_i + t " />. This line is called the epipolar line to the point <img src="https://latex.codecogs.com/svg.latex?&space;x_i" /> in epipolar geomtry, and descirbes how the point <img src="https://latex.codecogs.com/svg.latex?&space;x_i" /> in image 1 is related to points on in image 2. Since
# MAGIC all scene points that can project to <img src="https://latex.codecogs.com/svg.latex?&space;x_i" /> are on the viewing ray, all points in the second image that can
# MAGIC correspond to <img src="https://latex.codecogs.com/svg.latex?&space;x_i" /> have to be on the epipolar line. This condition is called the epipolar constraint.
# MAGIC
# MAGIC 
# MAGIC
# MAGIC Taking two points on this line (one of them being <img src="https://latex.codecogs.com/svg.latex?&space;e'" />, obtained with <img src="https://latex.codecogs.com/svg.latex?&space;\lambda = 0" />; here <img src="https://latex.codecogs.com/svg.latex?&space;e'" /> is the epipole, i.e. the projection of the first camera centre into the second image),
# MAGIC we can derive an expression for this line <img src="https://latex.codecogs.com/svg.latex?&space;\ell" />, since any point x on the line <img src="https://latex.codecogs.com/svg.latex?&space;\ell" /> must fulfill <img src="https://latex.codecogs.com/svg.latex?&space;\ell^Tx = 0" />. The line is thus given by
# MAGIC
# MAGIC
# MAGIC <img src="https://latex.codecogs.com/svg.latex?&space;\ell = t
# MAGIC \times (Ax +t ) = t \times (Ax) = e' \times Ax_i.\\" />
# MAGIC
# MAGIC Let <img src="https://latex.codecogs.com/svg.latex?&space;F = e' \times A " />; this is called the fundamental matrix. The fundamental matrix is thus a mathematical object that links points in image 1 to lines in image 2 (and vice versa). If <img src="https://latex.codecogs.com/svg.latex?&space;x'" /> corresponds to <img src="https://latex.codecogs.com/svg.latex?&space;x" />, then the epipolar constraint can be written
# MAGIC
# MAGIC <img src="https://latex.codecogs.com/svg.latex?&space;x'^T\ell = x'^T F x = 0 " />
# MAGIC
# MAGIC F is a 3x3 matrix with 9 entries and has 7 degrees of freedom. It can therefore be estimated from 7 point correspondences using the 7-point algorithm; a small OpenCV sketch is shown below.
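# MAGIC
# MAGIC As a small illustration of both ideas (a sketch only, with made-up point coordinates; the real pipeline further down uses matched SIFT features instead):
# MAGIC ```python
# MAGIC import numpy as np
# MAGIC import cv2
# MAGIC
# MAGIC # seven hypothetical point correspondences, one per row
# MAGIC pts1 = (100 * np.random.rand(7, 2)).astype(np.float32)
# MAGIC pts2 = (100 * np.random.rand(7, 2)).astype(np.float32)
# MAGIC
# MAGIC # 7-point algorithm; may return up to three stacked 3x3 candidate solutions
# MAGIC F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_7POINT)
# MAGIC
# MAGIC # epipolar constraint: x'^T F x should be (numerically) zero for the sampled points
# MAGIC x1 = np.append(pts1[0], 1.0)
# MAGIC x2 = np.append(pts2[0], 1.0)
# MAGIC print(x2 @ F[:3] @ x1)
# MAGIC ```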
# MAGIC
# MAGIC
# MAGIC So far we have assumed that the correspondences between points in the images are known. In practice, these are found by first extracting features in the images using some form of feature extractor (e.g. SIFT) and subsequently finding matches using some matching criterion/algorithm (e.g. Lowe's ratio criterion, or in our case a FLANN based matcher).
# MAGIC #### SIFT
# MAGIC Scale-invariant feature transform (SIFT) is a feature detection algorithm which detects and describes local features in images; see examples of detected SIFT features in the two images (a) and (b) below. SIFT finds local features present in the image and computes descriptors and locations for these features. Next we need to link the features present in image 1 to the features in image 2, which can be done using e.g. a FLANN (Fast Library for Approximate Nearest Neighbors) based matcher. In short, the features in the images are compared and the matches are found using a nearest neighbor search. After the matching step we have correspondences between the detected points in image 1 and image 2; see the example in image (c) below. Note that there is still a high probability that some of these matches are incorrect.
# MAGIC
# MAGIC 
# MAGIC
# MAGIC ### RANSAC
# MAGIC Some matches found by the FLANN matcher may be incorrect, and a common robust method for reducing the influence of these outliers in the estimation of F is RANSAC (RANdom SAmple Consensus). In short, it relies on the fact that the inliers will tend to a consensus regarding the correct estimate, whereas the outlier estimates will show greater variation. By sampling random sets of matches with size corresponding to the degrees of freedom of the model, computing their corresponding estimates, and grouping all matches whose error is below a set threshold, the largest consensus set is found. This set is then used for the final estimate of F. A minimal sketch of this idea is shown below.
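# MAGIC
# MAGIC A minimal sketch of that loop (illustrative pseudo-implementation; `estimate_F_7pt` and `epipolar_error` are hypothetical helpers, and in the pipeline below `cv2.findFundamentalMat(..., cv2.FM_RANSAC)` performs all of this internally):
# MAGIC ```python
# MAGIC import numpy as np
# MAGIC
# MAGIC def ransac_fundamental(pts1, pts2, estimate_F_7pt, epipolar_error,
# MAGIC                        n_iter=1000, threshold=1.0):
# MAGIC     best_F, best_inliers = None, np.zeros(len(pts1), dtype=bool)
# MAGIC     for _ in range(n_iter):
# MAGIC         sample = np.random.choice(len(pts1), 7, replace=False)   # minimal sample
# MAGIC         for F in estimate_F_7pt(pts1[sample], pts2[sample]):      # up to 3 solutions
# MAGIC             inliers = epipolar_error(F, pts1, pts2) < threshold   # consensus set
# MAGIC             if inliers.sum() > best_inliers.sum():
# MAGIC                 best_F, best_inliers = F, inliers
# MAGIC     return best_F, best_inliers
# MAGIC ```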
# COMMAND ----------
# MAGIC %md
# MAGIC ### For a more joyful presentation of the theory, listen to The Fundamental Matrix Song! (link below)
# MAGIC [](https://www.youtube.com/watch?v=DgGV3l82NTk)
# COMMAND ----------
# MAGIC %md
# MAGIC OpenCV is a well-known open-source library for computer vision, machine learning, and image processing tasks. In this project we will use it for feature extraction (SIFT), feature matching (FLANN) and the estimation of the fundamental matrix (using the 7-point algorithm). Let us install OpenCV.
# COMMAND ----------
# MAGIC %pip install opencv-python
# COMMAND ----------
# MAGIC %md
# MAGIC We also need to download a dataset that we can work with; this dataset was collected by Carl Olsson from LTH.
# MAGIC This is achieved by the bash shell scripts below.
# MAGIC The dataset is placed in the /tmp folder using wget's -P (directory prefix) option.
# COMMAND ----------
# MAGIC %sh
# MAGIC rm -r /tmp/0019
# MAGIC rm -r /tmp/eglise_int1.zip
# MAGIC
# MAGIC wget -P /tmp vision.maths.lth.se/calledataset/eglise_int/eglise_int1.zip
# MAGIC unzip /tmp/eglise_int1.zip -d /tmp/0019/
# MAGIC rm -r /tmp/eglise_int1.zip
# COMMAND ----------
# MAGIC %sh
# MAGIC rm -r /tmp/eglise_int2.zip
# MAGIC
# MAGIC wget -P /tmp vision.maths.lth.se/calledataset/eglise_int/eglise_int2.zip
# MAGIC unzip /tmp/eglise_int2.zip -d /tmp/0019/
# MAGIC rm -r /tmp/eglise_int2.zip
# COMMAND ----------
# MAGIC %sh
# MAGIC rm -r /tmp/eglise_int3.zip
# MAGIC
# MAGIC wget -P /tmp vision.maths.lth.se/calledataset/eglise_int/eglise_int3.zip
# MAGIC unzip /tmp/eglise_int3.zip -d /tmp/0019/
# MAGIC rm -r /tmp/eglise_int3.zip
# COMMAND ----------
# MAGIC %sh
# MAGIC cd /tmp/0019/
# MAGIC for f in *; do mv "$f" "eglise_$f"; done
# MAGIC cd /databricks/driver
# COMMAND ----------
# MAGIC %sh
# MAGIC rm -r /tmp/gbg.zip
# MAGIC
# MAGIC wget -P /tmp vision.maths.lth.se/calledataset/gbg/gbg.zip
# MAGIC unzip /tmp/gbg.zip -d /tmp/0019/
# MAGIC rm -r /tmp/gbg.zip
# COMMAND ----------
# MAGIC %scala
# MAGIC
# MAGIC import sys.process._
# MAGIC
# MAGIC //"wget -P /tmp vision.maths.lth.se/calledataset/door/door.zip" !!
# MAGIC //"unzip /tmp/door.zip -d /tmp/door/"!!
# MAGIC
# MAGIC //move downloaded dataset to dbfs
# MAGIC
# MAGIC val localpath="file:/tmp/0019/"
# MAGIC
# MAGIC dbutils.fs.rm("dbfs:/datasets/0019/mixedimages", true) // the boolean is for recursive rm
# MAGIC
# MAGIC dbutils.fs.mkdirs("dbfs:/datasets/0019/mixedimages")
# MAGIC
# MAGIC dbutils.fs.cp(localpath, "dbfs:/datasets/0019/mixedimages", true)
# COMMAND ----------
# MAGIC %sh
# MAGIC rm -r /tmp/0019
# COMMAND ----------
# MAGIC %scala
# MAGIC
# MAGIC display(dbutils.fs.ls("dbfs:/datasets/0019/mixedimages"))
# COMMAND ----------
#Loading one image from the dataset for testing
import numpy as np
import cv2
import matplotlib.pyplot as plt
def plot_img(figtitle,img):
#create figure with std size
fig = plt.figure(figtitle, figsize=(10, 5))
plt.imshow(img)
display(plt.show())
img1 = cv2.imread("/dbfs/datasets/0019/mixedimages/eglise_DSC_0133.JPG")
#img2 = cv2.imread("/dbfs/datasets/0019/mixedimages/DSC_0133.JPG")
plot_img("eglise", img1)
#plot_img("gbg", img2)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Read Image Dataset
# COMMAND ----------
import glob
import numpy as np
import cv2
import os
dataset_path = "/dbfs/datasets/0019/mixedimages/"
#get all filenames in folder
files = glob.glob(os.path.join(dataset_path,"*.JPG"))
dataset = []
#load all images
for i, file in enumerate(files): # Alex: changed
    # Load a color image (kept commented out: images are read later, on the executors)
    #img = cv2.imread(file)
    # add the image path to the list
    dataset.append(file)
if i >= 12: # Alex: changed
break
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Define maps
# COMMAND ----------
import glob
import numpy as np
import cv2
import matplotlib.pyplot as plt
max_features = 1000
def plot_img(figtitle,s):
img = cv2.imread(s)
#create figure with std size
fig = plt.figure(figtitle, figsize=(10, 5))
plt.imshow(img)
display(plt.show())
def extract_features(s):
"""
"""
img = cv2.imread(s) # Johan : here we load the images on the executor from dbfs into memory
#convert to gray scale
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT_create(max_features)
#extract sift features and descriptors
kp, des = sift.detectAndCompute(gray, None)
#convert keypoint class to list of feature locations (for serialization)
points=[]
for i in range(len(kp)):
points.append(kp[i].pt)
#return a tuple of image name, image, feature points, descriptors, called a feature tuple
return (s, points, des) # Johan : here we don't send the images
def estimate_fundamental_matrix(s):
"""
"""
# s[0] is a feature tuple for the first image, s[1] is the same for the second image
a = s[0]
b = s[1]
# unpacks the tuples
name1, kp1, desc1 = a
name2, kp2, desc2 = b
# Create FLANN matcher object
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE,
trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams,
searchParams)
# matches the descriptors, for each query descriptor it finds the two best matches among the train descriptors
matches = flann.knnMatch(desc1, desc2, k=2)
goodMatches = []
pts1 = []
pts2 = []
# compares the best with the second best match and only adds those where the best match is significantly better than the next best.
for i,(m,n) in enumerate(matches):
if m.distance < 0.8*n.distance:
goodMatches.append([m.queryIdx, m.trainIdx])
pts2.append(kp2[m.trainIdx])
pts1.append(kp1[m.queryIdx])
pts1 = np.array(pts1, dtype=np.float32)
pts2 = np.array(pts2, dtype=np.float32)
# finds the fundamental matrix using ransac:
# selects minimal sub-set of the matches,
# estimates the fundamental matrix,
# checks how many of the matches satisfy the epipolar geometry (the inlier set)
# iterates this for a number of iterations,
# returns the fundamental matrix and mask with the largest number of inliers.
F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
inlier_matches = []
# removes all matches that are not inliers
if mask is not None:
for i, el in enumerate(mask):
if el == 1:
inlier_matches.append(goodMatches[i])
# returns a tuple containing the feature tuple of image one and image two, the fundamental matrix and the inlier matches
return (a, b, F, inlier_matches)
def display_data(data):
for el in data:
print(el[2])
print("#######################################################")
# COMMAND ----------
# MAGIC %md
# MAGIC Perform Calculations
# COMMAND ----------
# creates an RDD from the list of image paths
rdd = sc.parallelize(dataset)
print("num partitions: ", rdd.getNumPartitions())
# applies the feature extraction to the images
rdd_features = rdd.map(extract_features) # Alex: we could leave the name but remove the image in a and b
# forms pairs of images by applying the cartesian product and filtering away the identity pairs
rdd_pairs = rdd_features.cartesian(rdd_features).filter(lambda s: s[0][0] != s[1][0])
# applies the fundamental matrix estimation function to the pairs formed in the previous step and filters away all pairs with too few inliers.
rdd_fundamental_matrix = rdd_pairs.map(estimate_fundamental_matrix).filter(lambda s: len(s[3]) > 50)
# collects the result from the nodes
data = rdd_fundamental_matrix.collect()
# displays the fundamental matrices
display_data(data)
# COMMAND ----------
# MAGIC %md
# MAGIC Now that we have computed the fundamental matrices, let us have a look at them by presenting the epipolar lines.
# COMMAND ----------
import random
def drawlines(img1,img2,lines,pts1,pts2):
#from opencv tutorial
''' img1 - image on which we draw the epilines for the points in img2
lines - corresponding epilines '''
r,c,_ = img1.shape
for r,pt1,pt2 in zip(lines,pts1,pts2):
color = tuple(np.random.randint(0,255,3).tolist())
x0,y0 = map(int, [0, -r[2]/r[1] ])
x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
img1 = cv2.line(img1, (x0,y0), (x1,y1), color,3)
        # cv2.circle expects integer pixel coordinates, so cast the float32 points
        img1 = cv2.circle(img1, tuple(map(int, pt1)), 10, color, -1)
        img2 = cv2.circle(img2, tuple(map(int, pt2)), 10, color, -1)
return img1,img2
# draws a random subset of the data
sampling = random.choices(data, k=4)
# plots the inlier features in one image and the corresponding epipolar lines in the other, for each sampled pair
i = 0
fig, axs = plt.subplots(1, 8, figsize=(25, 5))
for el in sampling:
    a, b, F, matches = el
if F is None:
continue
name1, kp1, desc1 = a
name2, kp2, desc2 = b
im1 = cv2.imread(name1)
im2 = cv2.imread(name2)
pts1 = []
pts2 = []
for m in matches:
        pts1.append(kp1[m[0]])
        pts2.append(kp2[m[1]])
pts1 = np.array(pts1, dtype=np.float32)
pts2 = np.array(pts2, dtype=np.float32)
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2, F)
lines1 = lines1.reshape(-1,3)
img1, img2 = drawlines(im1,im2,lines1,pts1,pts2)
axs[i].imshow(img2), axs[i].set_title('Image pair '+str(i+1)+': Features')
axs[i+1].imshow(img1), axs[i+1].set_title('Image pair '+str(i+1)+': Epipolar lines')
i += 2
#plt.subplot(121),plt.imshow(img1), plt.title('Epipolar lines')
#plt.subplot(122),plt.imshow(img2), plt.title('Points')
display(plt.show())
# COMMAND ----------
# MAGIC %md
# MAGIC Present Matches
# COMMAND ----------
import random
# draws a random subset of the data
sampling = random.choices(data, k=4)
j = 0
fig, axs = plt.subplots(1, 4, figsize=(25, 5))
# draws lines between the matched features in the two images (not epipolar lines!)
for el in sampling:
    a, b, F, matches = el
if F is None:
continue
name1, kp1, desc1 = a
name2, kp2, desc2 = b
im1 = cv2.imread(name1)
im2 = cv2.imread(name2)
kp1_vec = []
kp2_vec = []
matches_vec = []
for i,m in enumerate(matches):
kp1_vec.append(cv2.KeyPoint(kp1[m[0]][0], kp1[m[0]][1],1))
kp2_vec.append(cv2.KeyPoint(kp2[m[1]][0], kp2[m[1]][1],1))
matches_vec.append(cv2.DMatch(i, i, 1))
matched_image = im1.copy()
matched_image = cv2.drawMatches(im1, kp1_vec, im2, kp2_vec, matches_vec, matched_image)
axs[j].imshow(matched_image), axs[j].set_title('Image pair '+str(j+1)+': Matches')
j += 1
#plot_img("matches", matched_image)
display(plt.show())
# COMMAND ----------
# Questions:
# Pics of different resolutions/sizes: Yes
#
# COMMAND ----------
|
|
# -*- coding: utf-8 -*-
"""
Implements two storage providers for `LockManager`.
Two alternative lock storage classes are defined here: one in-memory
(dict-based), and one persistent low performance variant using shelve.
See wsgidav.lock_manager.LockManager
See `Developers info`_ for more information about the WsgiDAV architecture.
.. _`Developers info`: http://wsgidav.readthedocs.org/en/latest/develop.html
"""
from __future__ import absolute_import, division, unicode_literals
import os
import shelve
import time
from . import util
from .rw_lock import ReadWriteLock
from ..wsgidav.lock_manager import normalizeLockRoot, lockString,\
generateLockToken, validateLock
__docformat__ = "reStructuredText"
_logger = util.getModuleLogger(__name__)
# TODO: comment's from Ian Bicking (2005)
#@@: Use of shelve means this is only really useful in a threaded environment.
# And if you have just a single-process threaded environment, you could get
# nearly the same effect with a dictionary of threading.Lock() objects. Of course,
# it would be better to move off shelve anyway, probably to a system with
# a directory of per-file locks, using the file locking primitives (which,
# sadly, are not quite portable).
# @@: It would probably be easy to store the properties as pickle objects
# in a parallel directory structure to the files you are describing.
# Pickle is expedient, but later you could use something more readable
# (pickles aren't particularly readable)
class LockStorageDict(object):
"""
An in-memory lock manager storage implementation using a dictionary.
    R/W access is guarded by a ReadWriteLock object.
Also, to make it work with a Shelve dictionary, modifying dictionary
members is done by re-assignment and we call a _flush() method.
This is obviously not persistent, but should be enough in some cases.
For a persistent implementation, see lock_manager.LockStorageShelve().
Notes:
expire is stored as expiration date in seconds since epoch (not in
seconds until expiration).
The dictionary is built like::
{ 'URL2TOKEN:/temp/litmus/lockme': ['opaquelocktoken:0x1d7b86...',
'opaquelocktoken:0xd7d4c0...'],
'opaquelocktoken:0x1d7b86...': { 'depth': '0',
'owner': "<?xml version=\'1.0\' encoding=\'UTF-8\'?>\\n<owner xmlns="DAV:">litmus test suite</owner>\\n",
'principal': 'tester',
'root': '/temp/litmus/lockme',
'scope': 'shared',
'expire': 1261328382.4530001,
'token': 'opaquelocktoken:0x1d7b86...',
'type': 'write',
},
'opaquelocktoken:0xd7d4c0...': { 'depth': '0',
'owner': '<?xml version=\'1.0\' encoding=\'UTF-8\'?>\\n<owner xmlns="DAV:">litmus: notowner_sharedlock</owner>\\n',
'principal': 'tester',
'root': '/temp/litmus/lockme',
'scope': 'shared',
'expire': 1261328381.6040001,
'token': 'opaquelocktoken:0xd7d4c0...',
'type': 'write'
},
}
"""
LOCK_TIME_OUT_DEFAULT = 604800 # 1 week, in seconds
    LOCK_TIME_OUT_MAX = 4 * 604800 # 4 weeks, in seconds
def __init__(self):
self._dict = None
self._lock = ReadWriteLock()
def __repr__(self):
return self.__class__.__name__
def __del__(self):
pass
def _flush(self):
"""Overloaded by Shelve implementation."""
pass
def open(self):
"""Called before first use.
May be implemented to initialize a storage.
"""
assert self._dict is None
self._dict = {}
def close(self):
"""Called on shutdown."""
self._dict = None
def cleanup(self):
"""Purge expired locks (optional)."""
pass
def clear(self):
"""Delete all entries."""
if self._dict is not None:
self._dict.clear()
def get(self, token):
"""Return a lock dictionary for a token.
If the lock does not exist or is expired, None is returned.
token:
lock token
Returns:
Lock dictionary or <None>
Side effect: if lock is expired, it will be purged and None is returned.
"""
self._lock.acquireRead()
try:
lock = self._dict.get(token)
if lock is None:
# Lock not found: purge dangling URL2TOKEN entries
_logger.debug("Lock purged dangling: %s" % token)
self.delete(token)
return None
expire = float(lock[b"expire"])
if expire >= 0 and expire < time.time():
_logger.debug("Lock timed-out(%s): %s" % (expire, lockString(lock)))
self.delete(token)
return None
return lock
finally:
self._lock.release()
def create(self, path, lock):
"""Create a direct lock for a resource path.
path:
Normalized path (utf8 encoded string, no trailing '/')
lock:
lock dictionary, without a token entry
Returns:
            The lock dictionary, extended with a new unique lock token.
**Note:** the lock dictionary may be modified on return:
- lock['root'] is ignored and set to the normalized <path>
- lock['timeout'] may be normalized and shorter than requested
- lock['token'] is added
"""
self._lock.acquireWrite()
try:
# We expect only a lock definition, not an existing lock
assert lock.get(b"token") is None
assert lock.get(b"expire") is None, "Use timeout instead of expire"
assert path and b"/" in path
# Normalize root: /foo/bar
org_path = path
path = normalizeLockRoot(path)
lock[b"root"] = path
# Normalize timeout from ttl to expire-date
timeout = float(lock.get(b"timeout"))
if timeout is None:
timeout = LockStorageDict.LOCK_TIME_OUT_DEFAULT
elif timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
timeout = LockStorageDict.LOCK_TIME_OUT_MAX
lock[b"timeout"] = timeout
lock[b"expire"] = time.time() + timeout
validateLock(lock)
token = generateLockToken()
lock[b"token"] = token
# Store lock
self._dict[token] = lock
# Store locked path reference
key = b"URL2TOKEN:%s" % path
            if key not in self._dict:
self._dict[key] = [ token ]
else:
# Note: Shelve dictionary returns copies, so we must reassign values:
tokList = self._dict[key]
tokList.append(token)
self._dict[key] = tokList
self._flush()
_logger.debug("LockStorageDict.set(%r): %s" % (org_path, lockString(lock)))
# print("LockStorageDict.set(%r): %s" % (org_path, lockString(lock)))
return lock
finally:
self._lock.release()
def refresh(self, token, timeout):
"""Modify an existing lock's timeout.
token:
Valid lock token.
timeout:
Suggested lifetime in seconds (-1 for infinite).
The real expiration time may be shorter than requested!
Returns:
Lock dictionary.
Raises ValueError, if token is invalid.
"""
assert token in self._dict, "Lock must exist"
assert timeout == -1 or timeout > 0
if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
timeout = LockStorageDict.LOCK_TIME_OUT_MAX
self._lock.acquireWrite()
try:
# Note: shelve dictionary returns copies, so we must reassign values:
lock = self._dict[token]
lock[b"timeout"] = timeout
lock[b"expire"] = time.time() + timeout
self._dict[token] = lock
self._flush()
finally:
self._lock.release()
return lock
def delete(self, token):
"""Delete lock.
Returns True on success. False, if token does not exist, or is expired.
"""
self._lock.acquireWrite()
try:
lock = self._dict.get(token)
_logger.debug("delete %s" % lockString(lock))
if lock is None:
return False
# Remove url to lock mapping
key = b"URL2TOKEN:%s" % lock.get(b"root")
if key in self._dict:
# _logger.debug(" delete token %s from url %s" % (token, lock.get("root")))
tokList = self._dict[key]
if len(tokList) > 1:
# Note: shelve dictionary returns copies, so we must reassign values:
tokList.remove(token)
self._dict[key] = tokList
else:
del self._dict[key]
# Remove the lock
del self._dict[token]
self._flush()
finally:
self._lock.release()
return True
def getLockList(self, path, includeRoot, includeChildren, tokenOnly):
"""Return a list of direct locks for <path>.
Expired locks are *not* returned (but may be purged).
path:
Normalized path (utf8 encoded string, no trailing '/')
includeRoot:
False: don't add <path> lock (only makes sense, when includeChildren
is True).
includeChildren:
True: Also check all sub-paths for existing locks.
tokenOnly:
            True: only a list of tokens is returned. This may be implemented
more efficiently by some providers.
Returns:
List of valid lock dictionaries (may be empty).
"""
assert path and path.startswith(b"/")
assert includeRoot or includeChildren
def __appendLocks(toklist):
# Since we can do this quickly, we use self.get() even if
# tokenOnly is set, so expired locks are purged.
for token in toklist:
lock = self.get(token)
if lock:
if tokenOnly:
lockList.append(lock[b"token"])
else:
lockList.append(lock)
path = normalizeLockRoot(path)
self._lock.acquireRead()
try:
key = b"URL2TOKEN:%s" % path
tokList = self._dict.get(key, [])
lockList = []
if includeRoot:
__appendLocks(tokList)
if includeChildren:
for u, ltoks in self._dict.items():
if util.isChildUri(key, u):
__appendLocks(ltoks)
return lockList
finally:
self._lock.release()
class LockStorageShelve(LockStorageDict):
"""
A low performance lock manager implementation using shelve.
"""
def __init__(self, storagePath):
super(LockStorageShelve, self).__init__()
self._storagePath = os.path.abspath(storagePath)
def __repr__(self):
return "LockStorageShelve(%r)" % self._storagePath
def _flush(self):
"""Write persistent dictionary to disc."""
_logger.debug("_flush()")
self._lock.acquireWrite() # TODO: read access is enough?
try:
self._dict.sync()
finally:
self._lock.release()
def clear(self):
"""Delete all entries."""
self._lock.acquireWrite() # TODO: read access is enough?
try:
was_closed = self._dict is None
if was_closed:
self.open()
if len(self._dict):
self._dict.clear()
self._dict.sync()
if was_closed:
self.close()
finally:
self._lock.release()
def open(self):
_logger.debug("open(%r)" % self._storagePath)
# Open with writeback=False, which is faster, but we have to be
# careful to re-assign values to _dict after modifying them
self._dict = shelve.open(self._storagePath, writeback=False)
# if __debug__ and self._verbose >= 2:
## self._check("After shelve.open()")
# self._dump("After shelve.open()")
def close(self):
_logger.debug("close()")
self._lock.acquireWrite()
try:
if self._dict is not None:
self._dict.close()
self._dict = None
finally:
self._lock.release()
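# A rough usage sketch (kept as comments, since the exact lock-dictionary fields
# accepted by validateLock() are defined elsewhere in wsgidav; the field values
# below are hypothetical and simply mirror the example in LockStorageDict's docstring):
#
#     storage = LockStorageShelve("/tmp/wsgidav_locks")   # or LockStorageDict()
#     storage.open()
#     lock = storage.create(b"/temp/litmus/lockme",
#                           {b"depth": b"0", b"scope": b"shared", b"type": b"write",
#                            b"owner": b"<owner/>", b"principal": b"tester",
#                            b"timeout": 3600})
#     token = lock[b"token"]
#     storage.refresh(token, timeout=600)
#     storage.delete(token)
#     storage.close()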
|
|
from datetime import datetime, timezone
from enum import Enum, IntEnum, unique
from json import dumps as json_dumps
from json import loads as json_loads
from typing import Dict, List
from attr import define
from bson import CodecOptions
from hypothesis import given
from hypothesis.strategies import (
binary,
characters,
composite,
datetimes,
dictionaries,
floats,
frozensets,
integers,
just,
lists,
sets,
text,
)
from cattr._compat import (
Counter,
FrozenSet,
Mapping,
MutableMapping,
MutableSequence,
MutableSet,
Sequence,
Set,
TupleSubscriptable,
)
from cattrs.preconf.bson import make_converter as bson_make_converter
from cattrs.preconf.json import make_converter as json_make_converter
from cattrs.preconf.msgpack import make_converter as msgpack_make_converter
from cattrs.preconf.orjson import make_converter as orjson_make_converter
from cattrs.preconf.pyyaml import make_converter as pyyaml_make_converter
from cattrs.preconf.tomlkit import make_converter as tomlkit_make_converter
from cattrs.preconf.ujson import make_converter as ujson_make_converter
@define
class Everything:
@unique
class AnIntEnum(IntEnum):
A = 1
@unique
class AStringEnum(str, Enum):
A = "a"
string: str
bytes: bytes
an_int: int
a_float: float
a_dict: Dict[str, int]
a_list: List[int]
a_homogenous_tuple: TupleSubscriptable[int, ...]
a_hetero_tuple: TupleSubscriptable[str, int, float]
a_counter: Counter[str]
a_mapping: Mapping[int, float]
a_mutable_mapping: MutableMapping[float, str]
a_sequence: Sequence[float]
a_mutable_sequence: MutableSequence[str]
a_set: Set[float]
a_mutable_set: MutableSet[int]
a_frozenset: FrozenSet[str]
an_int_enum: AnIntEnum
a_str_enum: AStringEnum
a_datetime: datetime
a_string_enum_dict: Dict[AStringEnum, int]
@composite
def everythings(
draw,
min_int=None,
max_int=None,
allow_inf=True,
allow_null_bytes_in_keys=True,
allow_quotes_in_keys=True,
allow_control_characters_in_values=True,
min_key_length=0,
allow_datetime_microseconds=True,
):
key_text = text(
characters(
blacklist_categories=("Cs",) if allow_null_bytes_in_keys else ("Cs", "Cc"),
blacklist_characters='"' if not allow_quotes_in_keys else None,
),
min_size=min_key_length,
)
strings = text(
characters(
blacklist_categories=("Cs",)
if allow_control_characters_in_values
else ("Cs", "Cc")
)
)
dts = datetimes(
min_value=datetime(1904, 1, 1),
max_value=datetime(2038, 1, 1),
timezones=just(timezone.utc),
)
if not allow_datetime_microseconds:
dts = dts.map(
lambda d: datetime(
d.year, d.month, d.day, d.hour, d.minute, d.second, tzinfo=d.tzinfo
)
)
return Everything(
draw(strings),
draw(binary()),
draw(integers(min_value=min_int, max_value=max_int)),
draw(floats(allow_nan=False, allow_infinity=allow_inf)),
draw(dictionaries(key_text, integers(min_value=min_int, max_value=max_int))),
draw(lists(integers(min_value=min_int, max_value=max_int))),
tuple(draw(lists(integers(min_value=min_int, max_value=max_int)))),
(
draw(strings),
draw(integers(min_value=min_int, max_value=max_int)),
draw(floats(allow_nan=False, allow_infinity=allow_inf)),
),
Counter(
draw(dictionaries(key_text, integers(min_value=min_int, max_value=max_int)))
),
draw(
dictionaries(
integers(min_value=min_int, max_value=max_int),
floats(allow_nan=False, allow_infinity=allow_inf),
)
),
draw(dictionaries(floats(allow_nan=False, allow_infinity=allow_inf), strings)),
draw(lists(floats(allow_nan=False, allow_infinity=allow_inf))),
draw(lists(strings)),
draw(sets(floats(allow_nan=False, allow_infinity=allow_inf))),
draw(sets(integers(min_value=min_int, max_value=max_int))),
draw(frozensets(strings)),
Everything.AnIntEnum.A,
Everything.AStringEnum.A,
draw(dts),
draw(
dictionaries(
just(Everything.AStringEnum.A),
integers(min_value=min_int, max_value=max_int),
)
),
)
@given(everythings())
def test_stdlib_json(everything: Everything):
converter = json_make_converter()
assert (
converter.structure(
json_loads(json_dumps(converter.unstructure(everything))), Everything
)
== everything
)
@given(
everythings(
min_int=-9223372036854775808, max_int=9223372036854775807, allow_inf=False
)
)
def test_ujson(everything: Everything):
from ujson import dumps as ujson_dumps
from ujson import loads as ujson_loads
converter = ujson_make_converter()
raw = ujson_dumps(converter.unstructure(everything))
assert converter.structure(ujson_loads(raw), Everything) == everything
@given(
everythings(
min_int=-9223372036854775808, max_int=9223372036854775807, allow_inf=False
)
)
def test_orjson(everything: Everything):
from orjson import dumps as orjson_dumps
from orjson import loads as orjson_loads
converter = orjson_make_converter()
raw = orjson_dumps(converter.unstructure(everything))
assert converter.structure(orjson_loads(raw), Everything) == everything
@given(everythings(min_int=-9223372036854775808, max_int=18446744073709551615))
def test_msgpack(everything: Everything):
from msgpack import dumps as msgpack_dumps
from msgpack import loads as msgpack_loads
converter = msgpack_make_converter()
raw = msgpack_dumps(converter.unstructure(everything))
assert (
converter.structure(msgpack_loads(raw, strict_map_key=False), Everything)
== everything
)
@given(
everythings(
min_int=-9223372036854775808,
max_int=9223372036854775807,
allow_null_bytes_in_keys=False,
allow_datetime_microseconds=False,
)
)
def test_bson(everything: Everything):
from bson import decode as bson_loads
from bson import encode as bson_dumps
converter = bson_make_converter()
raw = bson_dumps(
converter.unstructure(everything), codec_options=CodecOptions(tz_aware=True)
)
assert (
converter.structure(
bson_loads(raw, codec_options=CodecOptions(tz_aware=True)), Everything
)
== everything
)
@given(everythings())
def test_pyyaml(everything: Everything):
from yaml import safe_dump, safe_load
converter = pyyaml_make_converter()
unstructured = converter.unstructure(everything)
raw = safe_dump(unstructured)
assert converter.structure(safe_load(raw), Everything) == everything
@given(
everythings(
min_key_length=1,
allow_null_bytes_in_keys=False,
allow_quotes_in_keys=False,
allow_control_characters_in_values=False,
)
)
def test_tomlkit(everything: Everything):
from tomlkit import dumps as tomlkit_dumps
from tomlkit import loads as tomlkit_loads
converter = tomlkit_make_converter()
unstructured = converter.unstructure(everything)
raw = tomlkit_dumps(unstructured)
assert converter.structure(tomlkit_loads(raw), Everything) == everything
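# The dumps/loads round trip has the same shape in every test above.  A possible
# shared helper (a sketch only; the tests as written do not use it):
def _roundtrip(converter, dumps, loads, value: Everything) -> bool:
    """Unstructure ``value``, serialize, deserialize, restructure, and compare."""
    raw = dumps(converter.unstructure(value))
    return converter.structure(loads(raw), Everything) == value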
|
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""The "Manager" is the controlling instance for a codeintel system."""
import os
from os.path import dirname, join, abspath, splitext, basename, isabs
import sys
import imp
import logging
from collections import defaultdict
from glob import glob
import threading
from queue import Queue
import warnings
import traceback
import codecs
from SilverCity import ScintillaConstants
import codeintel2
from codeintel2.common import *
from codeintel2.accessor import *
from codeintel2.citadel import Citadel, BinaryBuffer
from codeintel2.buffer import ImplicitBuffer
from codeintel2.langintel import ImplicitLangIntel
from codeintel2.database.database import Database
from codeintel2.environment import DefaultEnvironment
from codeintel2 import indexer
from codeintel2.util import guess_lang_from_path
from codeintel2 import hooks
from codeintel2.udl import XMLParsingBufferMixin, UDLBuffer
import langinfo
if _xpcom_:
from xpcom.server import UnwrapObject
#---- global variables
log = logging.getLogger("codeintel.manager")
# log.setLevel(logging.INFO)
#---- public interface
class Manager(threading.Thread, Queue):
# See the module docstring for usage information.
def __init__(self, db_base_dir=None, on_scan_complete=None,
extra_module_dirs=None, env=None,
db_event_reporter=None, db_catalog_dirs=None,
db_import_everything_langs=None):
"""Create a CodeIntel manager.
"db_base_dir" (optional) specifies the base directory for
the codeintel database. If not given it will default to
'~/.codeintel'.
"on_scan_complete" (optional) is a callback for Citadel scan
completion. It will be passed the ScanRequest instance
as an argument.
"extra_module_dirs" (optional) is a list of extra dirs
in which to look for and use "codeintel_*.py"
support modules (and "lang_*.py" modules, DEPRECATED).
"env" (optional) is an Environment instance (or subclass).
See environment.py for details.
"db_event_reporter" (optional) is a callback that will be called
db_event_reporter(<event-desc-string>)
before "significant" long processing events in the DB. This
may be useful to forward to a status bar in a GUI.
"db_catalog_dirs" (optional) is a list of catalog dirs in
addition to the std one to use for the CatalogsZone. All
*.cix files in a catalog dir are made available.
"db_import_everything_langs" (optional) is a set of langs for which
the extra effort to support Database
`lib.hits_from_lpath()' should be made. See class
Database for more details.
"""
threading.Thread.__init__(self, name="CodeIntel Manager")
self.setDaemon(True)
Queue.__init__(self)
self.citadel = Citadel(self)
# Module registry bits.
self._registered_module_canon_paths = set()
self.silvercity_lexer_from_lang = {}
self.buf_class_from_lang = {}
self.langintel_class_from_lang = {}
self._langintel_from_lang_cache = {}
self.import_handler_class_from_lang = {}
self._is_citadel_from_lang = {
} # registered langs that are Citadel-based
self._is_cpln_from_lang = {
} # registered langs for which completion is supported
self._hook_handlers_from_lang = defaultdict(list)
self.env = env or DefaultEnvironment()
# The database must be enabled before registering modules.
self.db = Database(self, base_dir=db_base_dir,
catalog_dirs=db_catalog_dirs,
event_reporter=db_event_reporter,
import_everything_langs=db_import_everything_langs)
self.lidb = langinfo.get_default_database()
self._register_modules(extra_module_dirs)
self.idxr = indexer.Indexer(self, on_scan_complete)
def upgrade(self):
"""Upgrade the database, if necessary.
It blocks until the upgrade is complete. Alternatively, if you
want more control over upgrading use:
Database.upgrade_info()
Database.upgrade()
Database.reset()
"""
log.debug("upgrade db if necessary")
status, reason = self.db.upgrade_info()
if status == Database.UPGRADE_NECESSARY:
log.info("db upgrade is necessary")
self.db.upgrade()
elif status == Database.UPGRADE_NOT_POSSIBLE:
log.warn("%s (resetting db)", reason)
log.info("reset db at `%s' (creating backup)", self.db.base_dir)
self.db.reset()
elif status == Database.UPGRADE_NOT_NECESSARY:
log.debug("no upgrade necessary")
else:
raise CodeIntelError("unknown db upgrade status: %r" % status)
def initialize(self):
"""Initialize the codeintel system."""
# TODO: Implement DB cleaning.
# self.db.clean()
self.idxr.start()
def _register_modules(self, extra_module_dirs=None):
"""Register codeintel/lang modules.
@param extra_module_dirs {sequence} is an optional list of extra
dirs in which to look for and use "codeintel|lang_*.py"
support modules. By default just the codeintel2 package
directory is used.
"""
dirs = [dirname(__file__)]
if extra_module_dirs:
dirs += extra_module_dirs
import_hook = self._ImportHook(
self._registered_module_canon_paths.union(dirs))
sys.meta_path.append(import_hook)
try:
for dir in dirs:
for module_path in glob(join(dir, "codeintel_*.py")):
self._register_module(module_path)
for module_path in glob(join(dir, "lang_*.py")):
warnings.warn("%s: `lang_*.py' codeintel modules are deprecated, "
"use `codeintel_*.py'. Support for `lang_*.py' "
"will be dropped in Komodo 5.1." % module_path,
CodeIntelDeprecationWarning)
self._register_module(module_path)
finally:
sys.meta_path.remove(import_hook)
class _ImportHook(object):
"""This is an import hook for __import__ to look for modules in the
extra module paths as necessary. This is needed because a bunch of the
modules assume they're in the codeintel2 package.
"""
_suffixes = None
def __init__(self, paths):
"""Create an import hook
@param paths {set} The paths to scan in
"""
self._paths = paths
self._cache = None
def find_module(self, fullname, path=None):
parts = fullname.split(".")
if len(parts) != 2 or parts[0] != "codeintel2":
return None
name = parts[-1]
for path in self._paths:
fullpath = join(path, name + ".py")
if not os.path.exists(fullpath):
continue
self._cache = fullpath
return self
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
parts = fullname.split(".")
if len(parts) != 2 or parts[0] != "codeintel2":
raise ImportError("Did not expect to handle import for %s" %
fullname)
name = parts[-1]
if self._cache and basename(self._cache) == name + ".py":
fullpath = self._cache
else:
# stale cache
for path in self._paths:
fullpath = join(path, name + ".py")
if os.path.exists(fullpath):
break
else:
raise ImportError("Failed to locate %s" % fullname)
try:
module = imp.load_source(fullname, fullpath)
sys.modules[fullname] = module
setattr(codeintel2, name, module)
return module
except:
log.exception("Failed to load %s", fullpath)
raise
def _register_module(self, module_path):
"""Register the given codeintel support module.
@param module_path {str} is the path to the support module.
@exception ImportError, CodeIntelError
This will import the given module path and call its top-level
`register` function passing it the Manager instance. That is
expected to callback to one or more of:
mgr.set_lang_info(...)
mgr.add_hooks_handler(...)
"""
module_canon_path = canonicalizePath(module_path)
if module_canon_path in self._registered_module_canon_paths:
return
module_dir, module_name = os.path.split(module_path)
module_name = splitext(module_name)[0]
module_full_name = "codeintel2." + module_name
if module_full_name in sys.modules:
module = sys.modules[module_full_name]
else:
iinfo = imp.find_module(module_name, [module_dir])
module = imp.load_module(module_name, *iinfo)
sys.modules[module_full_name] = module
setattr(codeintel2, module_name, module)
if hasattr(module, "register"):
log.debug("register `%s' support module", module_path)
try:
module.register(self)
except CodeIntelError as ex:
log.warn("error registering `%s' support module: %s",
module_path, ex)
except:
log.exception("unexpected error registering `%s' "
"support module", module_path)
self._registered_module_canon_paths.add(module_canon_path)
def set_lang_info(self, lang, silvercity_lexer=None, buf_class=None,
import_handler_class=None, cile_driver_class=None,
is_cpln_lang=False, langintel_class=None,
import_everything=False):
"""Called by register() functions in language support modules."""
if silvercity_lexer:
self.silvercity_lexer_from_lang[lang] = silvercity_lexer
if buf_class:
self.buf_class_from_lang[lang] = buf_class
if langintel_class:
self.langintel_class_from_lang[lang] = langintel_class
if import_handler_class:
self.import_handler_class_from_lang[lang] = import_handler_class
if cile_driver_class is not None:
self._is_citadel_from_lang[lang] = True
self.citadel.set_lang_info(lang, cile_driver_class,
is_cpln_lang=is_cpln_lang)
if is_cpln_lang:
self._is_cpln_from_lang[lang] = True
if import_everything:
self.db.import_everything_langs.add(lang)
def add_hook_handler(self, hook_handler):
"""Add a handler for various codeintel hooks.
@param hook_handler {hooks.HookHandler}
"""
assert isinstance(hook_handler, hooks.HookHandler)
assert hook_handler.name is not None, \
"hook handlers must have a name: %r.name is None" % hook_handler
for lang in hook_handler.langs:
self._hook_handlers_from_lang[lang].append(hook_handler)
def finalize(self, timeout=None):
if self.citadel is not None:
self.citadel.finalize()
if self.isAlive():
self.stop()
self.join(timeout)
self.idxr.finalize()
if self.db is not None:
try:
self.db.save()
except Exception:
log.exception("error saving database")
self.db = None # break the reference
# Proxy the batch update API onto our Citadel instance.
def batch_update(self, join=True, updater=None):
return self.citadel.batch_update(join=join, updater=updater)
def report_message(self, msg, details=None, notification_name="codeintel-message"):
"""Reports a unique codeintel message."""
log.info("%s: %s: %r", notification_name, msg, details)
def is_multilang(self, lang):
"""Return True iff this is a multi-lang language.
I.e. Is this a language that supports embedding of different
programming languages. For example RHTML can have Ruby and
JavaScript content, HTML can have JavaScript content.
"""
try:
return issubclass(self.buf_class_from_lang[lang], UDLBuffer)
except KeyError:
return False # This typically happens if lang is Text
def is_xml_lang(self, lang):
try:
buf_class = self.buf_class_from_lang[lang]
except KeyError:
return False
return issubclass(buf_class, XMLParsingBufferMixin)
def is_cpln_lang(self, lang):
"""Return True iff codeintel supports completion (i.e. autocomplete
and calltips) for this language."""
return lang in self._is_cpln_from_lang
def get_cpln_langs(self):
return list(self._is_cpln_from_lang.keys())
def is_citadel_lang(self, lang):
"""Returns True if the given lang has been registered and
is a Citadel-based language.
A "Citadel-based" language is one that uses CIX/CIDB/CITDL tech for
its codeintel. Note that currently not all Citadel-based langs use
the Citadel system for completion (e.g. Tcl).
"""
return lang in self._is_citadel_from_lang
def get_citadel_langs(self):
return list(self._is_citadel_from_lang.keys())
def langintel_from_lang(self, lang):
if lang not in self._langintel_from_lang_cache:
try:
langintel_class = self.langintel_class_from_lang[lang]
except KeyError:
langintel = ImplicitLangIntel(lang, self)
else:
langintel = langintel_class(self)
self._langintel_from_lang_cache[lang] = langintel
return self._langintel_from_lang_cache[lang]
def hook_handlers_from_lang(self, lang):
return self._hook_handlers_from_lang.get(lang, []) \
+ self._hook_handlers_from_lang.get("*", [])
# XXX
# XXX Cache bufs based on (path, lang) so can share bufs. (weakref)
# XXX
def buf_from_koIDocument(self, doc, env=None):
lang = doc.language
path = doc.displayPath
if doc.isUntitled:
path = join("<Unsaved>", path)
accessor = KoDocumentAccessor(doc,
self.silvercity_lexer_from_lang.get(lang))
encoding = doc.encoding.python_encoding_name
try:
buf_class = self.buf_class_from_lang[lang]
except KeyError:
# No langintel is defined for this class, check if the koILanguage
# defined is a UDL koILanguage.
from koUDLLanguageBase import KoUDLLanguage
if isinstance(UnwrapObject(doc.languageObj), KoUDLLanguage):
return UDLBuffer(self, accessor, env, path, encoding, lang=lang)
# Not a UDL language - use the implicit buffer then.
return ImplicitBuffer(lang, self, accessor, env, path, encoding)
else:
buf = buf_class(self, accessor, env, path, encoding)
return buf
def buf_from_content(self, content, lang, env=None, path=None,
encoding=None):
lexer = self.silvercity_lexer_from_lang.get(lang)
accessor = SilverCityAccessor(lexer, content)
try:
buf_class = self.buf_class_from_lang[lang]
except KeyError:
buf = ImplicitBuffer(lang, self, accessor, env, path, encoding)
else:
buf = buf_class(self, accessor, env, path, encoding)
return buf
def binary_buf_from_path(self, path, lang=None, env=None):
buf = BinaryBuffer(lang, self, env, path)
return buf
MAX_FILESIZE = 1 * 1024 * 1024 # 1MB
def buf_from_path(self, path, lang=None, env=None, encoding=None):
# Detect and abort on large files - to avoid memory errors, bug 88487.
# The maximum size is 1MB - someone uses source code that big?
filestat = os.stat(path)
if filestat.st_size > self.MAX_FILESIZE:
log.warn(
"File %r has size greater than 1MB (%d)", path, filestat.st_size)
raise CodeIntelError('File too big. Size: %d bytes, path: %r' % (
filestat.st_size, path))
if lang is None or encoding is None:
import textinfo
ti = textinfo.textinfo_from_path(path, encoding=encoding,
follow_symlinks=True)
if lang is None:
lang = (hasattr(ti.langinfo, "komodo_name")
and ti.langinfo.komodo_name
or ti.langinfo.name)
if not ti.is_text:
return self.binary_buf_from_path(path, lang, env)
encoding = ti.encoding
content = ti.text
else:
content = codecs.open(path, 'rb', encoding).read()
# TODO: Re-instate this when have solution for CILE test failures
# that this causes.
# if not isabs(path) and not path.startswith("<Unsaved>"):
# path = abspath(path)
return self.buf_from_content(content, lang, env, path, encoding)
#---- Completion Evaluation Session/Queue handling
# The current eval session (an Evaluator instance). A current session's
# lifetime is as follows:
# - [self._get()] Starts when the evaluator thread (this class) takes it
# off the queue.
# - [self._put()] Can be aborted (via sess.ctlr.abort()) if a new eval
# request comes in.
# - [eval_sess.eval()] Done when the session completes either by
# (1) an unexpected error during sess.eval() or (2) sess.ctlr.is_done()
# after sess.eval().
_curr_eval_sess = None
def request_eval(self, evalr):
"""Request evaluation of the given completion.
"evalr" is the Evaluator instance.
The manager has an evaluation thread on which this evalr will be
scheduled. Only one request is ever eval'd at one time. A new
        request will cause an existing one to be aborted, and requests made in
the interim will be trumped by this new one.
Dev Notes:
- XXX Add a timeout to the put and raise error on timeout?
"""
# evalr.eval(self)
self.put((evalr, False))
def request_reeval(self, evalr):
"""Occassionally evaluation will need to defer until something (e.g.
scanning into the CIDB) is one. These sessions will re-request
evaluation via this method.
"""
self.put((evalr, True))
def stop(self):
self.put((None, None)) # Sentinel to tell thread mainloop to stop.
def run(self):
while 1:
eval_sess, is_reeval = self.get()
if eval_sess is None: # Sentinel to stop.
break
try:
eval_sess.eval(self)
except:
try:
self._handle_eval_sess_error(eval_sess)
except:
pass
finally:
self._curr_eval_sess = None
self.db.report_event(None)
def _handle_eval_sess_error(self, eval_sess):
exc_info = sys.exc_info()
tb_path, tb_lineno, tb_func \
= traceback.extract_tb(exc_info[2])[-1][:3]
if hasattr(exc_info[0], "__name__"):
exc_str = "%s: %s" % (exc_info[0].__name__, exc_info[1])
else: # string exception
exc_str = exc_info[0]
eval_sess.ctlr.error("error evaluating %s: %s "
"(%s#%s in %s)", eval_sess, exc_str,
tb_path, tb_lineno, tb_func)
log.exception("error evaluating %s" % eval_sess)
eval_sess.ctlr.done("unexpected eval error")
def _put(self, xxx_todo_changeme):
# Only consider re-evaluation if we are still on the same eval
# session.
(eval_sess, is_reeval) = xxx_todo_changeme
if is_reeval and self._curr_eval_sess is not eval_sess:
return
replace = True
if hasattr(eval_sess, "ctlr") and eval_sess.ctlr and eval_sess.ctlr.keep_existing:
# Allow multiple eval sessions; currently used for variable
# highlighting (bug 80095), may pick up additional uses. Note that
# these sessions can still get wiped out by a single replace=False
# caller.
replace = False
if replace:
# We only allow *one* eval session at a time.
# - Drop a possible accumulated eval session.
if len(self.queue):
self.queue.clear()
## - Abort the current eval session.
if not is_reeval and self._curr_eval_sess is not None:
self._curr_eval_sess.ctlr.abort()
# Lazily start the eval thread.
if not self.isAlive():
self.start()
Queue._put(self, (eval_sess, is_reeval))
if replace:
assert len(self.queue) == 1
def _get(self):
eval_sess, is_reeval = Queue._get(self)
if is_reeval:
assert self._curr_eval_sess is eval_sess
else:
self._curr_eval_sess = eval_sess
return eval_sess, is_reeval
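# A rough driver sketch for this class (kept as comments; the paths and the exact
# call sequence are assumptions based on the docstrings above, not a verbatim recipe):
#
#     mgr = Manager(db_base_dir="/tmp/codeintel-db")
#     mgr.upgrade()        # upgrade or reset the db if necessary
#     mgr.initialize()     # starts the indexer
#     try:
#         buf = mgr.buf_from_path("example.py", lang="Python")
#         print(mgr.is_cpln_lang("Python"), mgr.is_citadel_lang("Python"))
#     finally:
#         mgr.finalize()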
|
|
from __future__ import annotations
import atexit
import copy
import dataclasses
import enum
import json
import logging
import os
import sys
import time
import tkinter
from pathlib import Path
from tkinter import messagebox, ttk
from typing import Any, Callable, Iterator, List, Type, TypeVar, overload
import dacite
from pygments import styles, token
import porcupine
from porcupine import dirs, images, utils
_log = logging.getLogger(__name__)
class LineEnding(enum.Enum):
r"""
This :mod:`enum` has these members representing different ways to write
newline characters to files:
.. data:: CR
``\r``, aka "Mac line endings".
.. data:: LF
``\n``, aka "Linux/Unix line endings".
.. data:: CRLF
``\r\n``, aka "Windows line endings".
Python's :func:`open` function translates all of these to the string
``'\n'`` when reading files and uses a platform-specific default when
writing files.
There are 3 ways to represent line endings in Porcupine, and
different things want the line ending represented in different ways:
* The strings ``'\r'``, ``'\n'`` and ``'\r\n'``. For example,
:func:`open` line endings are specified like this.
* The strings ``'CR'``, ``'LF'`` and ``'CRLF'``. Line endings are
typically defined this way in configuration files, such as
`editorconfig <https://editorconfig.org/>`_ files.
* This enum. I recommend using this to avoid typos.
For example, ``LineEnding[some_string_from_user]`` (see below)
raises an error if the string is invalid.
Convert between this enum and the different kinds of strings like this:
* Enum to backslashy string: ``LineEnding.CRLF.value == '\r\n'``
* Enum to human readable string: ``LineEnding.CRLF.name == 'CRLF'``
* Backslashy string to enum: ``LineEnding('\r\n') == LineEnding.CRLF``
* Human readable string to enum: ``LineEnding['CRLF'] == LineEnding.CRLF``
Use ``LineEnding(os.linesep)`` to get the platform-specific default.
"""
CR = "\r"
LF = "\n"
CRLF = "\r\n"
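# Hedged illustration (not part of the original module): the conversions listed
# in the docstring above, wrapped in a function so importing this file stays
# side-effect free.
def _line_ending_examples() -> None:
    assert LineEnding.CRLF.value == "\r\n"  # enum -> backslashy string
    assert LineEnding.CRLF.name == "CRLF"  # enum -> human readable string
    assert LineEnding("\r\n") is LineEnding.CRLF  # backslashy string -> enum
    assert LineEnding["CRLF"] is LineEnding.CRLF  # human readable string -> enum
    assert LineEnding(os.linesep) in LineEnding  # platform-specific default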
def _type_check(type_: object, obj: object) -> object:
# dacite tricks needed for validating e.g. objects of type Optional[Path]
@dataclasses.dataclass
class ValueContainer:
__annotations__ = {"value": type_}
parsed = dacite.from_dict(ValueContainer, {"value": obj})
return parsed.value # type: ignore
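# Hedged sketch (not part of the original module) of how the dacite trick above
# behaves: a value matching the annotation passes through unchanged, while a
# mismatched value makes dacite raise (WrongTypeError, as far as I know).
# Defining this helper has no side effects.
def _type_check_examples() -> None:
    from typing import Optional

    assert _type_check(Optional[Path], Path("/tmp")) == Path("/tmp")
    assert _type_check(Optional[Path], None) is None
    try:
        _type_check(int, "not an int")
    except Exception:  # dacite rejects the mismatched value
        pass
    else:
        raise AssertionError("expected a type-check failure")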
class _Option:
def __init__(
self, name: str, default: object, type_: Any, converter: Callable[[Any], Any]
) -> None:
default = _type_check(type_, default)
self.name = name
self.value = default
self.default = default
self.type = type_
self.converter = converter
@dataclasses.dataclass
class _UnknownOption:
value: Any
call_converter: bool
def _default_converter(value: Any) -> Any:
return value
# includes the parent
def _get_children_recursively(parent: tkinter.Misc) -> Iterator[tkinter.Misc]:
yield parent
for child in parent.winfo_children():
yield from _get_children_recursively(child)
class Settings:
def __init__(self, change_event_widget: tkinter.Misc | None, change_event_format: str):
# '<<Foo:{}>>'
assert "{}" in change_event_format
assert change_event_format.startswith("<<")
assert change_event_format.endswith(">>")
self._options: dict[str, _Option] = {}
self._unknown_options: dict[str, _UnknownOption] = {}
self._change_event_widget = change_event_widget # None to notify all widgets
self._change_event_format = change_event_format
def add_option(
self,
option_name: str,
default: Any,
type_: Any | None = None,
*,
converter: Callable[[Any], Any] = _default_converter,
exist_ok: bool = False,
) -> None:
"""Add a custom option.
The type of *default* determines how :func:`set_` and :func:`get` behave.
For example, if *default* is a string, then
calling :func:`set_` with a value that isn't a string or
calling :func:`get` with the type set to something other than ``str``
is an error. You can also provide a custom type with the *type*
argument, e.g. ``add_option('foo', None, Optional[pathlib.Path])``.
If you are adding a global option (see :class:`Settings` for non-global
options), use only JSON-safe types. Let me know if this limitation is
too annoying.
If you are **not** adding a global option, you
can also specify a *converter* that takes the value in the
configuration file as an argument and returns an instance of *type*.
For example, ``pygments_lexer`` is set to a string like
"pygments.lexers.Foo" in the config file, even though it appears as a
class in the settings object. That's implemented similarly to this::
def import_lexer_class(name: str) -> something:
...
filetab.settings.add_option(
'pygments_lexer',
pygments.lexers.TextLexer,
...
converter=import_lexer_class)
By default, the converter returns its argument unchanged.
Do not use a lambda function as the converter,
because the settings must be picklable.
If an option with the same name exists already, an error is raised by
default, but if ``exist_ok=True`` is given, then adding the same
option again is allowed. When this happens, an error is raised if
*default*, *type* or *converter* doesn't match what was passed in when
the option was added for the first time.
"""
if type_ is None:
type_ = type(default)
assert type_ is not None
if option_name in self._options:
if not exist_ok:
raise RuntimeError(f"there's already an option named {option_name!r}")
old_option = self._options[option_name]
assert default == old_option.default
assert type_ == old_option.type
assert converter == old_option.converter
return
option = _Option(option_name, default, type_, converter)
self._options[option_name] = option
try:
unknown = self._unknown_options.pop(option_name)
except KeyError:
pass # nothing relevant in config file, use default
else:
# Error handling here because it's not possible to fail early when
# an option goes to _unknown_options, and bad data in a config file
# shouldn't cause add_option() and the rest of a plugin's setup()
# to fail.
try:
if unknown.call_converter:
self.set(option_name, converter(unknown.value))
else:
self.set(option_name, unknown.value)
except Exception:
# can be an error from converter
_log.exception(f"setting {option_name!r} to {unknown.value!r} failed")
def set(
self,
option_name: str,
value: object,
*,
from_config: bool = False,
call_converter: bool | None = None,
) -> None:
"""Set the value of an opiton.
Set ``from_config=True`` if the value comes from a configuration
file (see :func:`add_option`). That does two things:
* The converter given to :func:`add_option` will be used.
* If the option hasn't been added with :func:`add_option` yet, then
the value won't be set immediately, but instead it gets set
later when the option is added.
You can specify ``call_converter`` to force the converter to be or
to not be called.
This function is not named ``set`` to avoid conflicting with the
built-in :class:`set` class.
"""
# ...even though this method isn't named 'set_'. But the docstring is
# used in settings.rst to document a global "function".
if call_converter is None:
call_converter = from_config
if option_name not in self._options and from_config:
self._unknown_options[option_name] = _UnknownOption(value, call_converter)
return
option = self._options[option_name]
if call_converter:
value = option.converter(value)
value = _type_check(option.type, value)
# don't create change events when nothing changes (helps avoid infinite recursion)
if option.value == value:
return
option.value = value
event_name = self._change_event_format.format(option_name)
_log.debug(f"{option_name} was set to {value!r}, generating {event_name} events")
if self._change_event_widget is None:
try:
main_window = porcupine.get_main_window()
except RuntimeError as e:
# on porcupine startup, plugin disable list needs to be set before main window exists
if option_name != "disabled_plugins":
raise e
else:
for widget in _get_children_recursively(main_window):
widget.event_generate(event_name)
else:
self._change_event_widget.event_generate(event_name)
# I don't like how this requires overloads for every type
# https://stackoverflow.com/q/61471700
# fmt: off
@overload
def get(self, option_name: str, type_: Type[Path]) -> Path: ...
@overload
def get(self, option_name: str, type_: Type[LineEnding]) -> LineEnding: ...
@overload
def get(self, option_name: str, type_: Type[str]) -> str: ...
@overload
def get(self, option_name: str, type_: Type[bool]) -> bool: ...
@overload
def get(self, option_name: str, type_: Type[int]) -> int: ...
@overload
def get(self, option_name: str, type_: object) -> Any: ...
# fmt: on
def get(self, option_name: str, type_: Any) -> Any:
"""
Return the current value of an option.
*type_* should be e.g. ``str`` or ``int`` depending on what type the option is.
You can also specify ``object`` to allow any type.
This method works correctly for :class:`str` and :class:`int`,
but sometimes it returns Any because mypy sucks::
foo = settings.get('something', str)
reveal_type(foo) # str
from pathlib import Path
shitty_bar = settings.get('something', Optional[Path])
reveal_type(shitty_bar) # Any
Use a type annotation to work around this (and make sure to write the
same type two times)::
good_bar: Path | None = settings.get('something', Optional[Path])
reveal_type(good_bar) # Optional[Path]
Before Python 3.10, you can't use the new ``|`` syntax as an argument to ``settings.get()``,
even though it otherwise works with ``from __future__ import annotations``.
The same goes for built-in generics,
such as ``list[str]`` with lower-case ``list``.
Options of mutable types are returned as copies, so things like
``settings.get('something', List[str])`` always return a new list.
If you want to change a setting like that, you need to first get a copy
of the current value, then modify the copy, and finally :func:`set_` it
back. This is an easy way to make sure that change events run every
time the value changes.
"""
result = self._options[option_name].value
result = _type_check(type_, result)
return copy.deepcopy(result) # mutating wouldn't trigger change events
def debug_dump(self) -> None:
"""Print all settings and their values. This is useful for debugging."""
print(f"{len(self._options)} known options (add_option called)")
for name, option in self._options.items():
print(f" {name} = {option.value!r} (type: {option.type!r})")
print()
print(f"{len(self._unknown_options)} unknown options (add_option not called)")
for name, unknown in self._unknown_options.items():
string = f" {name} = {unknown.value!r}"
if not unknown.call_converter:
string += " (converter function will not be called)"
print(string)
print()
# TODO: document state methods?
def get_state(self) -> dict[str, _UnknownOption]:
result = self._unknown_options.copy()
for name, option in self._options.items():
value = self.get(name, object)
if value != option.default:
result[name] = _UnknownOption(value, call_converter=False)
return result
def set_state(self, state: dict[str, _UnknownOption]) -> None:
for name, unknown in state.items():
self.set(name, unknown.value, from_config=True, call_converter=unknown.call_converter)
_global_settings = Settings(None, "<<SettingChanged:{}>>")
add_option = _global_settings.add_option
set_ = _global_settings.set
get = _global_settings.get
debug_dump = _global_settings.debug_dump
def reset(option_name: str) -> None:
"""Set an option to its default value given to :func:`add_option`."""
set_(option_name, _global_settings._options[option_name].default)
def reset_all() -> None:
"""
Reset all settings, including the ones not shown in the setting dialog.
Clicking the reset button of the setting dialog runs this function.
"""
_global_settings._unknown_options.clear()
for name in _global_settings._options:
reset(name)
# Enum options are stored as name strings, e.g. 'CRLF' for LineEnding.CRLF
# TODO: this is a hack
def _value_to_save(obj: object) -> object:
if isinstance(obj, enum.Enum):
return obj.name
return obj
def get_json_path() -> Path:
return Path(dirs.user_config_dir) / "settings.json"
def save() -> None:
"""Save the settings to the config file.
Note that :func:`porcupine.run` always calls this before it returns,
so usually you don't need to worry about calling this yourself.
"""
with get_json_path().open("w", encoding="utf-8") as file:
json.dump(
{
name: _value_to_save(unknown_obj.value)
for name, unknown_obj in _global_settings.get_state().items()
},
file,
indent=4,
)
file.write("\n")
def _load_from_file() -> None:
try:
with get_json_path().open("r", encoding="utf-8") as file:
options = json.load(file)
except FileNotFoundError:
return
for name, value in options.items():
set_(name, value, from_config=True)
# pygments styles can be uninstalled; we must not end up with an invalid pygments style that way
def _check_pygments_style(name: str) -> str:
styles.get_style_by_name(name) # may raise error that will get logged
return name
# plugin disable list is needed on porcupine startup before anything is done with tkinter
#
# undocumented on purpose, don't use in plugins
def init_enough_for_using_disabled_plugins_list() -> None:
try:
_load_from_file()
except Exception:
_log.exception(f"reading {get_json_path()} failed")
add_option("disabled_plugins", [], List[str])
def _init_global_gui_settings() -> None:
add_option("pygments_style", "stata-dark", converter=_check_pygments_style)
add_option("default_line_ending", LineEnding(os.linesep), converter=LineEnding.__getitem__)
fixedfont = tkinter.font.Font(name="TkFixedFont", exists=True)
if fixedfont["size"] < 0:
# negative sizes have a special meaning in Tk, and i don't care much
# about it for porcupine, using stupid hard-coded default instead
fixedfont.config(size=10)
if sys.platform == "win32":
# Windows default monospace font sucks, see #245
default_font_family = "Consolas"
else:
# fixedfont['family'] is typically e.g. 'Monospace', that's not included in
# tkinter.font.families() because it refers to another font family that is
# in tkinter.font.families()
default_font_family = fixedfont.actual("family")
add_option("font_family", default_font_family)
add_option("font_size", fixedfont["size"])
# keep TkFixedFont up to date with settings
def update_fixedfont(event: tkinter.Event[tkinter.Misc] | None) -> None:
# can't bind to get_tab_manager() as recommended in docs because tab
# manager isn't ready yet when settings get inited
if event is None or event.widget == porcupine.get_main_window():
fixedfont.config(family=get("font_family", str), size=get("font_size", int))
porcupine.get_main_window().bind("<<SettingChanged:font_family>>", update_fixedfont, add=True)
porcupine.get_main_window().bind("<<SettingChanged:font_size>>", update_fixedfont, add=True)
update_fixedfont(None)
def _create_dialog_content() -> ttk.Frame:
dialog = tkinter.Toplevel()
dialog.withdraw()
dialog.title("Porcupine Settings")
dialog.protocol("WM_DELETE_WINDOW", dialog.withdraw)
dialog.bind("<Escape>", (lambda event: dialog.withdraw()), add=True)
def confirm_and_reset_all() -> None:
if messagebox.askyesno(
"Reset Settings", "Are you sure you want to reset all settings?", parent=dialog
):
reset_all()
big_frame = ttk.Frame(dialog)
big_frame.pack(fill="both", expand=True)
content = ttk.Frame(big_frame)
content.pack(fill="both", expand=True, padx=5, pady=5)
ttk.Separator(big_frame).pack(fill="x")
buttonframe = ttk.Frame(big_frame, padding=5)
buttonframe.pack(fill="x")
ttk.Button(
buttonframe, text="Reset all settings", command=confirm_and_reset_all, width=15
).pack(side="left")
ttk.Button(buttonframe, text="OK", command=dialog.withdraw, width=10).pack(side="right")
content.grid_columnconfigure(0, weight=1)
content.grid_columnconfigure(1, weight=1)
return content
_dialog_content: ttk.Frame | None = None
def show_dialog() -> None:
"""Show the "Porcupine Settings" dialog.
This function is called when the user opens the dialog from the menu.
"""
dialog = get_dialog_content().winfo_toplevel()
dialog.transient(porcupine.get_main_window())
dialog.deiconify()
def get_dialog_content() -> ttk.Frame:
"""Return the widget where setting changing widgets should be added.
Use ``settings.get_dialog_content().winfo_toplevel()`` to access the dialog
itself. It's a :class:`tkinter.Toplevel`.
Use grid with the returned widget. Its columns are configured like this::
,-----------------------------------------------------------.
| Porcupine Settings | _ | O | X |
|-----------------------------------------------------------|
| : : |
| : :col|
| column 0 : column 1 :umn|
| : : 2 |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
| : : |
|===========================================================|
| ,---------. ,---------. |
| | Reset | | OK | |
| `---------' `---------' |
`-----------------------------------------------------------'
Column 0 typically contains labels such as "Font Family:", and column 1
contains widgets for changing the settings. Column 2 is used for displaying
|triangle| when the user has entered an invalid value for the setting.
"""
if _dialog_content is None:
raise RuntimeError("porcupine isn't running")
return _dialog_content
def _get_blank_triangle_sized_image(*, _cache: list[tkinter.PhotoImage] = []) -> tkinter.PhotoImage:
# see images/__init__.py
if not _cache:
_cache.append(
tkinter.PhotoImage(
width=images.get("triangle").width(), height=images.get("triangle").height()
)
)
atexit.register(_cache.clear)
return _cache[0]
_StrOrInt = TypeVar("_StrOrInt", str, int)
def _create_validation_triangle(
widget: ttk.Entry,
option_name: str,
type_: Type[_StrOrInt],
callback: Callable[[_StrOrInt], bool],
) -> ttk.Label:
triangle = ttk.Label(widget.master)
var = tkinter.StringVar()
def var_changed(*junk: object) -> None:
value_string = var.get()
value: _StrOrInt | None
try:
value = type_(value_string)
except ValueError: # e.g. int('foo')
value = None
else:
if not callback(value):
value = None
if value is None:
triangle.config(image=images.get("triangle"))
else:
triangle.config(image=_get_blank_triangle_sized_image())
set_(option_name, value, from_config=True)
def setting_changed(junk: object = None) -> None:
var.set(str(_value_to_save(get(option_name, object))))
widget.bind(f"<<SettingChanged:{option_name}>>", setting_changed, add=True)
var.trace_add("write", var_changed)
setting_changed()
widget.config(textvariable=var)
return triangle
def _grid_widgets(
label_text: str, chooser: tkinter.Widget, triangle: tkinter.Widget | None
) -> None:
label = ttk.Label(chooser.master, text=label_text)
label.grid(column=0, sticky="w")
chooser.grid(row=label.grid_info()["row"], column=1, sticky="we", pady=5)
if triangle is not None:
triangle.grid(row=label.grid_info()["row"], column=2)
def add_entry(
option_name: str, text: str, validate_callback: Callable[[str], bool], **entry_kwargs: Any
) -> ttk.Entry:
"""Add a :class:`tkinter.ttk.Entry` to the setting dialog.
A label that displays *text* will be added next to the entry.
All ``**entry_kwargs`` go to :class:`tkinter.ttk.Entry`.
When the user types something into the entry, *validate_callback*
is called with the text of the entry as its only argument.
If it returns ``True``, then the option given by *option_name*
is set to the string that the user typed.
Otherwise |triangle| is shown.
"""
entry = ttk.Entry(get_dialog_content(), **entry_kwargs)
triangle = _create_validation_triangle(entry, option_name, str, validate_callback)
_grid_widgets(text, entry, triangle)
return entry
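# Hedged usage sketch (never called here; "porcupine_rocks" is a hypothetical
# option used only for illustration). A plugin's setup() could pair add_option
# with add_entry so that only "yes" or "no" is accepted:
def _example_entry_plugin_setup() -> None:
    add_option("porcupine_rocks", "yes", exist_ok=True)
    add_entry("porcupine_rocks", "Does Porcupine rock?", lambda text: text in ("yes", "no"))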
def add_checkbutton(option_name: str, **checkbutton_kwargs: Any) -> ttk.Checkbutton:
"""Add a :class:`tkinter.ttk.Checkbutton` to the setting dialog.
All ``**checkbutton_kwargs`` go to :class:`tkinter.ttk.Checkbutton`.
You can do this, for example::
from porcupine import settings
def do_something() -> None:
# 'bool' here is a keyword and should not be replaced with 'True' or 'False'
if settings.get("foobar", bool):
print("Foobar enabled")
else:
print("Foobar disabled")
def setup() -> None:
settings.add_option("foobar", False) # False is default value
settings.add_checkbutton("foobar", text="Enable foobar")
Currently it is not possible to display a |triangle| next to the
checkbutton. Let me know if you need it.
"""
checkbutton = ttk.Checkbutton(get_dialog_content(), **checkbutton_kwargs)
checkbutton.grid(column=0, columnspan=2, sticky="w", pady=2)
var = tkinter.BooleanVar()
def var_changed(*junk: object) -> None:
value = var.get()
set_(option_name, value)
def setting_changed(junk: object = None) -> None:
var.set(get(option_name, bool))
checkbutton.bind(f"<<SettingChanged:{option_name}>>", setting_changed, add=True)
var.trace_add("write", var_changed)
setting_changed()
checkbutton.config(variable=var)
return checkbutton
def add_combobox(option_name: str, text: str, **combobox_kwargs: Any) -> ttk.Combobox:
"""Add a :class:`tkinter.ttk.Combobox` to the setting dialog.
All ``**combobox_kwargs`` go to :class:`tkinter.ttk.Combobox`.
Usually you should pass at least ``values=list_of_strings``.
The content of the combobox is checked whenever it changes.
If it's in ``combobox['values']``
(given with the ``values=list_of_strings`` keyword argument or changed
later by configuring the returned combobox), then the option given by
*option_name* is set to the content of the combobox. The converter passed
to :func:`add_option` will be used. If the content of the combobox is not
in ``combobox['values']``, then |triangle| is shown.
"""
combo = ttk.Combobox(get_dialog_content(), **combobox_kwargs)
triangle = _create_validation_triangle(
combo, option_name, str, (lambda value: value in combo["values"])
)
_grid_widgets(text, combo, triangle)
return combo
def add_spinbox(option_name: str, text: str, **spinbox_kwargs: Any) -> tkinter.ttk.Spinbox:
"""Add a :class:`tkinter.ttk.Spinbox` to the setting dialog.
All ``**spinbox_kwargs`` go to :class:`tkinter.ttk.Spinbox`.
Usually you should pass at least ``from_=some_integer, to=another_integer``.
The content of the spinbox is checked whenever it changes.
If it's a valid integer between ``spinbox['from']`` and ``spinbox['to']`` (inclusive),
then the option given by *option_name* is set to the :class:`int`.
Otherwise |triangle| is shown.
"""
spinbox = ttk.Spinbox(get_dialog_content(), **spinbox_kwargs)
triangle = _create_validation_triangle(
spinbox, option_name, int, lambda value: int(spinbox["from"]) <= value <= int(spinbox["to"])
)
_grid_widgets(text, spinbox, triangle)
return spinbox
def _get_colors(style_name: str) -> tuple[str, str]:
style = styles.get_style_by_name(style_name)
bg = style.background_color
fg = style.style_for_token(token.String)["color"] or style.style_for_token(token.Text)["color"]
if fg:
fg = "#" + fg
else:
# yes, style.default_style can be '#rrggbb', '' or nonexistent
# this is undocumented
#
# >>> from pygments.styles import *
# >>> [getattr(get_style_by_name(name), 'default_style', '???')
# ... for name in get_all_styles()]
# ['', '', '', '', '', '', '???', '???', '', '', '', '',
# '???', '???', '', '#cccccc', '', '', '???', '', '', '', '',
# '#222222', '', '', '', '???', '']
fg = getattr(style, "default_style", "") or utils.invert_color(bg)
return (fg, bg)
# TODO: document this?
def add_pygments_style_button(option_name: str, text: str) -> None:
var = tkinter.StringVar()
# not using ttk.Menubutton because i want custom colors
menubutton = tkinter.Menubutton(
get_dialog_content(), textvariable=var, takefocus=True, highlightthickness=1
)
menu = tkinter.Menu(menubutton, tearoff=False)
menubutton.config(menu=menu)
def var_to_settings(*junk: object) -> None:
set_(option_name, var.get())
def settings_to_var_and_colors(junk: object = None) -> None:
style_name = get(option_name, object)
var.set(style_name)
fg, bg = _get_colors(style_name)
menubutton.config(foreground=fg, background=bg, highlightcolor=fg, highlightbackground=bg)
menubutton.bind(f"<<SettingChanged:{option_name}>>", settings_to_var_and_colors, add=True)
var.trace_add("write", var_to_settings)
# Not done when creating the button, because doing so can slow down porcupine startup
def fill_menubutton(junk_event: object) -> None:
menu.delete(0, "end")
for index, style_name in enumerate(sorted(styles.get_all_styles())):
fg, bg = _get_colors(style_name)
menu.add_radiobutton(
label=style_name,
value=style_name,
variable=var,
foreground=fg,
background=bg,
# swapped colors
activeforeground=bg,
activebackground=fg,
columnbreak=(index != 0 and index % 20 == 0),
)
settings_to_var_and_colors()
menubutton.bind("<Map>", fill_menubutton, add=True)
_grid_widgets(text, menubutton, None)
def add_label(text: str) -> ttk.Label:
"""Add text to the setting dialog.
This is useful for explaining what some options do with more than a few words.
The text is always as wide as the dialog is, even when the dialog is resized.
"""
label = ttk.Label(get_dialog_content(), text=text)
label.grid(column=0, columnspan=3, sticky="we", pady=10)
get_dialog_content().bind(
"<Configure>", (lambda event: label.config(wraplength=event.width)), add=True
)
return label
# TODO: document this
def remember_pane_size(
panedwindow: utils.PanedWindow, pane: tkinter.Misc, option_name: str, default_size: int
) -> None:
# exist_ok=True to allow e.g. calling this once for each tab
add_option(option_name, default_size, int, exist_ok=True)
def settings_to_gui(junk: object = None) -> None:
if panedwindow["orient"] == "horizontal":
panedwindow.paneconfig(pane, width=get(option_name, int))
else:
panedwindow.paneconfig(pane, height=get(option_name, int))
def gui_to_settings() -> None:
if panedwindow["orient"] == "horizontal":
set_(option_name, pane.winfo_width())
else:
set_(option_name, pane.winfo_height())
settings_to_gui()
pane.bind("<Map>", settings_to_gui, add=True)
# after_idle helps with accuracy if you move mouse really fast
panedwindow.bind(
"<ButtonRelease-1>", (lambda e: panedwindow.after_idle(gui_to_settings)), add=True
)
def use_pygments_fg_and_bg(
widget: tkinter.Misc,
callback: Callable[[str, str], object],
*,
option_name: str = "pygments_style",
) -> None:
"""Run a callback whenever the pygments theme changes.
The callback no longer runs once ``widget`` has been destroyed. It is
called with the foreground and background color of the pygments theme as
arguments.
"""
def on_style_changed(junk: object = None) -> None:
style = styles.get_style_by_name(get(option_name, str))
# Similar to _get_colors() but doesn't use the color of strings
bg = style.background_color
fg = getattr(style, "default_style", "") or utils.invert_color(bg)
callback(fg, bg)
widget.bind(f"<<SettingChanged:{option_name}>>", on_style_changed, add=True)
on_style_changed()
def _is_monospace(font_family: str) -> bool:
# Ignore weird fonts starting with @ (happens on Windows)
if font_family.startswith("@"):
return False
# I don't want to create font objects just for this, lol
tcl_interpreter = get_dialog_content().tk
# https://core.tcl-lang.org/tk/info/3767882e06
if "emoji" in font_family.lower():
return False
# Let's first ask Tcl whether the font is fixed. This is fastest but
# returns the wrong result for some fonts that are not actually monospace.
if not tcl_interpreter.call("font", "metrics", (font_family, "12"), "-fixed"):
return False
# In non-monospace fonts, i is very narrow and m is very wide.
# Also, make sure that bolding or italic doesn't change the width.
sizes = [
tcl_interpreter.call("font", "measure", (font_family, "12"), "iii"),
tcl_interpreter.call("font", "measure", (font_family, "12"), "mmm"),
tcl_interpreter.call("font", "measure", (font_family, "12", "bold"), "mmm"),
tcl_interpreter.call("font", "measure", (font_family, "12", "italic"), "mmm"),
]
# Allow off-by-one errors, just in case. Don't know if they ever actually happen.
return max(sizes) - min(sizes) <= 1
def _get_monospace_font_families() -> list[str]:
cache_path = Path(dirs.user_cache_dir) / "font_cache.json"
all_families = sorted(set(tkinter.font.families()))
# This is surprisingly slow when there are lots of fonts. Let's cache.
try:
with cache_path.open("r") as file:
cache = json.load(file)
# all_families stored to cache in case user installs more fonts
if cache["version"] == 2 and cache["all_families"] == all_families:
_log.debug(f"Taking list of monospace families from {cache_path}")
return cache["monospace_families"]
except FileNotFoundError:
pass
except Exception:
_log.error(f"unexpected {cache_path} reading error", exc_info=True)
_log.warning(f"Can't use {cache_path}. Starting Porcupine might take a while.")
monospace_families = list(filter(_is_monospace, all_families))
try:
with cache_path.open("w") as file:
json.dump(
{
"version": 2,
"all_families": all_families,
"monospace_families": monospace_families,
},
file,
)
_log.debug(f"Wrote {cache_path}")
except Exception:
_log.error(f"unexpected {cache_path} writing error", exc_info=True)
return monospace_families
def _fill_dialog_content_with_defaults() -> None:
start_time = time.perf_counter()
monospace_families = _get_monospace_font_families()
_log.debug(f"Found monospace fonts in {round((time.perf_counter() - start_time)*1000)}ms")
add_combobox("font_family", "Font family:", values=monospace_families)
add_spinbox("font_size", "Font size:", from_=3, to=1000)
add_combobox(
"default_line_ending", "Default line ending:", values=[ending.name for ending in LineEnding]
)
add_pygments_style_button("pygments_style", "Pygments style for editing:")
# undocumented on purpose, don't use in plugins
def init_the_rest_after_initing_enough_for_using_disabled_plugins_list() -> None:
global _dialog_content
assert _dialog_content is None
_log.debug("initializing continues")
_init_global_gui_settings()
_dialog_content = _create_dialog_content()
_fill_dialog_content_with_defaults()
_log.debug("initialized")
|
|
# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Container Driver for shares.
This driver uses a container as a share server.
The current implementation assumes that a container started by Docker is
plugged into a Linux bridge, and that all interfaces that need to talk to
each other reside in an OVS bridge."""
import re
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from manila import exception
from manila.i18n import _, _LI, _LW
from manila.share import driver
from manila import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
container_opts = [
cfg.StrOpt("container_linux_bridge_name",
default="docker0",
required=True,
help="Linux bridge used by container hypervisor to plug "
"host-side veth to. It will be unplugged from here "
"by the driver."),
cfg.StrOpt("container_ovs_bridge_name",
default="br-int",
required=True,
help="OVS bridge to use to plug a container to."),
cfg.BoolOpt("container_cifs_guest_ok",
default=True,
help="Determines whether to allow guest access to CIFS share "
"or not."),
cfg.StrOpt("container_image_name",
default="manila-docker-container",
help="Image to be used for a container-based share server."),
cfg.StrOpt("container_helper",
default="manila.share.drivers.container.container_helper."
"DockerExecHelper",
help="Container helper which provides container-related "
"operations to the driver."),
cfg.StrOpt("container_protocol_helper",
default="manila.share.drivers.container.protocol_helper."
"DockerCIFSHelper",
help="Helper which facilitates interaction with share server."),
cfg.StrOpt("container_storage_helper",
default="manila.share.drivers.container.storage_helper."
"LVMHelper",
help="Helper which facilitates interaction with storage "
"solution used to actually store data. By default LVM "
"is used to provide storage for a share."),
]
class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
def __init__(self, *args, **kwargs):
super(ContainerShareDriver, self).__init__([True], *args, **kwargs)
self.configuration.append_config_values(container_opts)
self.backend_name = self.configuration.safe_get(
"share_backend_name") or "Docker"
self.container = importutils.import_class(
self.configuration.container_helper)(
configuration=self.configuration)
self.storage = importutils.import_class(
self.configuration.container_storage_helper)(
configuration=self.configuration)
self._helpers = {}
def _get_helper(self, share):
if share["share_proto"].upper() == "CIFS":
helper = self._helpers.get("CIFS")
if helper is not None:
return helper(self.container,
share=share,
config=self.configuration)
self._helpers["CIFS"] = importutils.import_class(
self.configuration.container_protocol_helper)
return self._helpers["CIFS"](self.container,
share=share,
config=self.configuration)
else:
raise exception.InvalidShare(
reason=_("Wrong, unsupported or disabled protocol."))
def _update_share_stats(self):
data = {
'share_backend_name': self.backend_name,
'storage_protocol': 'CIFS',
'reserved_percentage':
self.configuration.reserved_share_percentage,
'consistency_group_support': None,
'snapshot_support': False,
'create_share_from_snapshot_support': False,
'driver_name': 'ContainerShareDriver',
'pools': self.storage.get_share_server_pools()
}
super(ContainerShareDriver, self)._update_share_stats(data)
def create_share(self, context, share, share_server=None):
LOG.debug("Create share on server '%s'." % share_server["id"])
server_id = self._get_container_name(share_server["id"])
share_name = share.share_id
self.container.execute(
server_id,
["mkdir", "-m", "750", "/shares/%s" % share_name]
)
self.storage.provide_storage(share)
lv_device = self.storage._get_lv_device(share)
self.container.execute(
server_id,
["mount", lv_device, "/shares/%s" % share_name]
)
location = self._get_helper(share).create_share(server_id)
return location
@utils.synchronized('container_driver_delete_share_lock', external=True)
def delete_share(self, context, share, share_server=None):
LOG.debug("Deleting share %(share)s on server '%(server)s'." %
{"server": share_server["id"],
"share": share.share_id})
server_id = self._get_container_name(share_server["id"])
self._get_helper(share).delete_share(server_id)
self.container.execute(
server_id,
["umount", "/shares/%s" % share.share_id]
)
# (aovchinnikov): bug 1621784 manifests itself here as well as in
# storage helper. There is a chance that we won't be able to remove
# this directory, despite the fact that it is not shared anymore and
# already contains nothing. In such a case the driver should not fail
# share deletion, but should issue a warning.
try:
self.container.execute(
server_id,
["rm", "-fR", "/shares/%s" % share.share_id]
)
except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to remove /shares/%(share)s directory in "
"container %(cont)s."), {"share": share.share_id,
"cont": server_id})
LOG.error(e)
self.storage.remove_storage(share)
LOG.debug("Deletion of share %s is completed!", share.share_id)
def extend_share(self, share, new_size, share_server=None):
server_id = self._get_container_name(share_server["id"])
self.container.execute(
server_id,
["umount", "/shares/%s" % share.share_id]
)
self.storage.extend_share(share, new_size, share_server)
lv_device = self.storage._get_lv_device(share)
self.container.execute(
server_id,
["mount", lv_device, "/shares/%s" % share.share_id]
)
def ensure_share(self, context, share, share_server=None):
pass
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
server_id = self._get_container_name(share_server["id"])
LOG.debug("Updating access to share %(share)s at "
"share server %(share_server)s.",
{"share_server": share_server["id"],
"share": share.share_id})
self._get_helper(share).update_access(server_id,
access_rules, add_rules,
delete_rules)
def get_network_allocations_number(self):
return 1
def _get_container_name(self, server_id):
return "manila_%s" % server_id.replace("-", "_")
def do_setup(self, *args, **kwargs):
pass
def check_for_setup_error(self, *args, **kwargs):
host_id = self.configuration.safe_get("neutron_host_id")
neutron_class = importutils.import_class(
'manila.network.neutron.neutron_network_plugin.'
'NeutronNetworkPlugin'
)
actual_class = importutils.import_class(
self.configuration.safe_get("network_api_class"))
if host_id is None and issubclass(actual_class, neutron_class):
msg = _("%s requires neutron_host_id to be "
"specified.") % neutron_class
raise exception.ManilaException(msg)
elif host_id is None:
LOG.warning(_LW("neutron_host_id is not specified. This driver "
"might not work as expected without it."))
def _connect_to_network(self, server_id, network_info, host_veth):
LOG.debug("Attempting to connect container to neutron network.")
network_allocation = network_info['network_allocations'][0]
port_address = network_allocation.ip_address
port_mac = network_allocation.mac_address
port_id = network_allocation.id
self.container.execute(
server_id,
["ifconfig", "eth0", port_address, "up"]
)
self.container.execute(
server_id,
["ip", "link", "set", "dev", "eth0", "address", port_mac]
)
msg_helper = {
'id': server_id, 'veth': host_veth,
'lb': self.configuration.container_linux_bridge_name,
'ovsb': self.configuration.container_ovs_bridge_name,
'ip': port_address,
'network': network_info['neutron_net_id'],
'subnet': network_info['neutron_subnet_id'],
}
LOG.debug("Container %(id)s veth is %(veth)s.", msg_helper)
LOG.debug("Removing %(veth)s from %(lb)s.", msg_helper)
self._execute("brctl", "delif",
self.configuration.container_linux_bridge_name,
host_veth,
run_as_root=True)
LOG.debug("Plugging %(veth)s into %(ovsb)s.", msg_helper)
set_if = ['--', 'set', 'interface', host_veth]
e_mac = set_if + ['external-ids:attached-mac="%s"' % port_mac]
e_id = set_if + ['external-ids:iface-id="%s"' % port_id]
e_status = set_if + ['external-ids:iface-status=active']
e_mcid = set_if + ['external-ids:manila-container=%s' % server_id]
self._execute("ovs-vsctl", "--", "add-port",
self.configuration.container_ovs_bridge_name, host_veth,
*(e_mac + e_id + e_status + e_mcid), run_as_root=True)
LOG.debug("Now container %(id)s should be accessible from network "
"%(network)s and subnet %(subnet)s by address %(ip)s." %
msg_helper)
@utils.synchronized("container_driver_teardown_lock", external=True)
def _teardown_server(self, *args, **kwargs):
server_id = self._get_container_name(kwargs["server_details"]["id"])
self.container.stop_container(server_id)
interfaces = self._execute("ovs-vsctl", "list", "interface",
run_as_root=True)[0]
veths = set(re.findall("veth[0-9a-zA-Z]{7}", interfaces))
manila_re = ("manila_[0-9a-f]{8}_[0-9a-f]{4}_[0-9a-f]{4}_[0-9a-f]{4}_"
"[0-9a-f]{12}")
for veth in veths:
iface_data = self._execute("ovs-vsctl", "list", "interface", veth,
run_as_root=True)[0]
container_id = re.findall(manila_re, iface_data)
if container_id == []:
continue
elif container_id[0] == server_id:
LOG.debug("Deleting veth %s.", veth)
try:
self._execute("ovs-vsctl", "--", "del-port",
self.configuration.container_ovs_bridge_name,
veth, run_as_root=True)
except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to delete port %s: port "
"vanished."), veth)
LOG.error(e)
def _get_veth_state(self):
result = self._execute("brctl", "show",
self.configuration.container_linux_bridge_name,
run_as_root=True)
veths = re.findall("veth.*\\n", result[0])
veths = [x.rstrip('\n') for x in veths]
msg = ("The following veth interfaces are plugged into %s now: " %
self.configuration.container_linux_bridge_name)
LOG.debug(msg + ", ".join(veths))
return veths
def _get_corresponding_veth(self, before, after):
result = list(set(after) ^ set(before))
if len(result) != 1:
raise exception.ManilaException(_("Multiple veths for container."))
return result[0]
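# Hedged illustration (comment only; interface names are made up): with
#     before = ["veth0abc1234"]
#     after = ["veth0abc1234", "veth0def5678"]
# the symmetric difference set(after) ^ set(before) == {"veth0def5678"}, so the
# method above returns "veth0def5678". Zero or more than one leftover veth
# raises ManilaException.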
@utils.synchronized("veth-lock", external=True)
def _setup_server(self, network_info, metadata=None):
msg = "Creating share server '%s'."
server_id = self._get_container_name(network_info["server_id"])
LOG.debug(msg % server_id)
veths_before = self._get_veth_state()
try:
self.container.start_container(server_id)
except Exception as e:
raise exception.ManilaException(_("Cannot create container: %s") %
e)
veths_after = self._get_veth_state()
veth = self._get_corresponding_veth(veths_before, veths_after)
self._connect_to_network(server_id, network_info, veth)
LOG.info(_LI("Container %s was created."), server_id)
return {"id": network_info["server_id"]}
|
|
"""
Code generator script to make the Cython BLAS and LAPACK wrappers
from the files "cython_blas_signatures.txt" and
"cython_lapack_signatures.txt" which contain the signatures for
all the BLAS/LAPACK routines that should be included in the wrappers.
"""
from collections import defaultdict
from operator import itemgetter
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
fortran_types = {'int': 'integer',
'c': 'complex',
'd': 'double precision',
's': 'real',
'z': 'complex*16',
'char': 'character',
'bint': 'logical'}
c_types = {'int': 'int',
'c': 'npy_complex64',
'd': 'double',
's': 'float',
'z': 'npy_complex128',
'char': 'char',
'bint': 'int',
'cselect1': '_cselect1',
'cselect2': '_cselect2',
'dselect2': '_dselect2',
'dselect3': '_dselect3',
'sselect2': '_sselect2',
'sselect3': '_sselect3',
'zselect1': '_zselect1',
'zselect2': '_zselect2'}
def arg_names_and_types(args):
return zip(*[arg.split(' *') for arg in args.split(', ')])
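# Hedged example (illustrative signature text): the split-and-unzip above turns
# a comma-separated argument string into parallel tuples of types and names.
def _arg_names_and_types_example():
    types, names = arg_names_and_types("int *n, d *x, int *incx")
    assert types == ('int', 'd', 'int')
    assert names == ('n', 'x', 'incx')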
pyx_func_template = """
cdef extern from "{header_name}":
void _fortran_{name} "F_FUNC({name}wrp, {upname}WRP)"({ret_type} *out, {fort_args}) nogil
cdef {ret_type} {name}({args}) nogil:
cdef {ret_type} out
_fortran_{name}(&out, {argnames})
return out
"""
npy_types = {'c': 'npy_complex64', 'z': 'npy_complex128',
'cselect1': '_cselect1', 'cselect2': '_cselect2',
'dselect2': '_dselect2', 'dselect3': '_dselect3',
'sselect2': '_sselect2', 'sselect3': '_sselect3',
'zselect1': '_zselect1', 'zselect2': '_zselect2'}
def arg_casts(arg):
if arg in ['npy_complex64', 'npy_complex128', '_cselect1', '_cselect2',
'_dselect2', '_dselect3', '_sselect2', '_sselect3',
'_zselect1', '_zselect2']:
return '<{0}*>'.format(arg)
return ''
def pyx_decl_func(name, ret_type, args, header_name):
argtypes, argnames = arg_names_and_types(args)
# Fix the case where one of the arguments has the same name as the
# abbreviation for the argument type.
# Otherwise the variable passed as an argument would be treated as
# overwriting the previous typedef, and Cython compilation fails.
if ret_type in argnames:
argnames = [n if n != ret_type else ret_type + '_' for n in argnames]
argnames = [n if n not in ['lambda', 'in'] else n + '_'
for n in argnames]
args = ', '.join([' *'.join([n, t])
for n, t in zip(argtypes, argnames)])
argtypes = [npy_types.get(t, t) for t in argtypes]
fort_args = ', '.join([' *'.join([n, t])
for n, t in zip(argtypes, argnames)])
argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)]
argnames = ', '.join(argnames)
c_ret_type = c_types[ret_type]
args = args.replace('lambda', 'lambda_')
return pyx_func_template.format(name=name, upname=name.upper(), args=args,
fort_args=fort_args, ret_type=ret_type,
c_ret_type=c_ret_type, argnames=argnames,
header_name=header_name)
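# Hedged check (illustrative signature; not used by the generator): for a BLAS
# function such as ddot, the rendered wrapper contains both the extern F_FUNC
# declaration and the nogil Cython shim defined by pyx_func_template above.
def _pyx_decl_func_example():
    rendered = pyx_decl_func('ddot', 'd',
                             'int *n, d *dx, int *incx, d *dy, int *incy',
                             '_blas_subroutines.h')
    assert 'F_FUNC(ddotwrp, DDOTWRP)' in rendered
    assert 'cdef d ddot(int *n, d *dx, int *incx, d *dy, int *incy) nogil' in rendered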
pyx_sub_template = """cdef extern from "{header_name}":
void _fortran_{name} "F_FUNC({name},{upname})"({fort_args}) nogil
cdef void {name}({args}) nogil:
_fortran_{name}({argnames})
"""
def pyx_decl_sub(name, args, header_name):
argtypes, argnames = arg_names_and_types(args)
argtypes = [npy_types.get(t, t) for t in argtypes]
argnames = [n if n not in ['lambda', 'in'] else n + '_' for n in argnames]
fort_args = ', '.join([' *'.join([n, t])
for n, t in zip(argtypes, argnames)])
argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)]
argnames = ', '.join(argnames)
args = args.replace('*lambda,', '*lambda_,').replace('*in,', '*in_,')
return pyx_sub_template.format(name=name, upname=name.upper(),
args=args, fort_args=fort_args,
argnames=argnames, header_name=header_name)
blas_pyx_preamble = '''# cython: boundscheck = False
# cython: wraparound = False
# cython: cdivision = True
"""
BLAS Functions for Cython
=========================
Usable from Cython via::
cimport scipy.linalg.cython_blas
These wrappers do not check for alignment of arrays.
Alignment should be checked before these wrappers are used.
Raw function pointers (Fortran-style pointer arguments):
- {}
"""
# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_blas
# from scipy.linalg cimport cython_blas
# cimport scipy.linalg.cython_blas as cython_blas
# cimport ..linalg.cython_blas as cython_blas
# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
cdef extern from "fortran_defs.h":
pass
from numpy cimport npy_complex64, npy_complex128
'''
def make_blas_pyx_preamble(all_sigs):
names = [sig[0] for sig in all_sigs]
return blas_pyx_preamble.format("\n- ".join(names))
lapack_pyx_preamble = '''"""
LAPACK functions for Cython
===========================
Usable from Cython via::
cimport scipy.linalg.cython_lapack
This module provides Cython-level wrappers for all primary routines included
in LAPACK 3.4.0 except for ``zcgesv`` since its interface is not consistent
from LAPACK 3.4.0 to 3.6.0. It also provides some of the
fixed-api auxiliary routines.
These wrappers do not check for alignment of arrays.
Alignment should be checked before these wrappers are used.
Raw function pointers (Fortran-style pointer arguments):
- {}
"""
# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_lapack
# from scipy.linalg cimport cython_lapack
# cimport scipy.linalg.cython_lapack as cython_lapack
# cimport ..linalg.cython_lapack as cython_lapack
# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
cdef extern from "fortran_defs.h":
pass
from numpy cimport npy_complex64, npy_complex128
cdef extern from "_lapack_subroutines.h":
# Function pointer type declarations for
# gees and gges families of functions.
ctypedef bint _cselect1(npy_complex64*)
ctypedef bint _cselect2(npy_complex64*, npy_complex64*)
ctypedef bint _dselect2(d*, d*)
ctypedef bint _dselect3(d*, d*, d*)
ctypedef bint _sselect2(s*, s*)
ctypedef bint _sselect3(s*, s*, s*)
ctypedef bint _zselect1(npy_complex128*)
ctypedef bint _zselect2(npy_complex128*, npy_complex128*)
'''
def make_lapack_pyx_preamble(all_sigs):
names = [sig[0] for sig in all_sigs]
return lapack_pyx_preamble.format("\n- ".join(names))
blas_py_wrappers = """
# Python-accessible wrappers for testing:
cdef inline bint _is_contiguous(double[:,:] a, int axis) nogil:
return (a.strides[axis] == sizeof(a[0,0]) or a.shape[axis] == 1)
cpdef float complex _test_cdotc(float complex[:] cx, float complex[:] cy) nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
int incy = cy.strides[0] // sizeof(cy[0])
return cdotc(&n, &cx[0], &incx, &cy[0], &incy)
cpdef float complex _test_cdotu(float complex[:] cx, float complex[:] cy) nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
int incy = cy.strides[0] // sizeof(cy[0])
return cdotu(&n, &cx[0], &incx, &cy[0], &incy)
cpdef double _test_dasum(double[:] dx) nogil:
cdef:
int n = dx.shape[0]
int incx = dx.strides[0] // sizeof(dx[0])
return dasum(&n, &dx[0], &incx)
cpdef double _test_ddot(double[:] dx, double[:] dy) nogil:
cdef:
int n = dx.shape[0]
int incx = dx.strides[0] // sizeof(dx[0])
int incy = dy.strides[0] // sizeof(dy[0])
return ddot(&n, &dx[0], &incx, &dy[0], &incy)
cpdef int _test_dgemm(double alpha, double[:,:] a, double[:,:] b, double beta,
double[:,:] c) nogil except -1:
cdef:
char *transa
char *transb
int m, n, k, lda, ldb, ldc
double *a0=&a[0,0]
double *b0=&b[0,0]
double *c0=&c[0,0]
# In the case that c is C contiguous, swap a and b and
# swap whether or not each of them is transposed.
# This can be done because a.dot(b) = b.T.dot(a.T).T.
if _is_contiguous(c, 1):
if _is_contiguous(a, 1):
transb = 'n'
ldb = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
elif _is_contiguous(a, 0):
transb = 't'
ldb = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
if _is_contiguous(b, 1):
transa = 'n'
lda = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
elif _is_contiguous(b, 0):
transa = 't'
lda = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
k = b.shape[0]
if k != a.shape[1]:
with gil:
raise ValueError("Shape mismatch in input arrays.")
m = b.shape[1]
n = a.shape[0]
if n != c.shape[0] or m != c.shape[1]:
with gil:
raise ValueError("Output array does not have the correct shape.")
ldc = (&c[1,0]) - c0 if c.shape[0] > 1 else 1
dgemm(transa, transb, &m, &n, &k, &alpha, b0, &lda, a0,
&ldb, &beta, c0, &ldc)
elif _is_contiguous(c, 0):
if _is_contiguous(a, 1):
transa = 't'
lda = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
elif _is_contiguous(a, 0):
transa = 'n'
lda = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
if _is_contiguous(b, 1):
transb = 't'
ldb = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
elif _is_contiguous(b, 0):
transb = 'n'
ldb = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
m = a.shape[0]
k = a.shape[1]
if k != b.shape[0]:
with gil:
raise ValueError("Shape mismatch in input arrays.")
n = b.shape[1]
if m != c.shape[0] or n != c.shape[1]:
with gil:
raise ValueError("Output array does not have the correct shape.")
ldc = (&c[0,1]) - c0 if c.shape[1] > 1 else 1
dgemm(transa, transb, &m, &n, &k, &alpha, a0, &lda, b0,
&ldb, &beta, c0, &ldc)
else:
with gil:
raise ValueError("Input 'c' is neither C nor Fortran contiguous.")
return 0
cpdef double _test_dnrm2(double[:] x) nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return dnrm2(&n, &x[0], &incx)
cpdef double _test_dzasum(double complex[:] zx) nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
return dzasum(&n, &zx[0], &incx)
cpdef double _test_dznrm2(double complex[:] x) nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return dznrm2(&n, &x[0], &incx)
cpdef int _test_icamax(float complex[:] cx) nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
return icamax(&n, &cx[0], &incx)
cpdef int _test_idamax(double[:] dx) nogil:
cdef:
int n = dx.shape[0]
int incx = dx.strides[0] // sizeof(dx[0])
return idamax(&n, &dx[0], &incx)
cpdef int _test_isamax(float[:] sx) nogil:
cdef:
int n = sx.shape[0]
int incx = sx.strides[0] // sizeof(sx[0])
return isamax(&n, &sx[0], &incx)
cpdef int _test_izamax(double complex[:] zx) nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
return izamax(&n, &zx[0], &incx)
cpdef float _test_sasum(float[:] sx) nogil:
cdef:
int n = sx.shape[0]
int incx = sx.strides[0] // sizeof(sx[0])
return sasum(&n, &sx[0], &incx)
cpdef float _test_scasum(float complex[:] cx) nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
return scasum(&n, &cx[0], &incx)
cpdef float _test_scnrm2(float complex[:] x) nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return scnrm2(&n, &x[0], &incx)
cpdef float _test_sdot(float[:] sx, float[:] sy) nogil:
cdef:
int n = sx.shape[0]
int incx = sx.strides[0] // sizeof(sx[0])
int incy = sy.strides[0] // sizeof(sy[0])
return sdot(&n, &sx[0], &incx, &sy[0], &incy)
cpdef float _test_snrm2(float[:] x) nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return snrm2(&n, &x[0], &incx)
cpdef double complex _test_zdotc(double complex[:] zx, double complex[:] zy) nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
int incy = zy.strides[0] // sizeof(zy[0])
return zdotc(&n, &zx[0], &incx, &zy[0], &incy)
cpdef double complex _test_zdotu(double complex[:] zx, double complex[:] zy) nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
int incy = zy.strides[0] // sizeof(zy[0])
return zdotu(&n, &zx[0], &incx, &zy[0], &incy)
"""
def generate_blas_pyx(func_sigs, sub_sigs, all_sigs, header_name):
funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs)
subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,)))
for s in sub_sigs)
return make_blas_pyx_preamble(all_sigs) + funcs + subs + blas_py_wrappers
lapack_py_wrappers = """
# Python accessible wrappers for testing:
def _test_dlamch(cmach):
# This conversion is necessary to handle Python 3 strings.
cmach_bytes = bytes(cmach)
# Now that it is a bytes representation, a non-temporary variable
# must be passed as a part of the function call.
cdef char* cmach_char = cmach_bytes
return dlamch(cmach_char)
def _test_slamch(cmach):
# This conversion is necessary to handle Python 3 strings.
cmach_bytes = bytes(cmach)
# Now that it is a bytes representation, a non-temporary variable
# must be passed as a part of the function call.
cdef char* cmach_char = cmach_bytes
return slamch(cmach_char)
"""
def generate_lapack_pyx(func_sigs, sub_sigs, all_sigs, header_name):
funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs)
subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,)))
for s in sub_sigs)
preamble = make_lapack_pyx_preamble(all_sigs)
return preamble + funcs + subs + lapack_py_wrappers
pxd_template = """ctypedef {ret_type} {name}_t({args}) nogil
cdef {name}_t *{name}_f
"""
pxd_template = """cdef {ret_type} {name}({args}) nogil
"""
def pxd_decl(name, ret_type, args):
args = args.replace('lambda', 'lambda_').replace('*in,', '*in_,')
return pxd_template.format(name=name, ret_type=ret_type, args=args)
blas_pxd_preamble = """# Within scipy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_blas
# from scipy.linalg cimport cython_blas
# cimport scipy.linalg.cython_blas as cython_blas
# cimport ..linalg.cython_blas as cython_blas
# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
ctypedef float s
ctypedef double d
ctypedef float complex c
ctypedef double complex z
"""
def generate_blas_pxd(all_sigs):
body = '\n'.join(pxd_decl(*sig) for sig in all_sigs)
return blas_pxd_preamble + body
lapack_pxd_preamble = """# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_lapack
# from scipy.linalg cimport cython_lapack
# cimport scipy.linalg.cython_lapack as cython_lapack
# cimport ..linalg.cython_lapack as cython_lapack
# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
ctypedef float s
ctypedef double d
ctypedef float complex c
ctypedef double complex z
# Function pointer type declarations for
# gees and gges families of functions.
ctypedef bint cselect1(c*)
ctypedef bint cselect2(c*, c*)
ctypedef bint dselect2(d*, d*)
ctypedef bint dselect3(d*, d*, d*)
ctypedef bint sselect2(s*, s*)
ctypedef bint sselect3(s*, s*, s*)
ctypedef bint zselect1(z*)
ctypedef bint zselect2(z*, z*)
"""
def generate_lapack_pxd(all_sigs):
return lapack_pxd_preamble + '\n'.join(pxd_decl(*sig) for sig in all_sigs)
fortran_template = """ subroutine {name}wrp(
+ ret,
+ {argnames}
+ )
external {wrapper}
{ret_type} {wrapper}
{ret_type} ret
{argdecls}
ret = {wrapper}(
+ {argnames}
+ )
end
"""
dims = {'work': '(*)', 'ab': '(ldab,*)', 'a': '(lda,*)', 'dl': '(*)',
'd': '(*)', 'du': '(*)', 'ap': '(*)', 'e': '(*)', 'lld': '(*)'}
xy_specialized_dims = {'x': '', 'y': ''}
a_specialized_dims = {'a': '(*)'}
special_cases = defaultdict(dict,
ladiv = xy_specialized_dims,
lanhf = a_specialized_dims,
lansf = a_specialized_dims,
lapy2 = xy_specialized_dims,
lapy3 = xy_specialized_dims)
def process_fortran_name(name, funcname):
if 'inc' in name:
return name
special = special_cases[funcname[1:]]
if 'x' in name or 'y' in name:
suffix = special.get(name, '(n)')
else:
suffix = special.get(name, '')
return name + suffix
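# Hedged examples of the dimensioning rules above (routine names taken from the
# special_cases table; argument names are illustrative):
def _process_fortran_name_examples():
    assert process_fortran_name('incx', 'ddot') == 'incx'  # increments stay scalar
    assert process_fortran_name('dx', 'ddot') == 'dx(n)'  # x/y vectors get (n)
    assert process_fortran_name('x', 'dlapy2') == 'x'  # special-cased: scalar
    assert process_fortran_name('a', 'clanhf') == 'a(*)'  # special-cased: assumed-size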
def called_name(name):
included = ['cdotc', 'cdotu', 'zdotc', 'zdotu', 'cladiv', 'zladiv']
if name in included:
return "w" + name
return name
def fort_subroutine_wrapper(name, ret_type, args):
wrapper = called_name(name)
types, names = arg_names_and_types(args)
argnames = ',\n + '.join(names)
names = [process_fortran_name(n, name) for n in names]
argdecls = '\n '.join('{0} {1}'.format(fortran_types[t], n)
for n, t in zip(names, types))
return fortran_template.format(name=name, wrapper=wrapper,
argnames=argnames, argdecls=argdecls,
ret_type=fortran_types[ret_type])
def generate_fortran(func_sigs):
return "\n".join(fort_subroutine_wrapper(*sig) for sig in func_sigs)
def make_c_args(args):
types, names = arg_names_and_types(args)
types = [c_types[arg] for arg in types]
return ', '.join('{0} *{1}'.format(t, n) for t, n in zip(types, names))
c_func_template = ("void F_FUNC({name}wrp, {upname}WRP)"
"({return_type} *ret, {args});\n")
def c_func_decl(name, return_type, args):
args = make_c_args(args)
return_type = c_types[return_type]
return c_func_template.format(name=name, upname=name.upper(),
return_type=return_type, args=args)
c_sub_template = "void F_FUNC({name},{upname})({args});\n"
def c_sub_decl(name, return_type, args):
args = make_c_args(args)
return c_sub_template.format(name=name, upname=name.upper(), args=args)
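# For illustration with the ddot example above (assuming the usual c_types
# mapping of 'd' -> 'double'), c_func_decl emits roughly:
#     void F_FUNC(ddotwrp, DDOTWRP)(double *ret, int *n, double *dx, int *incx, double *dy, int *incy);
# c_sub_decl emits the same kind of prototype but without the 'wrp' suffix
# and without the extra leading 'ret' argument.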
c_preamble = """#ifndef SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#define SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#include "fortran_defs.h"
#include "numpy/arrayobject.h"
"""
lapack_decls = """
typedef int (*_cselect1)(npy_complex64*);
typedef int (*_cselect2)(npy_complex64*, npy_complex64*);
typedef int (*_dselect2)(double*, double*);
typedef int (*_dselect3)(double*, double*, double*);
typedef int (*_sselect2)(float*, float*);
typedef int (*_sselect3)(float*, float*, float*);
typedef int (*_zselect1)(npy_complex128*);
typedef int (*_zselect2)(npy_complex128*, npy_complex128*);
"""
cpp_guard = """
#ifdef __cplusplus
extern "C" {
#endif
"""
c_end = """
#ifdef __cplusplus
}
#endif
#endif
"""
def generate_c_header(func_sigs, sub_sigs, all_sigs, lib_name):
funcs = "".join(c_func_decl(*sig) for sig in func_sigs)
subs = "\n" + "".join(c_sub_decl(*sig) for sig in sub_sigs)
if lib_name == 'LAPACK':
preamble = (c_preamble.format(lib=lib_name) + lapack_decls)
else:
preamble = c_preamble.format(lib=lib_name)
return "".join([preamble, cpp_guard, funcs, subs, c_end])
def split_signature(sig):
name_and_type, args = sig[:-1].split('(')
ret_type, name = name_and_type.split(' ')
return name, ret_type, args
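# e.g. split_signature('d ddot(int *n, d *dx, int *incx, d *dy, int *incy)')
# returns ('ddot', 'd', 'int *n, d *dx, int *incx, d *dy, int *incy')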
def filter_lines(lines):
lines = [line for line in map(str.strip, lines)
if line and not line.startswith('#')]
func_sigs = [split_signature(line) for line in lines
if line.split(' ')[0] != 'void']
sub_sigs = [split_signature(line) for line in lines
if line.split(' ')[0] == 'void']
all_sigs = list(sorted(func_sigs + sub_sigs, key=itemgetter(0)))
return func_sigs, sub_sigs, all_sigs
def all_newer(src_files, dst_files):
from distutils.dep_util import newer
return all(os.path.exists(dst) and newer(dst, src)
for dst in dst_files for src in src_files)
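# True only when every destination file exists and is newer than every source
# file; any missing or stale output makes make_all() regenerate everything.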
def make_all(blas_signature_file="cython_blas_signatures.txt",
lapack_signature_file="cython_lapack_signatures.txt",
blas_name="cython_blas",
lapack_name="cython_lapack",
blas_fortran_name="_blas_subroutine_wrappers.f",
lapack_fortran_name="_lapack_subroutine_wrappers.f",
blas_header_name="_blas_subroutines.h",
lapack_header_name="_lapack_subroutines.h"):
src_files = (os.path.abspath(__file__),
blas_signature_file,
lapack_signature_file)
dst_files = (blas_name + '.pyx',
blas_name + '.pxd',
blas_fortran_name,
blas_header_name,
lapack_name + '.pyx',
lapack_name + '.pxd',
lapack_fortran_name,
lapack_header_name)
os.chdir(BASE_DIR)
if all_newer(src_files, dst_files):
print("scipy/linalg/_generate_pyx.py: all files up-to-date")
return
comments = ["This file was generated by _generate_pyx.py.\n",
"Do not edit this file directly.\n"]
ccomment = ''.join(['/* ' + line.rstrip() + ' */\n'
for line in comments]) + '\n'
pyxcomment = ''.join(['# ' + line for line in comments]) + '\n'
fcomment = ''.join(['c ' + line for line in comments]) + '\n'
with open(blas_signature_file, 'r') as f:
blas_sigs = f.readlines()
blas_sigs = filter_lines(blas_sigs)
blas_pyx = generate_blas_pyx(*(blas_sigs + (blas_header_name,)))
with open(blas_name + '.pyx', 'w') as f:
f.write(pyxcomment)
f.write(blas_pyx)
blas_pxd = generate_blas_pxd(blas_sigs[2])
with open(blas_name + '.pxd', 'w') as f:
f.write(pyxcomment)
f.write(blas_pxd)
blas_fortran = generate_fortran(blas_sigs[0])
with open(blas_fortran_name, 'w') as f:
f.write(fcomment)
f.write(blas_fortran)
blas_c_header = generate_c_header(*(blas_sigs + ('BLAS',)))
with open(blas_header_name, 'w') as f:
f.write(ccomment)
f.write(blas_c_header)
with open(lapack_signature_file, 'r') as f:
lapack_sigs = f.readlines()
lapack_sigs = filter_lines(lapack_sigs)
lapack_pyx = generate_lapack_pyx(*(lapack_sigs + (lapack_header_name,)))
with open(lapack_name + '.pyx', 'w') as f:
f.write(pyxcomment)
f.write(lapack_pyx)
lapack_pxd = generate_lapack_pxd(lapack_sigs[2])
with open(lapack_name + '.pxd', 'w') as f:
f.write(pyxcomment)
f.write(lapack_pxd)
lapack_fortran = generate_fortran(lapack_sigs[0])
with open(lapack_fortran_name, 'w') as f:
f.write(fcomment)
f.write(lapack_fortran)
lapack_c_header = generate_c_header(*(lapack_sigs + ('LAPACK',)))
with open(lapack_header_name, 'w') as f:
f.write(ccomment)
f.write(lapack_c_header)
if __name__ == '__main__':
make_all()
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resources to be indexed and searched over by the search module."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import datetime
import gettext
import HTMLParser
import logging
import operator
import os
import Queue
import re
import robotparser
import urllib
import urlparse
from xml.dom import minidom
import jinja2
import appengine_config
from common import jinja_utils
from models import models
from modules.announcements import announcements
from google.appengine.api import search
from google.appengine.api import urlfetch
PROTOCOL_PREFIX = 'http://'
YOUTUBE_DATA_URL = 'https://gdata.youtube.com/feeds/api/videos/'
YOUTUBE_TIMED_TEXT_URL = 'https://youtube.com/api/timedtext'
# The time span (in seconds) of transcript text aggregated into a single
# fragment before a new fragment is started. A lower value results in more
# fine-grained indexing and more docs in the index.
YOUTUBE_CAPTION_SIZE_SECS = 30
class URLNotParseableException(Exception):
"""Exception thrown when the resource at a URL cannot be parsed."""
pass
class ResourceHTMLParser(HTMLParser.HTMLParser):
"""Custom parser for processing HTML files."""
IGNORED_TAGS = ['script', 'style']
def __init__(self, url):
HTMLParser.HTMLParser.__init__(self)
self.content_list = []
self._links = []
self._title = ''
self.tag_tracker = collections.Counter()
self.url = url
def handle_starttag(self, tag, attrs):
attrs_dict = dict(attrs)
if tag == 'a' and 'href' in attrs_dict:
self._links.append(urlparse.urljoin(self.url, attrs_dict['href']))
self.tag_tracker[tag] += 1
def handle_endtag(self, tag):
if self.tag_tracker[tag] > 0:
self.tag_tracker[tag] -= 1
def handle_data(self, data):
"""Invoked every time the parser encounters the page's inner content."""
if self.tag_tracker['title']:
if self._title:
self._title += '\n%s' % data
else:
self._title = data
stripped_data = data.strip()
if (not any([self.tag_tracker[tag] for tag in self.IGNORED_TAGS]) and
stripped_data):
self.content_list.append(stripped_data)
def get_content(self):
return '\n'.join(self.content_list)
def get_links(self):
return self._links
def get_title(self):
return self._title
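# Minimal usage sketch (hypothetical markup; note that only script/style
# content is ignored, so title text also appears in get_content()):
#   parser = ResourceHTMLParser('http://example.com/page')
#   parser.feed('<html><title>T</title><body><p>hello</p></body></html>')
#   parser.get_title()    # -> 'T'
#   parser.get_content()  # -> 'T\nhello'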
def get_parser_for_html(url, ignore_robots=False):
"""Returns a ResourceHTMLParser with the parsed data."""
if not (ignore_robots or _url_allows_robots(url)):
raise URLNotParseableException('robots.txt disallows access to URL: %s'
% url)
parser = ResourceHTMLParser(url)
try:
result = urlfetch.fetch(url)
if (result.status_code in [200, 304] and
any(content_type in result.headers['Content-type'] for
content_type in ['text/html', 'xml'])):
if not isinstance(result.content, unicode):
result.content = result.content.decode('utf-8')
parser.feed(result.content)
else:
raise ValueError
except BaseException as e:
raise URLNotParseableException('Could not parse file at URL: %s\n%s' %
(url, e))
return parser
def get_minidom_from_xml(url, ignore_robots=False):
"""Returns a minidom representation of an XML file at url."""
if not (ignore_robots or _url_allows_robots(url)):
raise URLNotParseableException('robots.txt disallows access to URL: %s'
% url)
try:
result = urlfetch.fetch(url)
except urlfetch.Error as e:
raise URLNotParseableException('Could not parse file at URL: %s. %s' %
(url, e))
if result.status_code not in [200, 304]:
raise URLNotParseableException('Bad status code (%s) for URL: %s' %
(result.status_code, url))
try:
if isinstance(result.content, unicode):
result.content = result.content.encode('utf-8')
xmldoc = minidom.parseString(result.content)
except BaseException as e:
raise URLNotParseableException(
'Error parsing XML document at URL: %s. %s' % (url, e))
return xmldoc
def _url_allows_robots(url):
"""Checks robots.txt for user agent * at URL."""
url = url.encode('utf-8')
try:
parts = urlparse.urlparse(url)
base = urlparse.urlunsplit((
parts.scheme, parts.netloc, '', None, None))
rp = robotparser.RobotFileParser(url=urlparse.urljoin(
base, '/robots.txt'))
rp.read()
except BaseException as e:
logging.info('Could not retrieve robots.txt for URL: %s', url)
raise URLNotParseableException(e)
else:
return rp.can_fetch('*', url)
def get_locale_filtered_announcement_list(course):
# TODO(jorr): Restrict search in announcements by all tracking labels,
# not just locale.
announcement_list = (
announcements.AnnouncementEntity.get_announcements())
# pylint: disable=protected-access
return models.LabelDAO._apply_locale_labels_to_locale(
course.app_context.get_current_locale(), announcement_list)
# pylint: enable=protected-access
class Resource(object):
"""Abstract superclass for a resource."""
# Each subclass should define this constant
TYPE_NAME = 'Resource'
# Each subclass should use this constant to define the fields it needs
# returned with a search result.
RETURNED_FIELDS = []
# Each subclass should use this constant to define the fields it needs
# returned as snippets in the search result. In most cases, this should be
# one field.
SNIPPETED_FIELDS = []
# Each subclass should use this constant to define how many days should
# elapse before a resource should be re-indexed. This value should be
# nonnegative.
FRESHNESS_THRESHOLD_DAYS = 0
@classmethod
def generate_all(
cls, course, timestamps): # pylint: disable=unused-argument
"""A generator returning objects of type cls in the course.
This generator should yield resources based on the last indexed time in
timestamps.
Args:
course: models.courses.Course. The course to index.
timestamps: dict from doc_ids to last indexed datetimes.
Yields:
A sequence of Resource objects.
"""
# For the superclass, return a generator which immediately halts. All
# implementations in subclasses must also be generators for memory-
# management reasons.
return
yield # pylint: disable=unreachable
@classmethod
def _get_doc_id(cls, *unused_vargs):
"""Subclasses should implement this with identifying fields as args."""
raise NotImplementedError
@classmethod
def _indexed_within_num_days(cls, timestamps, doc_id, num_days):
"""Determines whether doc_id was indexed in the last num_days days."""
try:
timestamp = timestamps[doc_id]
except (KeyError, TypeError):
return False
else:
delta = datetime.datetime.utcnow() - timestamp
return delta <= datetime.timedelta(num_days)
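# e.g. with timestamps == {'Lesson_2_3': <datetime 2 days ago>} and
# num_days == 3 this returns True; a missing doc_id or timestamps == None
# returns False.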
def get_document(self):
"""Return a search.Document to be indexed."""
raise NotImplementedError
def get_links(self):
"""External links to be indexed should be stored in self.links."""
return self.links if hasattr(self, 'links') else []
def get_unit_id(self):
return self.unit_id if hasattr(self, 'unit_id') else None
class Result(object):
"""The abstract superclass for a result returned by the search module."""
def get_html(self):
"""Return an HTML fragment to be used in the results page."""
raise NotImplementedError
@classmethod
def _generate_html_from_template(cls, template_name, template_value):
"""Generates marked-up HTML from template."""
template = jinja_utils.get_template(
template_name,
[os.path.join(appengine_config.BUNDLE_ROOT,
'modules', 'search', 'results_templates')])
return jinja2.Markup(template.render(template_value))
@classmethod
def _get_returned_field(cls, result, field):
"""Returns the value of a field in result, '' if none exists."""
try:
return result[field][0].value
except (KeyError, IndexError, AttributeError):
return ''
@classmethod
def _get_snippet(cls, result):
"""Returns the value of the snippet in result, '' if none exists."""
try:
return result.expressions[0].value
except (AttributeError, IndexError):
return ''
class LessonResource(Resource):
"""A lesson in a course."""
TYPE_NAME = 'Lesson'
RETURNED_FIELDS = ['title', 'unit_id', 'lesson_id', 'url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 3
@classmethod
def generate_all(cls, course, timestamps):
for lesson in course.get_lessons_for_all_units():
unit = course.find_unit_by_id(lesson.unit_id)
doc_id = cls._get_doc_id(lesson.unit_id, lesson.lesson_id)
if (course.is_unit_available(unit) and
course.is_lesson_available(unit, lesson) and
not cls._indexed_within_num_days(timestamps, doc_id,
cls.FRESHNESS_THRESHOLD_DAYS)):
try:
yield LessonResource(lesson)
except HTMLParser.HTMLParseError as e:
logging.info(
'Error parsing objectives for Lesson %s.%s: %s',
lesson.unit_id, lesson.lesson_id, e)
continue
@classmethod
def _get_doc_id(cls, unit_id, lesson_id):
return '%s_%s_%s' % (cls.TYPE_NAME, unit_id, lesson_id)
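# e.g. _get_doc_id(2, 3) -> 'Lesson_2_3'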
def __init__(self, lesson):
super(LessonResource, self).__init__()
self.unit_id = lesson.unit_id
self.lesson_id = lesson.lesson_id
self.title = unicode(lesson.title)
if lesson.notes:
self.notes = urlparse.urljoin(
PROTOCOL_PREFIX, unicode(lesson.notes))
else:
self.notes = ''
if lesson.objectives:
parser = ResourceHTMLParser(PROTOCOL_PREFIX)
parser.feed(unicode(lesson.objectives))
self.content = parser.get_content()
self.links = parser.get_links()
else:
self.content = ''
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.unit_id, self.lesson_id),
fields=[
search.TextField(
name='unit_id',
value=str(self.unit_id) if self.unit_id else ''),
search.TextField(name='title', value=self.title),
search.TextField(name='content', value=self.content),
search.TextField(name='url', value=(
'unit?unit=%s&lesson=%s' %
(self.unit_id, self.lesson_id))),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
class LessonResult(Result):
"""An object for a lesson in search results."""
def __init__(self, search_result):
super(LessonResult, self).__init__()
self.url = self._get_returned_field(search_result, 'url')
self.title = self._get_returned_field(search_result, 'title')
self.unit_id = self._get_returned_field(search_result, 'unit_id')
self.snippet = self._get_snippet(search_result)
def get_html(self):
# I18N: Displayed in search results; denotes a lesson link.
lesson_string = gettext.gettext('Lesson')
template_value = {
'result_title': '%s - %s' % (self.title, lesson_string),
'result_url': self.url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('basic.html', template_value)
class ExternalLinkResource(Resource):
"""An external link from a course."""
TYPE_NAME = 'ExternalLink'
RETURNED_FIELDS = ['title', 'url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 15
# TODO(emichael): Allow the user to turn off external links in the dashboard
@classmethod
def generate_all_from_dist_dict(cls, link_dist, link_unit_id, timestamps):
"""Generate all external links from a map from URL to distance.
Args:
link_dist: dict. a map from URL to distance in the link graph from
the course.
link_unit_id: dict. A map from URL to the unit ID under which
the link is found.
timestamps: dict from doc_ids to last indexed datetimes. An empty
dict indicates that all documents should be generated.
Yields:
A sequence of ExternalLinkResource.
"""
url_queue = Queue.LifoQueue()
for url, dist in sorted(link_dist.iteritems(),
key=operator.itemgetter(1)):
url_queue.put(url)
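# Because the URLs are pushed in ascending-distance order onto a LIFO queue,
# the most distant known links are popped first; links discovered while
# processing distance-0 pages are pushed on top and handled next.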
while not url_queue.empty():
url = url_queue.get()
doc_id = cls._get_doc_id(url)
if (cls._indexed_within_num_days(timestamps, doc_id,
cls.FRESHNESS_THRESHOLD_DAYS)):
continue
dist = link_dist[url]
unit_id = link_unit_id.get(url)
if dist > 1:
break
try:
resource = ExternalLinkResource(url, unit_id)
except URLNotParseableException as e:
logging.info(e)
else:
if dist < 1:
for new_link in resource.get_links():
if new_link not in link_dist:
link_dist[new_link] = dist + 1
url_queue.put(new_link)
link_unit_id[new_link] = unit_id
yield resource
def __init__(self, url, unit_id):
# distance is the distance from the course material in the link graph,
# where a lesson notes page has a distance of 0
super(ExternalLinkResource, self).__init__()
self.url = url
self.unit_id = unit_id
parser = get_parser_for_html(url)
self.content = parser.get_content()
self.title = parser.get_title()
self.links = parser.get_links()
@classmethod
def _get_doc_id(cls, url):
return '%s_%s' % (cls.TYPE_NAME, url)
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.url),
fields=[
search.TextField(name='title', value=self.title),
search.TextField(name='content', value=self.content),
search.TextField(name='url', value=self.url),
search.TextField(
name='unit_id',
value=str(self.unit_id) if self.unit_id else ''),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
class ExternalLinkResult(Result):
"""An object for an external link in the search results."""
def __init__(self, search_result):
super(ExternalLinkResult, self).__init__()
self.url = self._get_returned_field(search_result, 'url')
self.title = self._get_returned_field(search_result, 'title')
self.unit_id = self._get_returned_field(search_result, 'unit_id')
self.snippet = self._get_snippet(search_result)
def get_html(self):
template_value = {
'result_title': self.title,
'result_url': self.url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('basic.html', template_value)
class YouTubeFragmentResource(Resource):
"""An object for a YouTube transcript fragment in search results."""
TYPE_NAME = 'YouTubeFragment'
RETURNED_FIELDS = ['title', 'video_id', 'start', 'thumbnail_url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 30
@classmethod
def generate_all(cls, course, timestamps):
"""Generate all YouTubeFragments for a course."""
# TODO(emichael): Handle the existence of a single video in multiple
# places in a course.
youtube_ct_regex = r"""<[ ]*gcb-youtube[^>]+videoid=['"]([^'"]+)['"]"""
for lesson in course.get_lessons_for_all_units():
unit = course.find_unit_by_id(lesson.unit_id)
if not (course.is_unit_available(unit) and
course.is_lesson_available(unit, lesson)):
continue
lesson_url = 'unit?unit=%s&lesson=%s' % (
lesson.unit_id, lesson.lesson_id)
if lesson.video and not cls._indexed_within_num_days(
timestamps, lesson.video, cls.FRESHNESS_THRESHOLD_DAYS):
for fragment in cls._get_fragments_for_video(
lesson.unit_id, lesson.video, lesson_url):
yield fragment
match = re.search(youtube_ct_regex, unicode(lesson.objectives))
if match:
for video_id in match.groups():
if not cls._indexed_within_num_days(
timestamps, video_id, cls.FRESHNESS_THRESHOLD_DAYS):
for fragment in cls._get_fragments_for_video(
lesson.unit_id, video_id, lesson_url):
yield fragment
if announcements.custom_module.enabled:
for entity in get_locale_filtered_announcement_list(course):
if entity.is_draft:
continue
announcement_url = 'announcements#%s' % entity.key()
match = re.search(youtube_ct_regex, entity.html)
if match:
for video_id in match.groups():
if not cls._indexed_within_num_days(
timestamps, video_id,
cls.FRESHNESS_THRESHOLD_DAYS):
for fragment in cls._get_fragments_for_video(
None, video_id, announcement_url):
yield fragment
@classmethod
def _indexed_within_num_days(cls, timestamps, video_id, num_days):
for doc_id in timestamps:
if doc_id.startswith(cls._get_doc_id(video_id, '')):
return super(
YouTubeFragmentResource, cls)._indexed_within_num_days(
timestamps, doc_id, num_days)
return False
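# Any indexed fragment whose doc_id starts with 'YouTubeFragment_<video_id>_'
# counts as freshness coverage for the whole video, since all fragments of a
# video are generated together.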
@classmethod
def _get_fragments_for_video(cls, unit_id, video_id, url_in_course):
"""Get all of the transcript fragment docs for a specific video."""
try:
(transcript, title, thumbnail_url) = cls._get_video_data(video_id)
except BaseException as e:
logging.info('Could not parse YouTube video with id %s.\n%s',
video_id, e)
return []
# Aggregate the fragments into YOUTUBE_CAPTION_SIZE_SECS time chunks
fragments = transcript.getElementsByTagName('text')
aggregated_fragments = []
# This parser is only used for unescaping HTML entities
parser = HTMLParser.HTMLParser()
while fragments:
current_start = float(fragments[0].attributes['start'].value)
current_text = []
while (fragments and
float(fragments[0].attributes['start'].value) -
current_start < YOUTUBE_CAPTION_SIZE_SECS):
current_text.append(parser.unescape(
fragments.pop(0).firstChild.nodeValue))
aggregated_fragment = YouTubeFragmentResource(
video_id, unit_id, url_in_course, current_start,
'\n'.join(current_text), title, thumbnail_url)
aggregated_fragments.append(aggregated_fragment)
return aggregated_fragments
@classmethod
def _get_video_data(cls, video_id):
"""Returns (track_minidom, title, thumbnail_url) for a video."""
try:
vid_info = get_minidom_from_xml(
urlparse.urljoin(YOUTUBE_DATA_URL, video_id),
ignore_robots=True)
title = vid_info.getElementsByTagName(
'title')[0].firstChild.nodeValue
thumbnail_url = vid_info.getElementsByTagName(
'media:thumbnail')[0].attributes['url'].value
except (URLNotParseableException, IOError,
IndexError, AttributeError) as e:
logging.error('Could not parse video info for video id %s.\n%s',
video_id, e)
title = ''
thumbnail_url = ''
# TODO(emichael): Handle the existence of multiple tracks
url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL,
'?v=%s&type=list' % video_id)
tracklist = get_minidom_from_xml(url, ignore_robots=True)
tracks = tracklist.getElementsByTagName('track')
if not tracks:
raise URLNotParseableException('No tracks for video %s' % video_id)
track_name = tracks[0].attributes['name'].value
track_lang = tracks[0].attributes['lang_code'].value
track_id = tracks[0].attributes['id'].value
url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL, urllib.quote(
'?v=%s&lang=%s&name=%s&id=%s' %
(video_id, track_lang, track_name, track_id), '?/=&'))
transcript = get_minidom_from_xml(url, ignore_robots=True)
return (transcript, title, thumbnail_url)
@classmethod
def _get_doc_id(cls, video_id, start_time):
return '%s_%s_%s' % (cls.TYPE_NAME, video_id, start_time)
def __init__(self, video_id, unit_id, url, start, text, video_title,
thumbnail_url):
super(YouTubeFragmentResource, self).__init__()
self.url = url
self.video_id = video_id
self.unit_id = unit_id
self.start = start
self.text = text
self.video_title = video_title
self.thumbnail_url = thumbnail_url
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.video_id, self.start),
fields=[
search.TextField(name='title', value=self.video_title),
search.TextField(name='video_id', value=self.video_id),
search.TextField(
name='unit_id',
value=str(self.unit_id) if self.unit_id else ''),
search.TextField(name='content', value=self.text),
search.NumberField(name='start', value=self.start),
search.TextField(name='thumbnail_url',
value=self.thumbnail_url),
search.TextField(name='url', value=self.url),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
class YouTubeFragmentResult(Result):
"""An object for a lesson in search results."""
def __init__(self, search_result):
super(YouTubeFragmentResult, self).__init__()
self.doc_id = search_result.doc_id
self.title = self._get_returned_field(search_result, 'title')
self.video_id = self._get_returned_field(search_result, 'video_id')
self.unit_id = self._get_returned_field(search_result, 'unit_id')
self.start = self._get_returned_field(search_result, 'start')
self.thumbnail_url = self._get_returned_field(search_result,
'thumbnail_url')
self.url = self._get_returned_field(search_result, 'url')
self.snippet = self._get_snippet(search_result)
def get_html(self):
template_value = {
'result_title': self.title,
'result_url': self.url,
'video_id': self.video_id,
'start_time': self.start,
'thumbnail_url': self.thumbnail_url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('youtube.html', template_value)
class AnnouncementResource(Resource):
"""An announcement in a course."""
TYPE_NAME = 'Announcement'
RETURNED_FIELDS = ['title', 'url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 1
@classmethod
def generate_all(cls, course, timestamps):
if announcements.custom_module.enabled:
for entity in get_locale_filtered_announcement_list(course):
doc_id = cls._get_doc_id(entity.key())
if not (entity.is_draft or cls._indexed_within_num_days(
timestamps, doc_id, cls.FRESHNESS_THRESHOLD_DAYS)):
try:
yield AnnouncementResource(entity)
except HTMLParser.HTMLParseError as e:
logging.info('Error parsing Announcement %s: %s',
entity.title, e)
continue
def __init__(self, announcement):
super(AnnouncementResource, self).__init__()
self.title = announcement.title
self.key = announcement.key()
parser = ResourceHTMLParser(PROTOCOL_PREFIX)
parser.feed(announcement.html)
self.content = parser.get_content()
@classmethod
def _get_doc_id(cls, key):
return '%s_%s' % (cls.TYPE_NAME, key)
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.key),
fields=[
search.TextField(name='title', value=self.title),
search.TextField(name='content', value=self.content),
search.TextField(name='url',
value='announcements#%s' % self.key),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
class AnnouncementResult(Result):
"""An object for an announcement in search results."""
def __init__(self, search_result):
super(AnnouncementResult, self).__init__()
self.url = self._get_returned_field(search_result, 'url')
self.title = self._get_returned_field(search_result, 'title')
self.unit_id = None # Announcements are definitionally not in units.
self.snippet = self._get_snippet(search_result)
def get_html(self):
# I18N: Displayed in search results; denotes an announcement link.
announcement_string = gettext.gettext('Announcement')
template_value = {
'result_title': '%s - %s' % (self.title, announcement_string),
'result_url': self.url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('basic.html', template_value)
# Register new resource types here
RESOURCE_TYPES = [
(LessonResource, LessonResult),
(ExternalLinkResource, ExternalLinkResult),
(YouTubeFragmentResource, YouTubeFragmentResult),
(AnnouncementResource, AnnouncementResult)
]
def get_returned_fields():
"""Returns a list of fields that should be returned in a search result."""
returned_fields = set(['type'])
for resource_type, unused_result_type in RESOURCE_TYPES:
returned_fields |= set(resource_type.RETURNED_FIELDS)
return list(returned_fields)
def get_snippeted_fields():
"""Returns a list of fields that should be snippeted in a search result."""
snippeted_fields = set()
for resource_type, unused_result_type in RESOURCE_TYPES:
snippeted_fields |= set(resource_type.SNIPPETED_FIELDS)
return list(snippeted_fields)
def generate_all_documents(course, timestamps):
"""A generator for all docs for a given course.
Args:
course: models.courses.Course. The course to be indexed.
timestamps: dict from doc_ids to last indexed datetimes. An empty dict
indicates that all documents should be generated.
Yields:
A sequence of search.Document. If a document is within the freshness
threshold, no document will be generated. This function does not modify
timestamps.
"""
link_dist = {}
link_unit_id = {}
for resource_type, unused_result_type in RESOURCE_TYPES:
for resource in resource_type.generate_all(course, timestamps):
unit_id = resource.get_unit_id()
if isinstance(resource, LessonResource) and resource.notes:
link_dist[resource.notes] = 0
link_unit_id[resource.notes] = unit_id
for link in resource.get_links():
link_dist[link] = 1
link_unit_id[link] = unit_id
yield resource.get_document()
for resource in ExternalLinkResource.generate_all_from_dist_dict(
link_dist, link_unit_id, timestamps):
yield resource.get_document()
def process_results(results):
"""Generate result objects for the results of a query."""
result_types = {resource_type.TYPE_NAME: result_type
for (resource_type, result_type) in RESOURCE_TYPES}
processed_results = []
for result in results:
try:
result_type = result_types[result['type'][0].value]
processed_results.append(result_type(result))
except (AttributeError, IndexError, KeyError) as e:
# If there is no type information, we cannot process the result
logging.error("%s. Couldn't process result", e)
return processed_results
|
|
import conf.managers
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
initial = True
dependencies = [
("sites", "0002_alter_domain_unique"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Client",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("archive", models.BooleanField(default=False)),
("payment_id", models.CharField(blank=True, max_length=255, null=True)),
(
"invoice_email",
models.EmailField(blank=True, max_length=255, null=True),
),
("sites", models.ManyToManyField(to="sites.Site")),
],
options={
"default_permissions": ("view", "add", "change", "delete"),
"ordering": ["-id"],
},
managers=[
("objects", django.db.models.manager.Manager()),
("on_site", conf.managers.CurrentSiteManager()),
],
),
migrations.CreateModel(
name="Entry",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("date", models.DateField(blank=True)),
("duration", models.DurationField(blank=True)),
("note", models.TextField(blank=True, null=True)),
],
options={
"verbose_name_plural": "Entries",
"ordering": ["-date", "-id"],
"default_permissions": ("view", "add", "change", "delete"),
},
managers=[
("objects", django.db.models.manager.Manager()),
("on_site", conf.managers.CurrentSiteManager()),
],
),
migrations.CreateModel(
name="Invoice",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created", models.DateTimeField(auto_now_add=True)),
("paid", models.DateTimeField(blank=True, null=True)),
(
"transaction_id",
models.CharField(blank=True, max_length=255, null=True),
),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="core.Client"
),
),
(
"entries",
models.ManyToManyField(related_name="invoices", to="core.Entry"),
),
(
"site",
models.ForeignKey(
default=1,
on_delete=django.db.models.deletion.CASCADE,
to="sites.Site",
),
),
],
options={"default_permissions": ("view", "add", "change", "delete")},
managers=[
("objects", django.db.models.manager.Manager()),
("on_site", conf.managers.CurrentSiteManager()),
],
),
migrations.CreateModel(
name="Project",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("archive", models.BooleanField(default=False)),
(
"estimate",
models.DecimalField(
blank=True, decimal_places=2, max_digits=10, null=True
),
),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="projects",
to="core.Client",
),
),
],
options={
"default_permissions": ("view", "add", "change", "delete"),
"ordering": ["client", "-id"],
},
),
migrations.CreateModel(
name="Task",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
(
"hourly_rate",
models.DecimalField(
blank=True, decimal_places=2, max_digits=10, null=True
),
),
("sites", models.ManyToManyField(to="sites.Site")),
],
options={
"default_permissions": ("view", "add", "change", "delete"),
"ordering": ["-id"],
},
managers=[
("objects", django.db.models.manager.Manager()),
("on_site", conf.managers.CurrentSiteManager()),
],
),
migrations.AddField(
model_name="entry",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="entries",
to="core.Project",
),
),
migrations.AddField(
model_name="entry",
name="site",
field=models.ForeignKey(
default=1, on_delete=django.db.models.deletion.CASCADE, to="sites.Site"
),
),
migrations.AddField(
model_name="entry",
name="task",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="entries",
to="core.Task",
),
),
migrations.AddField(
model_name="entry",
name="user",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="entries",
to=settings.AUTH_USER_MODEL,
),
),
]
|
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import datetime
from autobahn.twisted.wamp import ApplicationSession
class TimeService(ApplicationSession):
"""
A simple time service application component.
"""
def __init__(self, realm = "realm1"):
ApplicationSession.__init__(self)
self._realm = realm
def onConnect(self):
self.join(self._realm)
def onJoin(self, details):
def utcnow():
now = datetime.datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
self.register(utcnow, 'com.timeservice.now')
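## A WAMP client joined to the same realm can then invoke the procedure,
## e.g. from an ApplicationSession: self.call('com.timeservice.now')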
from twisted.python import log
from autobahn.twisted.websocket import WampWebSocketServerProtocol, WampWebSocketServerFactory
from twisted.internet.defer import Deferred
import json
import urllib
import Cookie
from autobahn.util import newid, utcnow
from autobahn.websocket import http
class ServerProtocol(WampWebSocketServerProtocol):
## authid -> cookie -> set(connection)
def onConnect(self, request):
protocol, headers = WampWebSocketServerProtocol.onConnect(self, request)
## our cookie tracking ID
self._cbtid = None
## see if there already is a cookie set ..
if request.headers.has_key('cookie'):
try:
cookie = Cookie.SimpleCookie()
cookie.load(str(request.headers['cookie']))
except Cookie.CookieError:
pass
else:
if cookie.has_key('cbtid'):
cbtid = cookie['cbtid'].value
if self.factory._cookies.has_key(cbtid):
self._cbtid = cbtid
log.msg("Cookie already set: %s" % self._cbtid)
## if no cookie is set, create a new one ..
if self._cbtid is None:
self._cbtid = newid()
maxAge = 86400
cbtData = {'created': utcnow(),
'authenticated': None,
'maxAge': maxAge,
'connections': set()}
self.factory._cookies[self._cbtid] = cbtData
## do NOT add the "secure" cookie attribute! "secure" refers to the
## scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
log.msg("Setting new cookie: %s" % self._cbtid)
## add this WebSocket connection to the set of connections
## associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)
self._authenticated = self.factory._cookies[self._cbtid]['authenticated']
## accept the WebSocket connection, speaking subprotocol `protocol`
## and setting HTTP headers `headers`
return (protocol, headers)
from autobahn.wamp.protocol import RouterSession
from autobahn.wamp import types
class MyRouterSession(RouterSession):
def onOpen(self, transport):
RouterSession.onOpen(self, transport)
print "transport authenticated: {}".format(self._transport._authenticated)
def onHello(self, realm, details):
print "onHello: {} {}".format(realm, details)
if self._transport._authenticated is not None:
return types.Accept(authid = self._transport._authenticated)
else:
return types.Challenge("mozilla-persona")
def onLeave(self, details):
if details.reason == "wamp.close.logout":
cookie = self._transport.factory._cookies[self._transport._cbtid]
cookie['authenticated'] = None
for proto in cookie['connections']:
proto.sendClose()
def onAuthenticate(self, signature, extra):
print "onAuthenticate: {} {}".format(signature, extra)
dres = Deferred()
## The client did its Mozilla Persona authentication thing
## and now wants to verify the authentication and login.
assertion = signature
## the audience must match the origin under which the browser loaded the page
#audience = 'http://localhost:8080/'
audience = 'http://192.168.1.130:8080/'
## To verify the authentication, we need to send a HTTP/POST
## to Mozilla Persona. When successful, Persona will send us
## back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "tobias.oberstein@gmail.com",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url = "https://verifier.login.persona.org/verify",
method = 'POST',
postdata = body,
headers = headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
try:
if res['status'] == 'okay':
## Mozilla Persona successfully authenticated the user
## remember the user's email address. this marks the cookie as
## authenticated
self._transport.factory._cookies[self._transport._cbtid]['authenticated'] = res['email']
log.msg("Authenticated user {}".format(res['email']))
dres.callback(types.Accept(authid = res['email']))
else:
log.msg("Authentication failed!")
dres.callback(types.Deny())
except Exception as e:
print "ERRR", e
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
dres.callback(types.Deny())
d.addCallbacks(done, error)
return dres
if __name__ == '__main__':
import sys, argparse
from twisted.python import log
from twisted.internet.endpoints import serverFromString
## parse command line arguments
##
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action = "store_true",
help = "Enable debug output.")
parser.add_argument("-c", "--component", type = str, default = None,
help = "Start WAMP-WebSocket server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")
parser.add_argument("--websocket", type = str, default = "tcp:8080",
help = 'WebSocket server Twisted endpoint descriptor, e.g. "tcp:9000" or "unix:/tmp/mywebsocket".')
parser.add_argument("--wsurl", type = str, default = "ws://localhost:8080",
help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')
args = parser.parse_args()
## start Twisted logging to stdout
##
if True or args.debug:
log.startLogging(sys.stdout)
## we use an Autobahn utility to install the "best" available Twisted reactor
##
from autobahn.twisted.choosereactor import install_reactor
reactor = install_reactor()
if args.debug:
print("Running on reactor {}".format(reactor))
## create a WAMP router factory
##
from autobahn.wamp.router import RouterFactory
router_factory = RouterFactory()
## create a WAMP router session factory
##
from autobahn.twisted.wamp import RouterSessionFactory
session_factory = RouterSessionFactory(router_factory)
session_factory.session = MyRouterSession
## start an embedded application component ..
##
session_factory.add(TimeService())
## create a WAMP-over-WebSocket transport server factory
##
from autobahn.twisted.websocket import WampWebSocketServerFactory
transport_factory = WampWebSocketServerFactory(session_factory, args.wsurl, debug = args.debug)
transport_factory.protocol = ServerProtocol
transport_factory._cookies = {}
transport_factory.setProtocolOptions(failByDrop = False)
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.resource import WebSocketResource
## we serve static files under "/" ..
root = File(".")
## .. and our WebSocket server under "/ws"
resource = WebSocketResource(transport_factory)
root.putChild("ws", resource)
## run both under one Twisted Web Site
site = Site(root)
## start the WebSocket server from an endpoint
##
server = serverFromString(reactor, args.websocket)
server.listen(site)
## now enter the Twisted reactor loop
##
reactor.run()
|
|
import json
from indra.sources import sparser
from indra.sources.sparser.processor import _fix_agent
from indra.statements import Agent, Phosphorylation, Complex
from nose.plugins.attrib import attr
# ############################
# XML processing tests
# ############################
def test_invalid_xml():
sp = sparser.process_xml('xyz')
assert sp is None
def test_phosphorylation():
sp = sparser.process_xml(xml_str1)
assert len(sp.statements) == 1
assert sp.statements[0].enz.name == 'MAP2K1'
assert sp.statements[0].sub.name == 'MAPK3'
assert len(sp.statements[0].evidence) == 1
ev = sp.statements[0].evidence[0]
assert ev.pmid == '54321'
assert ev.text
assert ev.source_api == 'sparser'
# This test uses some slow UniProt web queries to standardize agent names
@attr('webservice', 'slow')
def test_phosphorylation2():
sp = sparser.process_xml(xml_str2)
assert len(sp.statements) == 1
assert sp.statements[0].enz.name == 'MAPK1'
assert sp.statements[0].sub.name == 'TP53BP2'
assert sp.statements[0].residue == 'S'
assert sp.statements[0].position == '827'
assert (len(sp.statements[0].evidence) == 1)
ev = sp.statements[0].evidence[0]
assert (ev.pmid == '12345')
assert (ev.text)
assert (ev.source_api == 'sparser')
def test_fix_agent_be_name():
a = Agent('XXX', db_refs={'FPLX': 'CDK'})
_fix_agent(a)
assert a.name == 'CDK'
def test_fix_agent_hgnc_only():
a = Agent('XXX', db_refs={'HGNC': '7199'})
_fix_agent(a)
assert a.name == 'MOS'
assert a.db_refs.get('UP') == 'P00540'
def test_fix_agent_fa_only():
a = Agent('XXX', db_refs={'FA': '00815'})
_fix_agent(a)
assert a.name == 'Cyclin'
assert a.db_refs.get('FPLX') == 'Cyclin'
assert a.db_refs.get('NXPFA') == '00815'
assert 'FA' not in a.db_refs
def test_fix_agent_ncit_only():
a = Agent('XXX', db_refs={'NCIT': 'C25785'})
_fix_agent(a)
assert a.name == 'KRAS'
assert a.db_refs.get('HGNC') == '6407'
assert a.db_refs.get('UP') == 'P01116'
def test_fix_agent_ncit_only_tubb():
a = Agent('XXX', db_refs={'NCIT': 'C129655'})
_fix_agent(a)
assert a.name == 'TUBB'
assert a.db_refs.get('FPLX') == 'TUBB'
def test_fix_agent_pcid():
a = Agent('XXX', db_refs={'PCID': '123'})
_fix_agent(a)
assert 'PCID' not in a.db_refs
assert a.db_refs['PUBCHEM'] == '123'
# ############################
# JSON processing tests
# ############################
def test_process_json_str():
sp = sparser.process_json_dict(json.loads(json_str1))
assert sp is not None
assert len(sp.statements) == 1
assert isinstance(sp.statements[0], Phosphorylation)
sp.set_statements_pmid('1234567')
assert sp.statements[0].evidence[0].pmid == '1234567'
assert sp.json_stmts[0]['evidence'][0]['pmid'] == '1234567'
def test_process_json_str_with_bad_agents():
sp = sparser.process_json_dict(json.loads(json_str2))
assert sp is not None
assert len(sp.statements) == 2, len(sp.statements)
types = {type(s) for s in sp.statements}
assert types == {Complex, Phosphorylation}, types
assert all(len(s.agent_list()) == 2 for s in sp.statements)
def test_process_json_str_with_missing_agent():
"""This makes sure an error isn't raised in this case."""
sp = sparser.process_json_dict(json.loads(json_str3))
assert sp is not None
assert len(sp.statements) == 0, len(sp.statements)
xml_str1 = '''
<article pmid="54321">
<interpretation>
<sentence-text>MEK1 phosphorylates ERK1</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="agent">
<ref category="protein">
<var name="name">MP2K1_HUMAN</var>
<var name="uid">UP:MP2K1_HUMAN</var>
</ref>
</var>
<var name="substrate">
<ref category="protein">
<var name="name">MK03_HUMAN</var>
<var name="uid">UP:MK03_HUMAN</var>
</ref>
</var>
<var name="present"><ref category="present"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
xml_str2 = '''
<article pmid="12345">
<interpretation>
<sentence-text>Hence ASPP2 can be phosphorylated at serine 827 by MAPK1 in vitro</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="subordinate-conjunction">
<ref category="subordinate-conjunction"><var name="word">hence</var></ref></var>
<var name="substrate">
<ref category="protein">
<var name="name">ASPP2_HUMAN</var>
<var name="uid">UP:ASPP2_HUMAN</var>
</ref>
</var>
<var name="agent">
<ref category="protein">
<var name="context">
<ref category="in-vitro"></ref>
</var>
<var name="uid">UP:MK01_HUMAN</var>
<var name="name">MK01_HUMAN</var>
</ref>
</var>
<var name="site">
<ref category="residue-on-protein">
<var name="amino-acid">
<ref category="amino-acid"><var name="name">serine</var></ref>
</var>
<var name="position"> 827</var>
</ref>
</var>
<var name="modal"><ref category="can"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
json_str1 = '''
[
{
"type": "Phosphorylation",
"evidence": [
{
"source_api": "sparser",
"text": "MEK phosphorylates ERK",
"pmid": "PMC_3500"}],
"sub": {
"name": "ERK",
"db_refs": {
"NCIT": "C26360",
"TEXT": "ERK"},
"TEXT": "ERK"},
"enz": {
"name": "MEK",
"db_refs": {
"FPLX": "MEK",
"TEXT": "MEK"},
"TEXT": "MEK"}
}
]'''
json_str2 = '''
[
{
"type": "Phosphorylation",
"evidence": [
{
"source_api": "sparser",
"text": "MEK phosphorylates ERK",
"pmid": "PMC_3500"}],
"sub": "ERK",
"enz": {
"name": "MEK",
"db_refs": {
"FPLX": "MEK",
"TEXT": "MEK"},
"TEXT": "MEK"}
},
{
"type": "Complex",
"members": [
"MEK",
{
"name": "ERK",
"db_refs": {"FPLX": "ERK",
"TEXT": "ERK"}
}
],
"belief": 1,
"id": "3eedc7a9-fbbd-4e2e-b227-07d96f4bcff5"
}
]'''
json_str3 = '''
[
{
"type": "Inhibition",
"obj_activity": "activity",
"evidence": [
{
"text": "The in vivo and in vitro studies suggested that NR enzyme is inhibited by NO in a mediated process that requires the cell integrity.",
"source_api": "sparser",
"pmid": "PMC10191200"
}
],
"obj": {
"db_refs": {
"UP": "P22945"
},
"name": "NIA_EMENI",
"TEXT": "NR"
}
}
]
'''
|
|
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
import model
'''
[[ 0.03945358 -0.0444226 ]
[ 0.02318096 0.00571191]
[ 0.00215099 0.00061758]
[-0.04630127 -0.03907587]
[ 0.02477768 -0.02004011]
[ 0.00562305 0.03217997]
[ 0.0229703 -0.03174545]
[-0.04438719 -0.00687166]
[ 0.03753287 -0.00732407]
[ 0.02020464 -0.04932442]
[-0.02919442 -0.04751461]
[-0.04881425 0.03355291]
[ 0.01543792 -0.01663084]
[-0.00563183 -0.01765734]
[-0.02370799 0.03013503]
[-0.01040227 0.01394242]
[-0.02957147 0.01905343]
[ 0.01896143 0.0437004 ]
[-0.0488611 -0.02024952]
[-0.03427717 -0.02002436]]
[[-0.04582611 0.03165361 0.03341099 0.02498396 -0.00405747]
[ 0.04609683 -0.02605636 0.03114619 -0.04617096 0.01960472]
[ 0.01715769 0.04887364 -0.04406923 0.02113665 -0.01745777]
[ 0.01267737 0.04962686 0.03808175 0.00741551 0.00754201]
[ 0.00734122 0.00167065 -0.00884858 0.02363003 0.02987597]
[-0.01590672 -0.00353671 -0.0046834 -0.02397486 -0.01590803]
[-0.02862644 -0.02824073 0.01624198 -0.0225911 0.03864351]
[-0.03349948 0.02139851 0.01027939 0.01095601 -0.0151518 ]
[-0.01234832 0.03841607 0.04164791 0.04757022 0.02577851]
[-0.01668056 -0.04694973 0.00422466 -0.02944108 -0.02543925]
[ 0.03240678 0.01726907 -0.0194487 0.00952698 -0.0277692 ]
[-0.00158513 0.02107752 -0.01342812 -0.00992333 0.04881607]
[ 0.01516276 -0.01148018 -0.02377852 0.02116852 0.00681743]
[-0.03672289 0.00992345 0.00807212 -0.02994791 0.01511351]
[-0.0141693 -0.00466816 -0.01336293 0.04354934 -0.04254397]
[ 0.01684963 0.04686322 -0.02303303 0.00437417 -0.02925827]
[-0.01778433 0.00464271 0.02053557 -0.03296186 -0.00603393]
[-0.03159004 0.02571815 -0.01076855 0.02767207 0.02036933]
[ 0.02858665 0.00938568 -0.0342062 0.03908044 0.02502953]
[-0.04824239 0.00612401 0.01264863 0.03672833 0.01885514]]
[ 0.03002262 0.03607186 -0.03585084 -0.02389173 0.04190451 0.01144202
0.00309152 -0.01952825 -0.04142651 0.01618458 -0.0224176 -0.03141491
-0.04748542 0.02265899 -0.02689984 -0.03372463 0.00532304 0.0238619
0.03720967 0.01196872]
Torch TOP_C and TOP_H
TOP_C: 0.01 *
-1.6705 0.3561 1.1811 1.9170 0.6607
-1.6712 0.3541 1.1826 1.9134 0.6582
-1.6693 0.3587 1.1812 1.9196 0.6624
-1.6693 0.3571 1.1862 1.9131 0.6574
[torch.DoubleTensor of size 4x5]
TOP_H: 0.001 *
-8.4013 1.7818 5.8529 9.3769 3.3270
-8.4050 1.7717 5.8601 9.3598 3.3142
-8.3955 1.7945 5.8531 9.3896 3.3355
-8.3960 1.7865 5.8777 9.3582 3.3101
[torch.DoubleTensor of size 4x5]
TOP_C: 0.01 *
-2.5088 0.5292 1.7895 2.8844 1.0299
-2.5093 0.5281 1.7901 2.8825 1.0286
-2.5082 0.5318 1.7857 2.8908 1.0346
-2.5078 0.5304 1.7929 2.8820 1.0278
[torch.DoubleTensor of size 4x5]
TOP_H: 0.01 *
-1.2616 0.2648 0.8869 1.4114 0.5185
-1.2618 0.2642 0.8871 1.4104 0.5178
-1.2612 0.2661 0.8850 1.4145 0.5209
-1.2611 0.2654 0.8886 1.4102 0.5174
[torch.DoubleTensor of size 4x5]
TOP_C: 0.01 *
-2.9315 0.6096 2.1031 3.3684 1.2311
-2.9293 0.6146 2.1030 3.3735 1.2344
-2.9300 0.6146 2.0974 3.3795 1.2391
-2.9291 0.6152 2.1020 3.3749 1.2355
[torch.DoubleTensor of size 4x5]
TOP_H: 0.01 *
-1.4739 0.3050 1.0423 1.6485 0.6197
-1.4728 0.3075 1.0423 1.6509 0.6214
-1.4731 0.3076 1.0396 1.6539 0.6238
-1.4727 0.3078 1.0418 1.6516 0.6220
[torch.DoubleTensor of size 4x5]
'''
LSTM_W = np.array([
[ 0.03945358, 0.02318096, 0.00215099, -0.04630127, 0.02477768, 0.00562305,
0.0229703 , -0.04438719, 0.03753287, 0.02020464, -0.02919442, -0.04881425,
0.01543792, -0.00563183, -0.02370799, -0.01040227, -0.02957147, 0.01896143,
-0.0488611 , -0.03427717],
[-0.0444226 , 0.00571191, 0.00061758, -0.03907587, -0.02004011, 0.03217997,
-0.03174545, -0.00687166, -0.00732407, -0.04932442, -0.04751461, 0.03355291,
-0.01663084, -0.01765734, 0.03013503, 0.01394242, 0.01905343, 0.0437004,
-0.02024952, -0.02002436],
[-0.04582611, 0.04609683, 0.01715769, 0.01267737, 0.00734122, -0.01590672,
-0.02862644, -0.03349948, -0.01234832, -0.01668056, 0.03240678, -0.00158513,
0.01516276, -0.03672289, -0.0141693 , 0.01684963, -0.01778433, -0.03159004,
0.02858665, -0.04824239],
[ 0.03165361, -0.02605636, 0.04887364, 0.04962686, 0.00167065, -0.00353671,
-0.02824073, 0.02139851, 0.03841607, -0.04694973, 0.01726907, 0.02107752,
-0.01148018, 0.00992345, -0.00466816, 0.04686322, 0.00464271, 0.02571815,
0.00938568, 0.00612401],
[ 0.03341099, 0.03114619, -0.04406923, 0.03808175, -0.00884858, -0.0046834,
0.01624198, 0.01027939, 0.04164791, 0.00422466, -0.0194487 , -0.01342812,
-0.02377852, 0.00807212, -0.01336293, -0.02303303, 0.02053557, -0.01076855,
-0.0342062 , 0.01264863],
[ 0.02498396, -0.04617096, 0.02113665, 0.00741551, 0.02363003, -0.02397486,
-0.0225911 , 0.01095601, 0.04757022, -0.02944108, 0.00952698, -0.00992333,
0.02116852, -0.02994791, 0.04354934, 0.00437417, -0.03296186, 0.02767207,
0.03908044, 0.03672833],
[-0.00405747, 0.01960472, -0.01745777, 0.00754201, 0.02987597, -0.01590803,
0.03864351, -0.0151518 , 0.02577851, -0.02543925, -0.0277692 , 0.04881607,
0.00681743, 0.01511351, -0.04254397, -0.02925827, -0.00603393, 0.02036933,
0.02502953, 0.01885514]
])
LSTM_B = np.array(
[ 0.03002262, 0.03607186, -0.03585084, -0.02389173, 0.04190451, 0.01144202,
0.00309152, -0.01952825, -0.04142651, 0.01618458, -0.0224176 , -0.03141491,
-0.04748542, 0.02265899, -0.02689984, -0.03372463, 0.00532304, 0.0238619,
0.03720967, 0.01196872
])
INPUT_CNN = np.array([
[[-0.04201929, 0.02275813],
[-0.04060676, 0.02283999],
[-0.04333816, 0.02333505],
[-0.04131923, 0.02480407]],
[[-0.04124087, 0.02429205],
[-0.04117644, 0.02419558],
[-0.04282973, 0.02318067],
[-0.04131923, 0.02480407]],
[[-0.03877186, 0.0243939 ],
[-0.04173752, 0.02552123],
[-0.04168687, 0.02385954],
[-0.04201929, 0.02454825]]]).transpose( (1, 0, 2) )
assert LSTM_W.shape == (7, 20)
new_lstm_w = np.zeros((7,20), dtype=np.float32)
new_lstm_w[:,0:5] = LSTM_W[:,0:5]
new_lstm_w[:,5:10] = LSTM_W[:,15:20]
new_lstm_w[:,10:15] = LSTM_W[:,10:15]
new_lstm_w[:,15:20] = LSTM_W[:,5:10]
LSTM_W = new_lstm_w
new_lstm_b = np.zeros((20,), dtype=np.float32)
new_lstm_b[0:5] = LSTM_B[0:5]
new_lstm_b[5:10] = LSTM_B[15:20]
new_lstm_b[10:15] = LSTM_B[10:15]
new_lstm_b[15:20] = LSTM_B[5:10]
LSTM_B = new_lstm_b
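# The 20-wide preactivation consists of four 5-unit gate blocks; swapping the
# 2nd and 4th blocks above rearranges the Torch-side ordering into the
# (i, j, f, o) order used by the manual LSTM step below (and by TensorFlow's
# BasicLSTMCell).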
inp = INPUT_CNN[0, 0, :]
print(inp) # first batch, first unroll step
inp_h = np.hstack([inp, np.zeros(5)])
prev_c = np.zeros(5)
all_inps = np.dot(inp_h, LSTM_W) + LSTM_B
print('ALL_INPS:', all_inps)
i, j, f, o = np.split(all_inps, 4)
print(i, j, f, o)
def sigmoid(x):
return 1. / (1. + np.exp(-x))
new_c = prev_c * sigmoid(f) + sigmoid(i) * np.tanh(j)
print('NEW_C:', new_c)
new_h = np.tanh(new_c) * sigmoid(o)
print('NEW_H:', new_h)
'''
next_c: [-0.01671056 0.00356125 0.01181377 0.01917946 0.00660749]
next_h: [-0.00840437 0.00178187 0.00585398 0.00938162 0.00332717]
'''
'''
[ 1.88394852e-04 4.18517351e-04 4.94887555e-05 3.97929451e-04
2.45719528e-04]
'''
'''
[array([[-0.00840133, 0.00178184, 0.00585286, 0.00937691, 0.00332699],
[-0.00840504, 0.00177166, 0.00586006, 0.00935978, 0.00331423],
[-0.00839551, 0.0017945 , 0.00585306, 0.00938957, 0.00333546],
[-0.00839595, 0.00178647, 0.0058777 , 0.00935818, 0.00331012]], dtype=float32), array([[-0.0126155 , 0.00264827, 0.00886869, 0.01411371, 0.00518486],
[-0.01261795, 0.00264249, 0.00887132, 0.01410431, 0.00517832],
[-0.01261209, 0.00266095, 0.00885007, 0.01414492, 0.00520893],
[-0.01261059, 0.00265393, 0.00888564, 0.01410206, 0.00517435]], dtype=float32), array([[-0.01473925, 0.00305038, 0.01042287, 0.01648509, 0.00619734],
[-0.01472822, 0.00307533, 0.01042284, 0.01650903, 0.00621392],
[-0.01473146, 0.00307552, 0.01039554, 0.01653865, 0.00623778],
[-0.01472719, 0.00307848, 0.01041825, 0.01651621, 0.00621954]], dtype=float32)]
'''
from read_param_init import SOFTMAX_W, SOFTMAX_B
Y = np.array([[ 2, 3, 4],
[ 32, 429, 7408],
[3078, 64, 35],
[ 27, 48, 395]], dtype=np.int32)
class TestRNN(tf.test.TestCase):
def model(self):
return model.inference_graph(char_vocab_size=51, word_vocab_size=10000,
char_embed_size=3, batch_size=4, num_highway_layers=0,
num_rnn_layers=1, rnn_size=5, max_word_length=11,
kernels= [2], kernel_features=[2], num_unroll_steps=3,
dropout=0.0)
def test(self):
with self.test_session() as sess:
m = self.model()
loss = model.loss_graph(m.logits, batch_size=4, num_unroll_steps=3)
rnn_outputs = [
np.array([[-0.00840133, 0.00178184, 0.00585286, 0.00937691, 0.00332699],
[-0.00840504, 0.00177166, 0.00586006, 0.00935978, 0.00331423],
[-0.00839551, 0.0017945 , 0.00585306, 0.00938957, 0.00333546],
[-0.00839595, 0.00178647, 0.0058777 , 0.00935818, 0.00331012]]),
np.array([[-0.0126155 , 0.00264827, 0.00886869, 0.01411371, 0.00518486],
[-0.01261795, 0.00264249, 0.00887132, 0.01410431, 0.00517832],
[-0.01261209, 0.00266095, 0.00885007, 0.01414492, 0.00520893],
[-0.01261059, 0.00265393, 0.00888564, 0.01410206, 0.00517435]]),
np.array([[-0.01473925, 0.00305038, 0.01042287, 0.01648509, 0.00619734],
[-0.01472822, 0.00307533, 0.01042284, 0.01650903, 0.00621392],
[-0.01473146, 0.00307552, 0.01039554, 0.01653865, 0.00623778],
[-0.01472719, 0.00307848, 0.01041825, 0.01651621, 0.00621954]])
]
feed = {
'LSTM/WordEmbedding/SimpleLinear/Matrix:0': SOFTMAX_W,
'LSTM/WordEmbedding/SimpleLinear/Bias:0': SOFTMAX_B,
loss.targets: Y
}
for o,r in zip(rnn_outputs, m.rnn_outputs):
feed[r] = o
l = sess.run(loss.loss, feed)
print(l)
'''
[[-0.00115102 -0.01835673 0.01088401 0.00553839 -0.02548739 0.00961501
-0.04911561 0.04094783 0.01729541 0.04113884 0.0110002 0.03410089
-0.02663253 0.01714642 0.03581101 -0.03634553 -0.01540088 -0.01764538
0.03884879 -0.03207963]
[-0.00115117 -0.01835723 0.01088434 0.00553844 -0.02548673 0.00961541
-0.04911538 0.04094752 0.01729532 0.04113849 0.01100097 0.0341017
-0.02663185 0.01714566 0.03581182 -0.03634511 -0.0154006 -0.01764595
0.03884758 -0.03208043]
[-0.00115108 -0.01835609 0.01088368 0.00553811 -0.0254877 0.0096147
-0.04911536 0.04094845 0.01729582 0.04113897 0.01099989 0.03410037
-0.02663329 0.01714694 0.03581046 -0.03634582 -0.01540092 -0.01764458
0.03884939 -0.03207891]
[-0.0011517 -0.01835642 0.01088412 0.00553769 -0.02548616 0.00961538
-0.0491141 0.0409487 0.01729641 0.04113809 0.01100182 0.03410203
-0.02663257 0.01714548 0.03581202 -0.03634498 -0.01540009 -0.01764486
0.03884656 -0.03208012]]
[[-0.00137119 -0.01813851 0.01110794 0.00582019 -0.02566941 0.00940851
-0.04911464 0.04097762 0.0171818 0.04152314 0.01122282 0.0339342
-0.02648103 0.01748628 0.03570804 -0.0365119 -0.01505298 -0.01722943
0.03911369 -0.03211264]
[-0.00137125 -0.01813885 0.01110811 0.00582023 -0.02566908 0.00940875
-0.04911457 0.04097738 0.0171817 0.04152295 0.0112232 0.03393462
-0.02648065 0.01748586 0.03570845 -0.03651169 -0.01505288 -0.01722979
0.03911307 -0.03211308]
[-0.00137074 -0.01813789 0.01110745 0.00582038 -0.02567078 0.0094078
-0.04911549 0.04097777 0.01718157 0.04152391 0.01122116 0.03393264
-0.02648198 0.01748771 0.03570654 -0.03651269 -0.01505364 -0.01722879
0.03911621 -0.03211133]
[-0.00137166 -0.01813823 0.01110794 0.00581962 -0.02566858 0.00940876
-0.04911353 0.04097832 0.01718257 0.04152259 0.01122391 0.03393493
-0.02648121 0.01748567 0.03570866 -0.03651157 -0.01505247 -0.01722896
0.03911219 -0.03211286]]
[[-0.00148683 -0.01802572 0.01122714 0.00596118 -0.02575907 0.00930692
-0.04911366 0.04099131 0.01712257 0.04171915 0.01133703 0.033848
-0.02640488 0.01765947 0.03566255 -0.03659378 -0.01487576 -0.0170169
0.03924932 -0.03213345]
[-0.00148696 -0.01802452 0.01122649 0.00596062 -0.02575966 0.00930636
-0.04911316 0.04099248 0.01712336 0.04171939 0.01133645 0.033847
-0.02640637 0.01766046 0.03566148 -0.03659437 -0.01487585 -0.01701534
0.03925047 -0.03213206]
[-0.0014862 -0.01802442 0.01122626 0.00596135 -0.0257613 0.00930568
-0.04911482 0.04099185 0.01712243 0.04172042 0.0113344 0.03384538
-0.02640666 0.0176619 0.03566003 -0.03659511 -0.01487673 -0.0170155
0.03925342 -0.03213113]
[-0.00148684 -0.01802438 0.01122635 0.00596064 -0.02575997 0.0093062
-0.04911336 0.04099251 0.01712332 0.04171955 0.01133605 0.03384664
-0.02640661 0.01766078 0.03566112 -0.03659455 -0.01487602 -0.0170152
0.03925106 -0.03213174]]
'''
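# NOTE (added): the `assert False` below presumably exists only to force the test
# to fail so that the loss values printed above appear in the test output; it does
# not check any behaviour itself.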
assert False
|
|
from sympy import (abc, Add, cos, Derivative, diff, exp, Float, Function,
I, Integer, log, Mul, oo, Poly, Rational, S, sin, sqrt, Symbol, symbols,
Wild, pi, meijerg
)
from sympy.utilities.pytest import XFAIL
def test_symbol():
x = Symbol('x')
a, b, c, p, q = map(Wild, 'abcpq')
e = x
assert e.match(x) == {}
assert e.matches(x) == {}
assert e.match(a) == {a: x}
e = Rational(5)
assert e.match(c) == {c: 5}
assert e.match(e) == {}
assert e.match(e + 1) is None
def test_add():
x, y, a, b, c = map(Symbol, 'xyabc')
p, q, r = map(Wild, 'pqr')
e = a + b
assert e.match(p + b) == {p: a}
assert e.match(p + a) == {p: b}
e = 1 + b
assert e.match(p + b) == {p: 1}
e = a + b + c
assert e.match(a + p + c) == {p: b}
assert e.match(b + p + c) == {p: a}
e = a + b + c + x
assert e.match(a + p + x + c) == {p: b}
assert e.match(b + p + c + x) == {p: a}
assert e.match(b) is None
assert e.match(b + p) == {p: a + c + x}
assert e.match(a + p + c) == {p: b + x}
assert e.match(b + p + c) == {p: a + x}
e = 4*x + 5
assert e.match(4*x + p) == {p: 5}
assert e.match(3*x + p) == {p: x + 5}
assert e.match(p*x + 5) == {p: 4}
def test_power():
x, y, a, b, c = map(Symbol, 'xyabc')
p, q, r = map(Wild, 'pqr')
e = (x + y)**a
assert e.match(p**q) == {p: x + y, q: a}
assert e.match(p**p) is None
e = (x + y)**(x + y)
assert e.match(p**p) == {p: x + y}
assert e.match(p**q) == {p: x + y, q: x + y}
e = (2*x)**2
assert e.match(p*q**r) == {p: 4, q: x, r: 2}
e = Integer(1)
assert e.match(x**p) == {p: 0}
def test_match_exclude():
x = Symbol('x')
y = Symbol('y')
p = Wild("p")
q = Wild("q")
r = Wild("r")
e = Rational(6)
assert e.match(2*p) == {p: 3}
e = 3/(4*x + 5)
assert e.match(3/(p*x + q)) == {p: 4, q: 5}
e = 3/(4*x + 5)
assert e.match(p/(q*x + r)) == {p: 3, q: 4, r: 5}
e = 2/(x + 1)
assert e.match(p/(q*x + r)) == {p: 2, q: 1, r: 1}
e = 1/(x + 1)
assert e.match(p/(q*x + r)) == {p: 1, q: 1, r: 1}
e = 4*x + 5
assert e.match(p*x + q) == {p: 4, q: 5}
e = 4*x + 5*y + 6
assert e.match(p*x + q*y + r) == {p: 4, q: 5, r: 6}
a = Wild('a', exclude=[x])
e = 3*x
assert e.match(p*x) == {p: 3}
assert e.match(a*x) == {a: 3}
e = 3*x**2
assert e.match(p*x) == {p: 3*x}
assert e.match(a*x) is None
e = 3*x + 3 + 6/x
assert e.match(p*x**2 + p*x + 2*p) == {p: 3/x}
assert e.match(a*x**2 + a*x + 2*a) is None
def test_mul():
x, y, a, b, c = map(Symbol, 'xyabc')
p, q = map(Wild, 'pq')
e = 4*x
assert e.match(p*x) == {p: 4}
assert e.match(p*y) is None
assert e.match(e + p*y) == {p: 0}
e = a*x*b*c
assert e.match(p*x) == {p: a*b*c}
assert e.match(c*p*x) == {p: a*b}
e = (a + b)*(a + c)
assert e.match((p + b)*(p + c)) == {p: a}
e = x
assert e.match(p*x) == {p: 1}
e = exp(x)
assert e.match(x**p*exp(x*q)) == {p: 0, q: 1}
e = I*Poly(x, x)
assert e.match(I*p) == {p: Poly(x, x)}
def test_mul_noncommutative():
x, y = symbols('x y')
A, B = symbols('A B', commutative=False)
u, v = symbols('u v', cls=Wild)
w = Wild('w', commutative=False)
assert (u*v).matches(x) in ({v: x, u: 1}, {u: x, v: 1})
assert (u*v).matches(x*y) in ({v: y, u: x}, {u: y, v: x})
assert (u*v).matches(A) is None
assert (u*v).matches(A*B) is None
assert (u*v).matches(x*A) is None
assert (u*v).matches(x*y*A) is None
assert (u*v).matches(x*A*B) is None
assert (u*v).matches(x*y*A*B) is None
assert (v*w).matches(x) is None
assert (v*w).matches(x*y) is None
assert (v*w).matches(A) == {w: A, v: 1}
assert (v*w).matches(A*B) == {w: A*B, v: 1}
assert (v*w).matches(x*A) == {w: A, v: x}
assert (v*w).matches(x*y*A) == {w: A, v: x*y}
assert (v*w).matches(x*A*B) == {w: A*B, v: x}
assert (v*w).matches(x*y*A*B) == {w: A*B, v: x*y}
assert (v*w).matches(-x) is None
assert (v*w).matches(-x*y) is None
assert (v*w).matches(-A) == {w: A, v: -1}
assert (v*w).matches(-A*B) == {w: A*B, v: -1}
assert (v*w).matches(-x*A) == {w: A, v: -x}
assert (v*w).matches(-x*y*A) == {w: A, v: -x*y}
assert (v*w).matches(-x*A*B) == {w: A*B, v: -x}
assert (v*w).matches(-x*y*A*B) == {w: A*B, v: -x*y}
def test_complex():
a, b, c = map(Symbol, 'abc')
x, y = map(Wild, 'xy')
assert (1 + I).match(x + I) == {x: 1}
assert (a + I).match(x + I) == {x: a}
assert (2*I).match(x*I) == {x: 2}
assert (a*I).match(x*I) == {x: a}
assert (a*I).match(x*y) == {x: I, y: a}
assert (2*I).match(x*y) == {x: 2, y: I}
assert (a + b*I).match(x + y*I) == {x: a, y: b}
def test_functions():
from sympy.core.function import WildFunction
x = Symbol('x')
g = WildFunction('g')
p = Wild('p')
q = Wild('q')
f = cos(5*x)
notf = x
assert f.match(p*cos(q*x)) == {p: 1, q: 5}
assert f.match(p*g) == {p: 1, g: cos(5*x)}
assert notf.match(g) is None
@XFAIL
def test_functions_X1():
from sympy.core.function import WildFunction
x = Symbol('x')
g = WildFunction('g')
p = Wild('p')
q = Wild('q')
f = cos(5*x)
assert f.match(p*g(q*x)) == {p: 1, g: cos, q: 5}
def test_interface():
x, y = map(Symbol, 'xy')
p, q = map(Wild, 'pq')
assert (x + 1).match(p + 1) == {p: x}
assert (x*3).match(p*3) == {p: x}
assert (x**3).match(p**3) == {p: x}
assert (x*cos(y)).match(p*cos(q)) == {p: x, q: y}
assert (x*y).match(p*q) in [{p:x, q:y}, {p:y, q:x}]
assert (x + y).match(p + q) in [{p:x, q:y}, {p:y, q:x}]
assert (x*y + 1).match(p*q) in [{p:1, q:1 + x*y}, {p:1 + x*y, q:1}]
def test_derivative1():
x, y = map(Symbol, 'xy')
p, q = map(Wild, 'pq')
f = Function('f', nargs=1)
fd = Derivative(f(x), x)
assert fd.match(p) == {p: fd}
assert (fd + 1).match(p + 1) == {p: fd}
assert (fd).match(fd) == {}
assert (3*fd).match(p*fd) is not None
assert (3*fd - 1).match(p*fd + q) == {p: 3, q: -1}
def test_derivative_bug1():
f = Function("f")
x = Symbol("x")
a = Wild("a", exclude=[f, x])
b = Wild("b", exclude=[f])
pattern = a * Derivative(f(x), x, x) + b
expr = Derivative(f(x), x) + x**2
d1 = {b: x**2}
d2 = pattern.xreplace(d1).matches(expr, d1)
assert d2 is None
def test_derivative2():
f = Function("f")
x = Symbol("x")
a = Wild("a", exclude=[f, x])
b = Wild("b", exclude=[f])
e = Derivative(f(x), x)
assert e.match(Derivative(f(x), x)) == {}
assert e.match(Derivative(f(x), x, x)) is None
e = Derivative(f(x), x, x)
assert e.match(Derivative(f(x), x)) is None
assert e.match(Derivative(f(x), x, x)) == {}
e = Derivative(f(x), x) + x**2
assert e.match(a*Derivative(f(x), x) + b) == {a: 1, b: x**2}
assert e.match(a*Derivative(f(x), x, x) + b) is None
e = Derivative(f(x), x, x) + x**2
assert e.match(a*Derivative(f(x), x) + b) is None
assert e.match(a*Derivative(f(x), x, x) + b) == {a: 1, b: x**2}
def test_match_deriv_bug1():
n = Function('n')
l = Function('l')
x = Symbol('x')
p = Wild('p')
e = diff(l(x), x)/x - diff(diff(n(x), x), x)/2 - \
diff(n(x), x)**2/4 + diff(n(x), x)*diff(l(x), x)/4
e = e.subs(n(x), -l(x)).doit()
t = x*exp(-l(x))
t2 = t.diff(x, x)/t
assert e.match( (p*t2).expand() ) == {p: -Rational(1)/2}
def test_match_bug2():
x, y = map(Symbol, 'xy')
p, q, r = map(Wild, 'pqr')
res = (x + y).match(p + q + r)
assert (p + q + r).subs(res) == x + y
def test_match_bug3():
x, a, b = map(Symbol, 'xab')
p = Wild('p')
assert (b*x*exp(a*x)).match(x*exp(p*x)) is None
def test_match_bug4():
x = Symbol('x')
p = Wild('p')
e = x
assert e.match(-p*x) == {p: -1}
def test_match_bug5():
x = Symbol('x')
p = Wild('p')
e = -x
assert e.match(-p*x) == {p: 1}
def test_match_bug6():
x = Symbol('x')
p = Wild('p')
e = x
assert e.match(3*p*x) == {p: Rational(1)/3}
def test_match_polynomial():
x = Symbol('x')
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
c = Wild('c', exclude=[x])
d = Wild('d', exclude=[x])
eq = 4*x**3 + 3*x**2 + 2*x + 1
pattern = a*x**3 + b*x**2 + c*x + d
assert eq.match(pattern) == {a: 4, b: 3, c: 2, d: 1}
assert (eq - 3*x**2).match(pattern) == {a: 4, b: 0, c: 2, d: 1}
assert (x + sqrt(2) + 3).match(a + b*x + c*x**2) == \
{b: 1, a: sqrt(2) + 3, c: 0}
def test_exclude():
x, y, a = map(Symbol, 'xya')
p = Wild('p', exclude=[1, x])
q = Wild('q')
r = Wild('r', exclude=[sin, y])
assert sin(x).match(r) is None
assert cos(y).match(r) is None
e = 3*x**2 + y*x + a
assert e.match(p*x**2 + q*x + r) == {p: 3, q: y, r: a}
e = x + 1
assert e.match(x + p) is None
assert e.match(p + 1) is None
assert e.match(x + 1 + p) == {p: 0}
e = cos(x) + 5*sin(y)
assert e.match(r) is None
assert e.match(cos(y) + r) is None
assert e.match(r + p*sin(q)) == {r: cos(x), p: 5, q: y}
def test_floats():
a, b = map(Wild, 'ab')
e = cos(0.12345, evaluate=False)**2
r = e.match(a*cos(b)**2)
assert r == {a: 1, b: Float(0.12345)}
def test_Derivative_bug1():
f = Function("f")
x = abc.x
a = Wild("a", exclude=[f(x)])
b = Wild("b", exclude=[f(x)])
eq = f(x).diff(x)
assert eq.match(a*Derivative(f(x), x) + b) == {a: 1, b: 0}
def test_match_wild_wild():
p = Wild('p')
q = Wild('q')
r = Wild('r')
assert p.match(q + r) in [ {q: p, r: 0}, {q: 0, r: p} ]
assert p.match(q*r) in [ {q: p, r: 1}, {q: 1, r: p} ]
p = Wild('p')
q = Wild('q', exclude=[p])
r = Wild('r')
assert p.match(q + r) == {q: 0, r: p}
assert p.match(q*r) == {q: 1, r: p}
p = Wild('p')
q = Wild('q', exclude=[p])
r = Wild('r', exclude=[p])
assert p.match(q + r) is None
assert p.match(q*r) is None
def test_combine_inverse():
x, y = symbols("x y")
assert Mul._combine_inverse(x*I*y, x*I) == y
assert Mul._combine_inverse(x*I*y, y*I) == x
assert Mul._combine_inverse(oo*I*y, y*I) == oo
assert Mul._combine_inverse(oo*I*y, oo*I) == y
assert Add._combine_inverse(oo, oo) == S(0)
assert Add._combine_inverse(oo*I, oo*I) == S(0)
def test_issue_3773():
x = symbols('x')
z, phi, r = symbols('z phi r')
c, A, B, N = symbols('c A B N', cls=Wild)
l = Wild('l', exclude=(0,))
eq = z * sin(2*phi) * r**7
matcher = c * sin(phi*N)**l * r**A * log(r)**B
assert eq.match(matcher) == {c: z, l: 1, N: 2, A: 7, B: 0}
assert (-eq).match(matcher) == {c: -z, l: 1, N: 2, A: 7, B: 0}
assert (x*eq).match(matcher) == {c: x*z, l: 1, N: 2, A: 7, B: 0}
assert (-7*x*eq).match(matcher) == {c: -7*x*z, l: 1, N: 2, A: 7, B: 0}
matcher = c*sin(phi*N)**l * r**A
assert eq.match(matcher) == {c: z, l: 1, N: 2, A: 7}
assert (-eq).match(matcher) == {c: -z, l: 1, N: 2, A: 7}
assert (x*eq).match(matcher) == {c: x*z, l: 1, N: 2, A: 7}
assert (-7*x*eq).match(matcher) == {c: -7*x*z, l: 1, N: 2, A: 7}
def test_issue_3883():
from sympy.abc import gamma, mu, x
f = (-gamma * (x - mu)**2 - log(gamma) + log(2*pi))/2
a, b, c = symbols('a b c', cls=Wild, exclude=(gamma,))
assert f.match(a * log(gamma) + b * gamma + c) == \
{a: -S(1)/2, b: -(x - mu)**2/2, c: log(2*pi)/2}
assert f.expand().collect(gamma).match(a * log(gamma) + b * gamma + c) == \
{a: -S(1)/2, b: (-(x - mu)**2/2).expand(), c: (log(2*pi)/2).expand()}
g1 = Wild('g1', exclude=[gamma])
g2 = Wild('g2', exclude=[gamma])
g3 = Wild('g3', exclude=[gamma])
assert f.expand().match(g1 * log(gamma) + g2 * gamma + g3) == \
{g3: log(2)/2 + log(pi)/2, g1: -S(1)/2, g2: -mu**2/2 + mu*x - x**2/2}
def test_issue_4418():
x = Symbol('x')
a, b, c = symbols('a b c', cls=Wild, exclude=(x,))
f, g = symbols('f g', cls=Function)
eq = diff(g(x)*f(x).diff(x), x)
assert eq.match(
g(x).diff(x)*f(x).diff(x) + g(x)*f(x).diff(x, x) + c) == {c: 0}
assert eq.match(a*g(x).diff(
x)*f(x).diff(x) + b*g(x)*f(x).diff(x, x) + c) == {a: 1, b: 1, c: 0}
def test_issue_4700():
f = Function('f')
x = Symbol('x')
a, b = symbols('a b', cls=Wild, exclude=(f(x),))
p = a*f(x) + b
eq1 = sin(x)
eq2 = f(x) + sin(x)
eq3 = f(x) + x + sin(x)
eq4 = x + sin(x)
assert eq1.match(p) == {a: 0, b: sin(x)}
assert eq2.match(p) == {a: 1, b: sin(x)}
assert eq3.match(p) == {a: 1, b: x + sin(x)}
assert eq4.match(p) == {a: 0, b: x + sin(x)}
def test_issue_5168():
a, b, c = symbols('a b c', cls=Wild)
x = Symbol('x')
f = Function('f')
assert x.match(a) == {a: x}
assert x.match(a*f(x)**c) == {a: x, c: 0}
assert x.match(a*b) == {a: 1, b: x}
assert x.match(a*b*f(x)**c) == {a: 1, b: x, c: 0}
assert (-x).match(a) == {a: -x}
assert (-x).match(a*f(x)**c) == {a: -x, c: 0}
assert (-x).match(a*b) == {a: -1, b: x}
assert (-x).match(a*b*f(x)**c) == {a: -1, b: x, c: 0}
assert (2*x).match(a) == {a: 2*x}
assert (2*x).match(a*f(x)**c) == {a: 2*x, c: 0}
assert (2*x).match(a*b) == {a: 2, b: x}
assert (2*x).match(a*b*f(x)**c) == {a: 2, b: x, c: 0}
assert (-2*x).match(a) == {a: -2*x}
assert (-2*x).match(a*f(x)**c) == {a: -2*x, c: 0}
assert (-2*x).match(a*b) == {a: -2, b: x}
assert (-2*x).match(a*b*f(x)**c) == {a: -2, b: x, c: 0}
def test_issue_4559():
x = Symbol('x')
e = Symbol('e')
w = Wild('w', exclude=[x])
y = Wild('y')
# this is as it should be
assert (3/x).match(w/y) == {w: 3, y: x}
assert (3*x).match(w*y) == {w: 3, y: x}
assert (x/3).match(y/w) == {w: 3, y: x}
assert (3*x).match(y/w) == {w: S(1)/3, y: x}
# these could be allowed to fail
assert (x/3).match(w/y) == {w: S(1)/3, y: 1/x}
assert (3*x).match(w/y) == {w: 3, y: 1/x}
assert (3/x).match(w*y) == {w: 3, y: 1/x}
# Note that solve will give
# multiple roots but match only gives one:
#
# >>> solve(x**r-y**2,y)
# [-x**(r/2), x**(r/2)]
r = Symbol('r', rational=True)
assert (x**r).match(y**2) == {y: x**(r/2)}
assert (x**e).match(y**2) == {y: sqrt(x**e)}
# since (x**i = y) -> x = y**(1/i) where i is an integer
# the following should also be valid as long as y is not
# zero when i is negative.
a = Wild('a')
e = S(0)
assert e.match(a) == {a: e}
assert e.match(1/a) is None
assert e.match(a**.3) is None
e = S(3)
assert e.match(1/a) == {a: 1/e}
assert e.match(1/a**2) == {a: 1/sqrt(e)}
e = pi
assert e.match(1/a) == {a: 1/e}
assert e.match(1/a**2) == {a: 1/sqrt(e)}
assert (-e).match(sqrt(a)) is None
assert (-e).match(a**2) == {a: I*sqrt(pi)}
# The pattern matcher doesn't know how to handle (x - a)**2 == (a - x)**2. To
# avoid ambiguity in actual applications, don't put a coefficient (including a
# minus sign) in front of a wild.
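# For instance, (x - 2)**2 will not currently match the pattern (a - x)**2 even
# though the two expressions are interchangeable; the XFAIL test below records
# this known limitation.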
@XFAIL
def test_issue_4883():
a = Wild('a')
x = Symbol('x')
e = [i**2 for i in (x - 2, 2 - x)]
p = [i**2 for i in (x - a, a - x)]
for eq in e:
for pat in p:
assert eq.match(pat) == {a: 2}
def test_issue_4319():
x, y = symbols('x y')
p = -x*(S(1)/8 - y)
ans = {S.Zero, y - S(1)/8}
def ok(pat):
assert set(p.match(pat).values()) == ans
ok(Wild("coeff", exclude=[x])*x + Wild("rest"))
ok(Wild("w", exclude=[x])*x + Wild("rest"))
ok(Wild("coeff", exclude=[x])*x + Wild("rest"))
ok(Wild("w", exclude=[x])*x + Wild("rest"))
ok(Wild("e", exclude=[x])*x + Wild("rest"))
ok(Wild("ress", exclude=[x])*x + Wild("rest"))
ok(Wild("resu", exclude=[x])*x + Wild("rest"))
def test_issue_3778():
p, c, q = symbols('p c q', cls=Wild)
x = Symbol('x')
assert (sin(x)**2).match(sin(p)*sin(q)*c) == {q: x, c: 1, p: x}
assert (2*sin(x)).match(sin(p) + sin(q) + c) == {q: x, c: 0, p: x}
def test_issue_6103():
x = Symbol('x')
a = Wild('a')
assert (-I*x*oo).match(I*a*oo) == {a: -x}
def test_issue_3539():
a = Wild('a')
x = Symbol('x')
assert (x - 2).match(a - x) is None
assert (6/x).match(a*x) is None
assert (6/x**2).match(a/x) == {a: 6/x}
def test_gh_issue_2711():
x = Symbol('x')
f = meijerg(((), ()), ((0,), ()), x)
a = Wild('a')
b = Wild('b')
assert f.find(a) == set([(S.Zero,), ((), ()), ((S.Zero,), ()), x, S.Zero,
(), meijerg(((), ()), ((S.Zero,), ()), x)])
assert f.find(a + b) == \
{meijerg(((), ()), ((S.Zero,), ()), x), x, S.Zero}
assert f.find(a**2) == {meijerg(((), ()), ((S.Zero,), ()), x), x}
|
|
##########################################################################
#
# Copyright (c) 2009-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import maya.cmds
import imath
import IECore
import IECoreMaya
class TemporaryAttributeValuesTest( IECoreMaya.TestCase ) :
def test( self ) :
s = maya.cmds.spaceLocator()[0]
maya.cmds.addAttr( s, at="enum", sn="enumTest", enumName="A:B:C", defaultValue = 1 )
self.assertEqual( maya.cmds.getAttr( s + ".enumTest" ), 1 )
maya.cmds.addAttr( s, at="bool", sn="boolTest", defaultValue=1 )
self.assertEqual( maya.cmds.getAttr( s + ".boolTest" ), 1 )
maya.cmds.addAttr( s, at="float", sn="floatTest" )
self.assertEqual( maya.cmds.getAttr( s + ".floatTest" ), 0 )
maya.cmds.addAttr( s, at="long", sn="intTest" )
self.assertEqual( maya.cmds.getAttr( s + ".intTest" ), 0 )
maya.cmds.addAttr( s, at="float2", sn="float2Test" )
maya.cmds.addAttr( s, at="float", sn="float2TestX", parent="float2Test" )
maya.cmds.addAttr( s, at="float", sn="float2TestY", parent="float2Test" )
self.assertEqual( maya.cmds.getAttr( s + ".float2TestX" ), 0 )
self.assertEqual( maya.cmds.getAttr( s + ".float2TestY" ), 0 )
maya.cmds.addAttr( s, at="long2", sn="int2Test" )
maya.cmds.addAttr( s, at="long", sn="int2TestX", parent="int2Test", defaultValue=1 )
maya.cmds.addAttr( s, at="long", sn="int2TestY", parent="int2Test", defaultValue=2 )
self.assertEqual( maya.cmds.getAttr( s + ".int2TestX" ), 1 )
self.assertEqual( maya.cmds.getAttr( s + ".int2TestY" ), 2 )
maya.cmds.addAttr( s, at="float3", sn="float3Test" )
maya.cmds.addAttr( s, at="float", sn="float3TestX", parent="float3Test", defaultValue=10 )
maya.cmds.addAttr( s, at="float", sn="float3TestY", parent="float3Test", defaultValue=20 )
maya.cmds.addAttr( s, at="float", sn="float3TestZ", parent="float3Test", defaultValue=30 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestX" ), 10 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestY" ), 20 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestZ" ), 30 )
maya.cmds.addAttr( s, at="short3", sn="short3Test" )
maya.cmds.addAttr( s, at="short", sn="short3TestX", parent="short3Test", defaultValue=101 )
maya.cmds.addAttr( s, at="short", sn="short3TestY", parent="short3Test", defaultValue=201 )
maya.cmds.addAttr( s, at="short", sn="short3TestZ", parent="short3Test", defaultValue=301 )
self.assertEqual( maya.cmds.getAttr( s + ".short3TestX" ), 101 )
self.assertEqual( maya.cmds.getAttr( s + ".short3TestY" ), 201 )
self.assertEqual( maya.cmds.getAttr( s + ".short3TestZ" ), 301 )
maya.cmds.addAttr( s, dt="string", sn="stringTest" )
maya.cmds.setAttr( s + ".stringTest", "hi", type="string" )
self.assertEqual( maya.cmds.getAttr( s + ".stringTest" ), "hi" )
context = IECoreMaya.TemporaryAttributeValues(
{
s + ".enumTest" : 2,
s + ".boolTest" : False,
s + ".floatTest" : 10,
s + ".intTest" : 20,
s + ".float2Test" : ( 1, 2 ),
s + ".int2Test" : imath.V2i( 3, 4 ),
s + ".float3Test" : ( 9, 6, 1 ),
s + ".short3Test" : ( 500, 2, -1 ),
s + ".stringTest" : "bye",
}
)
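# Inside the `with` block below the attributes should report the temporary values
# passed above; once the block exits they are expected to revert to their original
# or default values, which the remaining assertions verify.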
with context :
self.assertEqual( maya.cmds.getAttr( s + ".enumTest" ), 2 )
self.assertEqual( maya.cmds.getAttr( s + ".boolTest" ), 0 )
self.assertEqual( maya.cmds.getAttr( s + ".floatTest" ), 10 )
self.assertEqual( maya.cmds.getAttr( s + ".intTest" ), 20 )
self.assertEqual( maya.cmds.getAttr( s + ".float2TestX" ), 1 )
self.assertEqual( maya.cmds.getAttr( s + ".float2TestY" ), 2 )
self.assertEqual( maya.cmds.getAttr( s + ".int2TestX" ), 3 )
self.assertEqual( maya.cmds.getAttr( s + ".int2TestY" ), 4 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestX" ), 9 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestY" ), 6 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestZ" ), 1 )
self.assertEqual( maya.cmds.getAttr( s + ".short3TestX" ), 500 )
self.assertEqual( maya.cmds.getAttr( s + ".short3TestY" ), 2 )
self.assertEqual( maya.cmds.getAttr( s + ".short3TestZ" ), -1 )
self.assertEqual( maya.cmds.getAttr( s + ".stringTest" ), "bye" )
self.assertEqual( maya.cmds.getAttr( s + ".enumTest" ), 1 )
self.assertEqual( maya.cmds.getAttr( s + ".boolTest" ), 1 )
self.assertEqual( maya.cmds.getAttr( s + ".floatTest" ), 0 )
self.assertEqual( maya.cmds.getAttr( s + ".intTest" ), 0 )
self.assertEqual( maya.cmds.getAttr( s + ".float2TestX" ), 0 )
self.assertEqual( maya.cmds.getAttr( s + ".float2TestY" ), 0 )
self.assertEqual( maya.cmds.getAttr( s + ".int2TestX" ), 1 )
self.assertEqual( maya.cmds.getAttr( s + ".int2TestY" ), 2 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestX" ), 10 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestY" ), 20 )
self.assertEqual( maya.cmds.getAttr( s + ".float3TestZ" ), 30 )
self.assertEqual( maya.cmds.getAttr( s + ".stringTest" ), "hi" )
def testNameSpaceAttributes( self ) :
maya.cmds.namespace( add="ns1" )
s = maya.cmds.spaceLocator()[0]
maya.cmds.addAttr( s, at="enum", sn="enumTest", enumName="A:B:C", defaultValue = 1 )
s = maya.cmds.rename( s, "ns1:"+s )
plugPath = s + ".enumTest"
maya.cmds.namespace( set=":" )
self.assertEqual( plugPath, "ns1:locator1.enumTest" )
self.assertEqual( maya.cmds.namespace( exists="ns1" ), True )
self.assertEqual( maya.cmds.namespaceInfo( currentNamespace=True ), ":" )
self.assertEqual( maya.cmds.getAttr( plugPath ), 1 )
with IECoreMaya.TemporaryAttributeValues( { plugPath : 2 } ) :
self.assertEqual( maya.cmds.getAttr( plugPath ), 2 )
self.assertEqual( maya.cmds.getAttr( plugPath ), 1 )
def testReferenceAttributes( self ) :
s = maya.cmds.spaceLocator()[0]
maya.cmds.addAttr( s, at="enum", sn="enumTest", enumName="A:B:C", defaultValue = 1 )
plugPath = s + ".enumTest"
self.assertEqual( maya.cmds.getAttr( plugPath ), 1 )
with IECoreMaya.TemporaryAttributeValues( { plugPath : 2 } ) :
self.assertEqual( maya.cmds.getAttr( plugPath ), 2 )
self.assertEqual( maya.cmds.getAttr( plugPath ), 1 )
# save it to a file
#######################################################################
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "temporaryAttributeReference.ma" ) )
referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
# make a new scene referencing that file
#######################################################################
maya.cmds.file( new = True, force = True )
maya.cmds.file( referenceScene, reference = True, namespace = "ns1" )
plugPath = "ns1:" + s + ".enumTest"
self.assertEqual( maya.cmds.getAttr( plugPath ), 1 )
with IECoreMaya.TemporaryAttributeValues( { plugPath : 2 } ) :
self.assertEqual( maya.cmds.getAttr( plugPath ), 2 )
self.assertEqual( maya.cmds.getAttr( plugPath ), 1 )
with IECoreMaya.TemporaryAttributeValues( { plugPath : 0 } ) :
self.assertEqual( maya.cmds.getAttr( plugPath ), 0 )
self.assertEqual( maya.cmds.getAttr( plugPath ), 1 )
def tearDown( self ) :
for f in [
"test/IECoreMaya/temporaryAttributeReference.ma" ,
] :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
IECoreMaya.TestProgram()
|
|
from typing import NamedTuple
import numpy as np
from . import is_scalar_nan
def _unique(values, *, return_inverse=False):
"""Helper function to find unique values with support for python objects.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : ndarray
Values from which to determine the unique values.
return_inverse : bool, default=False
If True, also return the indices to reconstruct `values` from the unique array.
Returns
-------
unique : ndarray
The sorted unique values.
unique_inverse : ndarray
The indices to reconstruct the original array from the unique array.
Only provided if `return_inverse` is True.
"""
if values.dtype == object:
return _unique_python(values, return_inverse=return_inverse)
# numerical
out = np.unique(values, return_inverse=return_inverse)
if return_inverse:
uniques, inverse = out
else:
uniques = out
# np.unique will have duplicate missing values at the end of `uniques`
# here we clip the duplicate NaNs so that only a single NaN remains
if uniques.size and is_scalar_nan(uniques[-1]):
nan_idx = np.searchsorted(uniques, np.nan)
uniques = uniques[:nan_idx + 1]
if return_inverse:
inverse[inverse > nan_idx] = nan_idx
if return_inverse:
return uniques, inverse
return uniques
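# Illustrative sketch (not part of the original module), assuming standard NumPy
# semantics for np.unique and np.searchsorted:
#   _unique(np.array([3.0, 1.0, np.nan, 1.0, np.nan]))
#     -> array([1., 3., nan])                      # duplicate NaNs collapsed to one
#   _unique(np.array(["b", None, "a", "b"], dtype=object))
#     -> array(['a', 'b', None], dtype=object)     # missing values sorted last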
class MissingValues(NamedTuple):
"""Data class for missing data information"""
nan: bool
none: bool
def to_list(self):
"""Convert tuple to a list where None is always first."""
output = []
if self.none:
output.append(None)
if self.nan:
output.append(np.nan)
return output
def _extract_missing(values):
"""Extract missing values from `values`.
Parameters
----------
values : set
Set of values to extract missing from.
Returns
-------
output : set
Set with missing values extracted.
missing_values : MissingValues
Object with missing value information.
"""
missing_values_set = {value for value in values
if value is None or is_scalar_nan(value)}
if not missing_values_set:
return values, MissingValues(nan=False, none=False)
if None in missing_values_set:
if len(missing_values_set) == 1:
output_missing_values = MissingValues(nan=False, none=True)
else:
# If there is more than one missing value, then it has to be
# float('nan') or np.nan
output_missing_values = MissingValues(nan=True, none=True)
else:
output_missing_values = MissingValues(nan=True, none=False)
# create set without the missing values
output = values - missing_values_set
return output, output_missing_values
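# Example (illustrative, added): _extract_missing({"a", None, np.nan}) returns
# ({"a"}, MissingValues(nan=True, none=True)); when no missing values are present
# the original set is returned unchanged alongside MissingValues(nan=False, none=False).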
class _nandict(dict):
"""Dictionary with support for nans."""
def __init__(self, mapping):
super().__init__(mapping)
for key, value in mapping.items():
if is_scalar_nan(key):
self.nan_value = value
break
def __missing__(self, key):
if hasattr(self, 'nan_value') and is_scalar_nan(key):
return self.nan_value
raise KeyError(key)
def _map_to_integer(values, uniques):
"""Map values based on its position in uniques."""
table = _nandict({val: i for i, val in enumerate(uniques)})
return np.array([table[v] for v in values])
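# Example (illustrative, added): with uniques ['a', np.nan] the _nandict lookup
# maps any NaN to the same code, so
#   _map_to_integer(np.array(['a', np.nan, 'a'], dtype=object), ['a', np.nan])
#     -> array([0, 1, 0])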
def _unique_python(values, *, return_inverse):
# Only used in `_unique`, see docstring there for details
try:
uniques_set = set(values)
uniques_set, missing_values = _extract_missing(uniques_set)
uniques = sorted(uniques_set)
uniques.extend(missing_values.to_list())
uniques = np.array(uniques, dtype=values.dtype)
except TypeError:
types = sorted(t.__qualname__
for t in set(type(v) for v in values))
raise TypeError("Encoders require their input to be uniformly "
f"strings or numbers. Got {types}")
if return_inverse:
return uniques, _map_to_integer(values, uniques)
return uniques
def _encode(values, *, uniques, check_unknown=True):
"""Helper function to encode values into [0, n_uniques - 1].
Uses pure python method for object dtype, and numpy method for
all other dtypes.
The numpy method has the limitation that the `uniques` need to
be sorted. Importantly, this is not checked but assumed to already be
the case. The calling method needs to ensure this for all non-object
values.
Parameters
----------
values : ndarray
Values to encode.
uniques : ndarray
The unique values in `values`. If the dtype is not object, then
`uniques` needs to be sorted.
check_unknown : bool, default=True
If True, check for values in `values` that are not in `uniques`
and raise an error. This is ignored for object dtype, and treated as
True in this case. This parameter is useful for
_BaseEncoder._transform() to avoid calling _check_unknown()
twice.
Returns
-------
encoded : ndarray
Encoded values
"""
if values.dtype.kind in 'OUS':
try:
return _map_to_integer(values, uniques)
except KeyError as e:
raise ValueError(f"y contains previously unseen labels: {str(e)}")
else:
if check_unknown:
diff = _check_unknown(values, uniques)
if diff:
raise ValueError(f"y contains previously unseen labels: "
f"{str(diff)}")
return np.searchsorted(uniques, values)
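# Example (illustrative, added): numeric input is searched directly against the
# sorted `uniques`, while object/string input goes through the python table lookup:
#   _encode(np.array([2, 1, 2]), uniques=np.array([1, 2]))                    -> array([1, 0, 1])
#   _encode(np.array(['b', 'a'], dtype=object),
#           uniques=np.array(['a', 'b'], dtype=object))                       -> array([1, 0])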
def _check_unknown(values, known_values, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
known_values : array
Known values. Must be unique.
return_mask : bool, default=False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `known_values`.
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
valid_mask = None
if values.dtype.kind in 'OUS':
values_set = set(values)
values_set, missing_in_values = _extract_missing(values_set)
uniques_set = set(known_values)
uniques_set, missing_in_uniques = _extract_missing(uniques_set)
diff = values_set - uniques_set
nan_in_diff = missing_in_values.nan and not missing_in_uniques.nan
none_in_diff = missing_in_values.none and not missing_in_uniques.none
def is_valid(value):
return (value in uniques_set or
missing_in_uniques.none and value is None or
missing_in_uniques.nan and is_scalar_nan(value))
if return_mask:
if diff or nan_in_diff or none_in_diff:
valid_mask = np.array([is_valid(value) for value in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
diff = list(diff)
if none_in_diff:
diff.append(None)
if nan_in_diff:
diff.append(np.nan)
else:
unique_values = np.unique(values)
diff = np.setdiff1d(unique_values, known_values,
assume_unique=True)
if return_mask:
if diff.size:
valid_mask = np.in1d(values, known_values)
else:
valid_mask = np.ones(len(values), dtype=bool)
# check for nans in the known_values
if np.isnan(known_values).any():
diff_is_nan = np.isnan(diff)
if diff_is_nan.any():
# removes nan from valid_mask
if diff.size and return_mask:
is_nan = np.isnan(values)
valid_mask[is_nan] = 1
# remove nan from diff
diff = diff[~diff_is_nan]
diff = list(diff)
if return_mask:
return diff, valid_mask
return diff
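# Example (illustrative, added):
#   _check_unknown(np.array(['a', 'c'], dtype=object), np.array(['a', 'b'], dtype=object))
#     -> ['c']
#   _check_unknown(np.array(['a', 'c'], dtype=object), np.array(['a', 'b'], dtype=object),
#                  return_mask=True)
#     -> (['c'], array([ True, False]))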
|
|
import time
import datetime
import traceback
import multiprocessing
import urllib2
import xml.sax
import redis
import random
import pymongo
import re
import requests
import dateutil.parser
import isodate
import urlparse
from django.conf import settings
from django.db import IntegrityError
from django.core.cache import cache
from apps.reader.models import UserSubscription
from apps.rss_feeds.models import Feed, MStory
from apps.rss_feeds.page_importer import PageImporter
from apps.rss_feeds.icon_importer import IconImporter
from apps.notifications.tasks import QueueNotifications, MUserFeedNotification
from apps.push.models import PushSubscription
from apps.statistics.models import MAnalyticsFetcher, MStatistics
from utils import feedparser
from utils.story_functions import pre_process_story, strip_tags, linkify
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from qurl import qurl
from BeautifulSoup import BeautifulSoup
from django.utils import feedgenerator
from django.utils.html import linebreaks
from django.utils.encoding import smart_unicode
from utils import json_functions as json
from celery.exceptions import SoftTimeLimitExceeded
from utils.twitter_fetcher import TwitterFetcher
from utils.json_fetcher import JSONFetcher
# from utils.feed_functions import mail_feed_error_to_admin
# Refresh feed code adapted from Feedjack.
# http://feedjack.googlecode.com
FEED_OK, FEED_SAME, FEED_ERRPARSE, FEED_ERRHTTP, FEED_ERREXC = range(5)
class FetchFeed:
def __init__(self, feed_id, options):
self.feed = Feed.get_by_id(feed_id)
self.options = options
self.fpf = None
self.raw_feed = None
@timelimit(30)
def fetch(self):
"""
Uses requests to download the feed, parsing it in feedparser. Will be storified later.
"""
start = time.time()
identity = self.get_identity()
log_msg = u'%2s ---> [%-30s] ~FYFetching feed (~FB%d~FY), last update: %s' % (identity,
self.feed.log_title[:30],
self.feed.id,
datetime.datetime.now() - self.feed.last_update)
logging.debug(log_msg)
etag = self.feed.etag
modified = self.feed.last_modified.utctimetuple()[:7] if self.feed.last_modified else None
address = self.feed.feed_address
if (self.options.get('force') or random.random() <= .01):
self.options['force'] = True
modified = None
etag = None
address = qurl(address, add={"_": random.randint(0, 10000)})
logging.debug(u' ---> [%-30s] ~FBForcing fetch: %s' % (
self.feed.log_title[:30], address))
elif (not self.feed.fetched_once or not self.feed.known_good):
modified = None
etag = None
if self.options.get('feed_xml'):
logging.debug(u' ---> [%-30s] ~FM~BKFeed has been fat pinged. Ignoring fat: %s' % (
self.feed.log_title[:30], len(self.options.get('feed_xml'))))
if self.options.get('fpf'):
self.fpf = self.options.get('fpf')
logging.debug(u' ---> [%-30s] ~FM~BKFeed fetched in real-time with fat ping.' % (
self.feed.log_title[:30]))
return FEED_OK, self.fpf
if 'youtube.com' in address:
try:
youtube_feed = self.fetch_youtube(address)
except (requests.adapters.ConnectionError):
youtube_feed = None
if not youtube_feed:
logging.debug(u' ***> [%-30s] ~FRYouTube fetch failed: %s.' %
(self.feed.log_title[:30], address))
return FEED_ERRHTTP, None
self.fpf = feedparser.parse(youtube_feed)
elif re.match('(https?)?://twitter.com/\w+/?$', qurl(address, remove=['_'])):
twitter_feed = self.fetch_twitter(address)
if not twitter_feed:
logging.debug(u' ***> [%-30s] ~FRTwitter fetch failed: %s' %
(self.feed.log_title[:30], address))
return FEED_ERRHTTP, None
self.fpf = feedparser.parse(twitter_feed)
if not self.fpf:
try:
headers = self.feed.fetch_headers()
if etag:
headers['If-None-Match'] = etag
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
modified_header = '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])
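# e.g. a last-modified of Mon 15 Jun 2009 13:45:30 UTC yields the header value
# 'Mon, 15 Jun 2009 13:45:30 GMT' regardless of the server locale (illustrative
# example, added for clarity).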
headers['If-Modified-Since'] = modified_header
if etag or modified:
headers['A-IM'] = 'feed'
raw_feed = requests.get(address, headers=headers)
if raw_feed.status_code >= 400:
logging.debug(" ***> [%-30s] ~FRFeed fetch was %s status code, trying fake user agent: %s" % (self.feed.log_title[:30], raw_feed.status_code, raw_feed.headers))
raw_feed = requests.get(address, headers=self.feed.fetch_headers(fake=True))
if raw_feed.content and 'application/json' in raw_feed.headers.get('Content-Type', ""):
# JSON Feed
json_feed = self.fetch_json_feed(address, raw_feed)
if not json_feed:
logging.debug(u' ***> [%-30s] ~FRJSON fetch failed: %s' %
(self.feed.log_title[:30], address))
return FEED_ERRHTTP, None
self.fpf = feedparser.parse(json_feed)
elif raw_feed.content and raw_feed.status_code < 400:
response_headers = raw_feed.headers
response_headers['Content-Location'] = raw_feed.url
self.raw_feed = smart_unicode(raw_feed.content)
self.fpf = feedparser.parse(self.raw_feed,
response_headers=response_headers)
if self.options.get('debug', False):
logging.debug(" ---> [%-30s] ~FBFeed fetch status %s: %s length / %s" % (self.feed.log_title[:30], raw_feed.status_code, len(smart_unicode(raw_feed.content)), raw_feed.headers))
except Exception, e:
logging.debug(" ***> [%-30s] ~FRFeed failed to fetch with request, trying feedparser: %s" % (self.feed.log_title[:30], unicode(e)[:100]))
if not self.fpf or self.options.get('force_fp', False):
try:
self.fpf = feedparser.parse(address,
agent=self.feed.user_agent,
etag=etag,
modified=modified)
except (TypeError, ValueError, KeyError, EOFError, MemoryError), e:
logging.debug(u' ***> [%-30s] ~FRFeed fetch error: %s' %
(self.feed.log_title[:30], e))
pass
if not self.fpf:
try:
logging.debug(u' ***> [%-30s] ~FRTurning off headers...' %
(self.feed.log_title[:30]))
self.fpf = feedparser.parse(address, agent=self.feed.user_agent)
except (TypeError, ValueError, KeyError, EOFError, MemoryError), e:
logging.debug(u' ***> [%-30s] ~FRFetch failed: %s.' %
(self.feed.log_title[:30], e))
return FEED_ERRHTTP, None
logging.debug(u' ---> [%-30s] ~FYFeed fetch in ~FM%.4ss' % (
self.feed.log_title[:30], time.time() - start))
return FEED_OK, self.fpf
def get_identity(self):
identity = "X"
current_process = multiprocessing.current_process()
if current_process._identity:
identity = current_process._identity[0]
return identity
def fetch_twitter(self, address=None):
twitter_fetcher = TwitterFetcher(self.feed, self.options)
return twitter_fetcher.fetch(address)
def fetch_json_feed(self, address, headers):
json_fetcher = JSONFetcher(self.feed, self.options)
return json_fetcher.fetch(address, headers)
def fetch_youtube(self, address):
username = None
channel_id = None
list_id = None
if 'gdata.youtube.com' in address:
try:
username_groups = re.search('gdata.youtube.com/feeds/\w+/users/(\w+)/', address)
if not username_groups:
return
username = username_groups.group(1)
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?user=' in address:
try:
username = urlparse.parse_qs(urlparse.urlparse(address).query)['user'][0]
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?channel_id=' in address:
try:
channel_id = urlparse.parse_qs(urlparse.urlparse(address).query)['channel_id'][0]
except (IndexError, KeyError):
return
elif 'youtube.com/playlist' in address:
try:
list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['list'][0]
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?playlist_id' in address:
try:
list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['playlist_id'][0]
except IndexError:
return
if channel_id:
video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id)
channel_json = requests.get("https://www.googleapis.com/youtube/v3/channels?part=snippet&id=%s&key=%s" %
(channel_id, settings.YOUTUBE_API_KEY))
channel = json.decode(channel_json.content)
try:
username = channel['items'][0]['snippet']['title']
description = channel['items'][0]['snippet']['description']
except (IndexError, KeyError):
return
elif list_id:
playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlists?part=snippet&id=%s&key=%s" %
(list_id, settings.YOUTUBE_API_KEY))
playlist = json.decode(playlist_json.content)
try:
username = playlist['items'][0]['snippet']['title']
description = playlist['items'][0]['snippet']['description']
except (IndexError, KeyError):
return
channel_url = "https://www.youtube.com/playlist?list=%s" % list_id
elif username:
video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?user=%s" % username)
description = "YouTube videos uploaded by %s" % username
else:
return
if list_id:
playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&key=%s" %
(list_id, settings.YOUTUBE_API_KEY))
playlist = json.decode(playlist_json.content)
try:
video_ids = [video['snippet']['resourceId']['videoId'] for video in playlist['items']]
except (IndexError, KeyError):
return
else:
if video_ids_xml.status_code != 200:
return
video_ids_soup = BeautifulSoup(video_ids_xml.content)
channel_url = video_ids_soup.find('author').find('uri').getText()
video_ids = []
for video_id in video_ids_soup.findAll('yt:videoid'):
video_ids.append(video_id.getText())
videos_json = requests.get("https://www.googleapis.com/youtube/v3/videos?part=contentDetails%%2Csnippet&id=%s&key=%s" %
(','.join(video_ids), settings.YOUTUBE_API_KEY))
videos = json.decode(videos_json.content)
if 'error' in videos:
logging.debug(" ***> ~FRYoutube returned an error: ~FM~SB%s" % (videos))
return
data = {}
data['title'] = ("%s's YouTube Videos" % username if 'Uploads' not in username else username)
data['link'] = channel_url
data['description'] = description
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'NewsBlur YouTube API v3 Decrapifier - %s' % settings.NEWSBLUR_URL
data['docs'] = None
data['feed_url'] = address
rss = feedgenerator.Atom1Feed(**data)
for video in videos['items']:
thumbnail = video['snippet']['thumbnails'].get('maxres')
if not thumbnail:
thumbnail = video['snippet']['thumbnails'].get('high')
if not thumbnail:
thumbnail = video['snippet']['thumbnails'].get('medium')
duration_sec = isodate.parse_duration(video['contentDetails']['duration']).seconds
if duration_sec >= 3600:
hours = (duration_sec / 3600)
minutes = (duration_sec - (hours*3600)) / 60
seconds = duration_sec - (hours*3600) - (minutes*60)
duration = "%s:%s:%s" % (hours, '{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
else:
minutes = duration_sec / 60
seconds = duration_sec - (minutes*60)
duration = "%s:%s" % ('{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
content = """<div class="NB-youtube-player"><iframe allowfullscreen="true" src="%s?iv_load_policy=3"></iframe></div>
<div class="NB-youtube-stats"><small>
<b>From:</b> <a href="%s">%s</a><br />
<b>Duration:</b> %s<br />
</small></div><hr>
<div class="NB-youtube-description">%s</div>
<img src="%s" style="display:none" />""" % (
("https://www.youtube.com/embed/" + video['id']),
channel_url, username,
duration,
linkify(linebreaks(video['snippet']['description'])),
thumbnail['url'] if thumbnail else "",
)
link = "http://www.youtube.com/watch?v=%s" % video['id']
story_data = {
'title': video['snippet']['title'],
'link': link,
'description': content,
'author_name': username,
'categories': [],
'unique_id': "tag:youtube.com,2008:video:%s" % video['id'],
'pubdate': dateutil.parser.parse(video['snippet']['publishedAt']),
}
rss.add_item(**story_data)
return rss.writeString('utf-8')
class ProcessFeed:
def __init__(self, feed_id, fpf, options, raw_feed=None):
self.feed_id = feed_id
self.options = options
self.fpf = fpf
self.raw_feed = raw_feed
def refresh_feed(self):
self.feed = Feed.get_by_id(self.feed_id)
if self.feed_id != self.feed.pk:
logging.debug(" ***> Feed has changed: from %s to %s" % (self.feed_id, self.feed.pk))
self.feed_id = self.feed.pk
def process(self):
""" Downloads and parses a feed.
"""
start = time.time()
self.refresh_feed()
ret_values = dict(new=0, updated=0, same=0, error=0)
if hasattr(self.fpf, 'status'):
if self.options['verbose']:
if self.fpf.bozo and self.fpf.status != 304:
logging.debug(u' ---> [%-30s] ~FRBOZO exception: %s ~SB(%s entries)' % (
self.feed.log_title[:30],
self.fpf.bozo_exception,
len(self.fpf.entries)))
if self.fpf.status == 304:
self.feed = self.feed.save()
self.feed.save_feed_history(304, "Not modified")
return FEED_SAME, ret_values
# 302 and 307: Temporary redirect: ignore
# 301 and 308: Permanent redirect: save it (after 10 tries)
if self.fpf.status == 301 or self.fpf.status == 308:
if self.fpf.href.endswith('feedburner.com/atom.xml'):
return FEED_ERRHTTP, ret_values
redirects, non_redirects = self.feed.count_redirects_in_history('feed')
self.feed.save_feed_history(self.fpf.status, "HTTP Redirect (%d to go)" % (10-len(redirects)))
if len(redirects) >= 10 or len(non_redirects) == 0:
address = self.fpf.href
if self.options['force'] and address:
address = qurl(address, remove=['_'])
self.feed.feed_address = address
if not self.feed.known_good:
self.feed.fetched_once = True
logging.debug(" ---> [%-30s] ~SB~SK~FRFeed is %s'ing. Refetching..." % (self.feed.log_title[:30], self.fpf.status))
self.feed = self.feed.schedule_feed_fetch_immediately()
if not self.fpf.entries:
self.feed = self.feed.save()
self.feed.save_feed_history(self.fpf.status, "HTTP Redirect")
return FEED_ERRHTTP, ret_values
if self.fpf.status >= 400:
logging.debug(" ---> [%-30s] ~SB~FRHTTP Status code: %s. Checking address..." % (self.feed.log_title[:30], self.fpf.status))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(self.fpf.status, "HTTP Error")
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRHTTP, ret_values
if not self.fpf:
logging.debug(" ---> [%-30s] ~SB~FRFeed is Non-XML. No feedparser feed either!" % (self.feed.log_title[:30]))
self.feed.save_feed_history(551, "Broken feed")
return FEED_ERRHTTP, ret_values
if self.fpf and not self.fpf.entries:
if self.fpf.bozo and isinstance(self.fpf.bozo_exception, feedparser.NonXMLContentType):
logging.debug(" ---> [%-30s] ~SB~FRFeed is Non-XML. %s entries. Checking address..." % (self.feed.log_title[:30], len(self.fpf.entries)))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(552, 'Non-xml feed', self.fpf.bozo_exception)
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRPARSE, ret_values
elif self.fpf.bozo and isinstance(self.fpf.bozo_exception, xml.sax._exceptions.SAXException):
logging.debug(" ---> [%-30s] ~SB~FRFeed has SAX/XML parsing issues. %s entries. Checking address..." % (self.feed.log_title[:30], len(self.fpf.entries)))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(553, 'Not an RSS feed', self.fpf.bozo_exception)
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRPARSE, ret_values
# the feed has changed (or it is the first time we parse it)
# saving the etag and last_modified fields
original_etag = self.feed.etag
self.feed.etag = self.fpf.get('etag')
if self.feed.etag:
self.feed.etag = self.feed.etag[:255]
# sometimes this is None (it never should be) *sigh*
if self.feed.etag is None:
self.feed.etag = ''
if self.feed.etag != original_etag:
self.feed.save(update_fields=['etag'])
original_last_modified = self.feed.last_modified
if hasattr(self.fpf, 'modified') and self.fpf.modified:
try:
self.feed.last_modified = datetime.datetime.strptime(self.fpf.modified, '%a, %d %b %Y %H:%M:%S %Z')
except Exception, e:
self.feed.last_modified = None
logging.debug("Broken mtime %s: %s" % (self.feed.last_modified, e))
pass
if self.feed.last_modified != original_last_modified:
self.feed.save(update_fields=['last_modified'])
self.fpf.entries = self.fpf.entries[:100]
original_title = self.feed.feed_title
if self.fpf.feed.get('title'):
self.feed.feed_title = strip_tags(self.fpf.feed.get('title'))
if self.feed.feed_title != original_title:
self.feed.save(update_fields=['feed_title'])
tagline = self.fpf.feed.get('tagline', self.feed.data.feed_tagline)
if tagline:
original_tagline = self.feed.data.feed_tagline
self.feed.data.feed_tagline = smart_unicode(tagline)
if self.feed.data.feed_tagline != original_tagline:
self.feed.data.save(update_fields=['feed_tagline'])
if not self.feed.feed_link_locked:
new_feed_link = self.fpf.feed.get('link') or self.fpf.feed.get('id') or self.feed.feed_link
if self.options['force'] and new_feed_link:
new_feed_link = qurl(new_feed_link, remove=['_'])
if new_feed_link != self.feed.feed_link:
logging.debug(" ---> [%-30s] ~SB~FRFeed's page is different: %s to %s" % (self.feed.log_title[:30], self.feed.feed_link, new_feed_link))
redirects, non_redirects = self.feed.count_redirects_in_history('page')
self.feed.save_page_history(301, "HTTP Redirect (%s to go)" % (10-len(redirects)))
if len(redirects) >= 10 or len(non_redirects) == 0:
self.feed.feed_link = new_feed_link
self.feed.save(update_fields=['feed_link'])
# Determine if stories aren't valid and replace broken guids
guids_seen = set()
permalinks_seen = set()
for entry in self.fpf.entries:
guids_seen.add(entry.get('guid'))
permalinks_seen.add(Feed.get_permalink(entry))
guid_difference = len(guids_seen) != len(self.fpf.entries)
single_guid = len(guids_seen) == 1
replace_guids = single_guid and guid_difference
permalink_difference = len(permalinks_seen) != len(self.fpf.entries)
single_permalink = len(permalinks_seen) == 1
replace_permalinks = single_permalink and permalink_difference
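# In other words (added note): if every entry shares a single guid but the
# permalinks differ, guids are rebuilt from the permalinks; if the permalinks
# also collapse to a single value, the published timestamp is used instead
# (see the loop below).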
# Compare new stories to existing stories, adding and updating
start_date = datetime.datetime.utcnow()
story_hashes = []
stories = []
for entry in self.fpf.entries:
story = pre_process_story(entry, self.fpf.encoding)
if story.get('published') < start_date:
start_date = story.get('published')
if replace_guids:
if replace_permalinks:
new_story_guid = unicode(story.get('published'))
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBReplacing guid (%s) with timestamp: %s' % (
self.feed.log_title[:30],
story.get('guid'), new_story_guid))
story['guid'] = new_story_guid
else:
new_story_guid = Feed.get_permalink(story)
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBReplacing guid (%s) with permalink: %s' % (
self.feed.log_title[:30],
story.get('guid'), new_story_guid))
story['guid'] = new_story_guid
story['story_hash'] = MStory.feed_guid_hash_unsaved(self.feed.pk, story.get('guid'))
stories.append(story)
story_hashes.append(story.get('story_hash'))
original_story_hash_count = len(story_hashes)
story_hashes_in_unread_cutoff = self.feed.story_hashes_in_unread_cutoff[:original_story_hash_count]
story_hashes.extend(story_hashes_in_unread_cutoff)
story_hashes = list(set(story_hashes))
if self.options['verbose'] or settings.DEBUG:
logging.debug(u' ---> [%-30s] ~FBFound ~SB%s~SN guids, adding ~SB%s~SN/%s guids from db' % (
self.feed.log_title[:30],
original_story_hash_count, len(story_hashes)-original_story_hash_count,
len(story_hashes_in_unread_cutoff)))
existing_stories = dict((s.story_hash, s) for s in MStory.objects(
story_hash__in=story_hashes,
# story_date__gte=start_date,
# story_feed_id=self.feed.pk
))
# if len(existing_stories) == 0:
# existing_stories = dict((s.story_hash, s) for s in MStory.objects(
# story_date__gte=start_date,
# story_feed_id=self.feed.pk
# ))
ret_values = self.feed.add_update_stories(stories, existing_stories,
verbose=self.options['verbose'],
updates_off=self.options['updates_off'])
# PubSubHubbub
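# (added note) Look for rel="hub" and rel="self" links advertised by the feed;
# if a hub is present and the feed has active subscribers, subscribe (or
# re-subscribe when the existing lease has expired or the fetch was forced).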
if (hasattr(self.fpf, 'feed') and
hasattr(self.fpf.feed, 'links') and self.fpf.feed.links):
hub_url = None
self_url = self.feed.feed_address
for link in self.fpf.feed.links:
if link['rel'] == 'hub' and not hub_url:
hub_url = link['href']
elif link['rel'] == 'self':
self_url = link['href']
push_expired = False
if self.feed.is_push:
try:
push_expired = self.feed.push.lease_expires < datetime.datetime.now()
except PushSubscription.DoesNotExist:
self.feed.is_push = False
if (hub_url and self_url and not settings.DEBUG and
self.feed.active_subscribers > 0 and
(push_expired or not self.feed.is_push or self.options.get('force'))):
logging.debug(u' ---> [%-30s] ~BB~FW%sSubscribing to PuSH hub: %s' % (
self.feed.log_title[:30],
"~SKRe-~SN" if push_expired else "", hub_url))
try:
PushSubscription.objects.subscribe(self_url, feed=self.feed, hub=hub_url)
except TimeoutError:
logging.debug(u' ---> [%-30s] ~BB~FW~FRTimed out~FW subscribing to PuSH hub: %s' % (
self.feed.log_title[:30], hub_url))
elif (self.feed.is_push and
(self.feed.active_subscribers <= 0 or not hub_url)):
logging.debug(u' ---> [%-30s] ~BB~FWTurning off PuSH, no hub found' % (
self.feed.log_title[:30]))
self.feed.is_push = False
self.feed = self.feed.save()
# Push notifications
if ret_values['new'] > 0 and MUserFeedNotification.feed_has_users(self.feed.pk) > 0:
QueueNotifications.delay(self.feed.pk, ret_values['new'])
# All Done
logging.debug(u' ---> [%-30s] ~FYParsed Feed: %snew=%s~SN~FY %sup=%s~SN same=%s%s~SN %serr=%s~SN~FY total=~SB%s' % (
self.feed.log_title[:30],
'~FG~SB' if ret_values['new'] else '', ret_values['new'],
'~FY~SB' if ret_values['updated'] else '', ret_values['updated'],
'~SB' if ret_values['same'] else '', ret_values['same'],
'~FR~SB' if ret_values['error'] else '', ret_values['error'],
len(self.fpf.entries)))
self.feed.update_all_statistics(has_new_stories=bool(ret_values['new']), force=self.options['force'])
fetch_date = datetime.datetime.now()
if ret_values['new']:
if not getattr(settings, 'TEST_DEBUG', False):
self.feed.trim_feed()
self.feed.expire_redis()
if MStatistics.get('raw_feed', None) == self.feed.pk:
self.feed.save_raw_feed(self.raw_feed, fetch_date)
self.feed.save_feed_history(200, "OK", date=fetch_date)
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBTIME: feed parse in ~FM%.4ss' % (
self.feed.log_title[:30], time.time() - start))
return FEED_OK, ret_values
class Dispatcher:
def __init__(self, options, num_threads):
self.options = options
self.feed_stats = {
FEED_OK:0,
FEED_SAME:0,
FEED_ERRPARSE:0,
FEED_ERRHTTP:0,
FEED_ERREXC:0}
self.feed_trans = {
FEED_OK:'ok',
FEED_SAME:'unchanged',
FEED_ERRPARSE:'cant_parse',
FEED_ERRHTTP:'http_error',
FEED_ERREXC:'exception'}
self.feed_keys = sorted(self.feed_trans.keys())
self.num_threads = num_threads
self.time_start = datetime.datetime.utcnow()
self.workers = []
def refresh_feed(self, feed_id):
"""Update feed, since it may have changed"""
return Feed.get_by_id(feed_id)
def process_feed_wrapper(self, feed_queue):
delta = None
current_process = multiprocessing.current_process()
identity = "X"
feed = None
if current_process._identity:
identity = current_process._identity[0]
for feed_id in feed_queue:
start_duration = time.time()
feed_fetch_duration = None
feed_process_duration = None
page_duration = None
icon_duration = None
feed_code = None
ret_entries = None
start_time = time.time()
ret_feed = FEED_ERREXC
try:
feed = self.refresh_feed(feed_id)
skip = False
if self.options.get('fake'):
skip = True
weight = "-"
quick = "-"
rand = "-"
elif (self.options.get('quick') and not self.options['force'] and
feed.known_good and feed.fetched_once and not feed.is_push):
weight = feed.stories_last_month * feed.num_subscribers
random_weight = random.randint(1, max(weight, 1))
quick = float(self.options.get('quick', 0))
rand = random.random()
if random_weight < 1000 and rand < quick:
skip = True
elif False and feed.feed_address.startswith("http://news.google.com/news"):
skip = True
weight = "-"
quick = "-"
rand = "-"
if skip:
logging.debug(' ---> [%-30s] ~BGFaking fetch, skipping (%s/month, %s subs, %s < %s)...' % (
feed.log_title[:30],
weight,
feed.num_subscribers,
rand, quick))
continue
ffeed = FetchFeed(feed_id, self.options)
ret_feed, fetched_feed = ffeed.fetch()
feed_fetch_duration = time.time() - start_duration
raw_feed = ffeed.raw_feed
if ((fetched_feed and ret_feed == FEED_OK) or self.options['force']):
pfeed = ProcessFeed(feed_id, fetched_feed, self.options, raw_feed=raw_feed)
ret_feed, ret_entries = pfeed.process()
feed = pfeed.feed
feed_process_duration = time.time() - start_duration
if (ret_entries and ret_entries['new']) or self.options['force']:
start = time.time()
if not feed.known_good or not feed.fetched_once:
feed.known_good = True
feed.fetched_once = True
feed = feed.save()
if self.options['force'] or random.random() <= 0.02:
logging.debug(' ---> [%-30s] ~FBPerforming feed cleanup...' % (feed.log_title[:30],))
start_cleanup = time.time()
feed.sync_redis()
logging.debug(' ---> [%-30s] ~FBDone with feed cleanup. Took ~SB%.4s~SN sec.' % (feed.log_title[:30], time.time() - start_cleanup))
try:
self.count_unreads_for_subscribers(feed)
except TimeoutError:
logging.debug(' ---> [%-30s] Unread count took too long...' % (feed.log_title[:30],))
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBTIME: unread count in ~FM%.4ss' % (
feed.log_title[:30], time.time() - start))
            except urllib2.HTTPError, e:
                http_error_body = e.fp.read()
                logging.debug(' ---> [%-30s] ~FRFeed throws HTTP error: ~SB%s' % (unicode(feed_id)[:30], http_error_body))
                feed_code = e.code
                feed.save_feed_history(feed_code, e.msg, http_error_body)
fetched_feed = None
except Feed.DoesNotExist, e:
logging.debug(' ---> [%-30s] ~FRFeed is now gone...' % (unicode(feed_id)[:30]))
continue
except SoftTimeLimitExceeded, e:
logging.debug(" ---> [%-30s] ~BR~FWTime limit hit!~SB~FR Moving on to next feed..." % feed)
ret_feed = FEED_ERREXC
fetched_feed = None
feed_code = 559
feed.save_feed_history(feed_code, 'Timeout', e)
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRFeed fetch timed out...' % (feed.log_title[:30]))
feed_code = 505
feed.save_feed_history(feed_code, 'Timeout', e)
fetched_feed = None
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
ret_feed = FEED_ERREXC
feed = Feed.get_by_id(getattr(feed, 'pk', feed_id))
if not feed: continue
feed.save_feed_history(500, "Error", tb)
feed_code = 500
fetched_feed = None
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
if not feed_code:
if ret_feed == FEED_OK:
feed_code = 200
elif ret_feed == FEED_SAME:
feed_code = 304
elif ret_feed == FEED_ERRHTTP:
feed_code = 400
                elif ret_feed == FEED_ERREXC:
feed_code = 500
elif ret_feed == FEED_ERRPARSE:
feed_code = 550
if not feed: continue
feed = self.refresh_feed(feed.pk)
if not feed: continue
if ((self.options['force']) or
(random.random() > .9) or
(fetched_feed and
feed.feed_link and
feed.has_page and
(ret_feed == FEED_OK or
(ret_feed == FEED_SAME and feed.stories_last_month > 10)))):
logging.debug(u' ---> [%-30s] ~FYFetching page: %s' % (feed.log_title[:30], feed.feed_link))
page_importer = PageImporter(feed)
try:
page_data = page_importer.fetch_page()
page_duration = time.time() - start_duration
except SoftTimeLimitExceeded, e:
logging.debug(" ---> [%-30s] ~BR~FWTime limit hit!~SB~FR Moving on to next feed..." % feed)
page_data = None
feed.save_feed_history(557, 'Timeout', e)
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRPage fetch timed out...' % (feed.log_title[:30]))
page_data = None
feed.save_page_history(555, 'Timeout', '')
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
feed.save_page_history(550, "Page Error", tb)
fetched_feed = None
page_data = None
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
feed = self.refresh_feed(feed.pk)
logging.debug(u' ---> [%-30s] ~FYFetching icon: %s' % (feed.log_title[:30], feed.feed_link))
force = self.options['force']
if random.random() > .99:
force = True
icon_importer = IconImporter(feed, page_data=page_data, force=force)
try:
icon_importer.save()
icon_duration = time.time() - start_duration
except SoftTimeLimitExceeded, e:
logging.debug(" ---> [%-30s] ~BR~FWTime limit hit!~SB~FR Moving on to next feed..." % feed)
feed.save_feed_history(558, 'Timeout', e)
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRIcon fetch timed out...' % (feed.log_title[:30]))
feed.save_page_history(556, 'Timeout', '')
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
# feed.save_feed_history(560, "Icon Error", tb)
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
else:
logging.debug(u' ---> [%-30s] ~FBSkipping page fetch: (%s on %s stories) %s' % (feed.log_title[:30], self.feed_trans[ret_feed], feed.stories_last_month, '' if feed.has_page else ' [HAS NO PAGE]'))
feed = self.refresh_feed(feed.pk)
delta = time.time() - start_time
feed.last_load_time = round(delta)
feed.fetched_once = True
try:
feed = feed.save(update_fields=['last_load_time', 'fetched_once'])
except IntegrityError:
logging.debug(" ***> [%-30s] ~FRIntegrityError on feed: %s" % (feed.log_title[:30], feed.feed_address,))
if ret_entries and ret_entries['new']:
self.publish_to_subscribers(feed, ret_entries['new'])
done_msg = (u'%2s ---> [%-30s] ~FYProcessed in ~FM~SB%.4ss~FY~SN (~FB%s~FY) [%s]' % (
identity, feed.log_title[:30], delta,
feed.pk, self.feed_trans[ret_feed],))
logging.debug(done_msg)
total_duration = time.time() - start_duration
MAnalyticsFetcher.add(feed_id=feed.pk, feed_fetch=feed_fetch_duration,
feed_process=feed_process_duration,
page=page_duration, icon=icon_duration,
total=total_duration, feed_code=feed_code)
self.feed_stats[ret_feed] += 1
if len(feed_queue) == 1:
return feed
# time_taken = datetime.datetime.utcnow() - self.time_start
def publish_to_subscribers(self, feed, new_count):
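        """Publish the new-story count on the feed's Redis pub/sub channel so
        connected real-time clients know to refresh."""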
try:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
listeners_count = r.publish(str(feed.pk), 'story:new_count:%s' % new_count)
if listeners_count:
logging.debug(" ---> [%-30s] ~FMPublished to %s subscribers" % (feed.log_title[:30], listeners_count))
except redis.ConnectionError:
logging.debug(" ***> [%-30s] ~BMRedis is unavailable for real-time." % (feed.log_title[:30],))
def count_unreads_for_subscribers(self, feed):
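        """Flag active, recently-seen subscribers for an unread recount and,
        when `compute_scores` is set, recompute their feed scores against the
        stories inside the unread cutoff window."""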
user_subs = UserSubscription.objects.filter(feed=feed,
active=True,
user__profile__last_seen_on__gte=feed.unread_cutoff)\
.order_by('-last_read_date')
if not user_subs.count():
return
for sub in user_subs:
if not sub.needs_unread_recalc:
sub.needs_unread_recalc = True
sub.save()
if self.options['compute_scores']:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
stories = MStory.objects(story_feed_id=feed.pk,
story_date__gte=feed.unread_cutoff)
stories = Feed.format_stories(stories, feed.pk)
story_hashes = r.zrangebyscore('zF:%s' % feed.pk, int(feed.unread_cutoff.strftime('%s')),
int(time.time() + 60*60*24))
missing_story_hashes = set(story_hashes) - set([s['story_hash'] for s in stories])
if missing_story_hashes:
missing_stories = MStory.objects(story_feed_id=feed.pk,
story_hash__in=missing_story_hashes)\
.read_preference(pymongo.ReadPreference.PRIMARY)
missing_stories = Feed.format_stories(missing_stories, feed.pk)
stories = missing_stories + stories
logging.debug(u' ---> [%-30s] ~FYFound ~SB~FC%s(of %s)/%s~FY~SN un-secondaried stories while computing scores' % (feed.log_title[:30], len(missing_stories), len(missing_story_hashes), len(stories)))
cache.set("S:%s" % feed.pk, stories, 60)
logging.debug(u' ---> [%-30s] ~FYComputing scores: ~SB%s stories~SN with ~SB%s subscribers ~SN(%s/%s/%s)' % (
feed.log_title[:30], len(stories), user_subs.count(),
feed.num_subscribers, feed.active_subscribers, feed.premium_subscribers))
self.calculate_feed_scores_with_stories(user_subs, stories)
elif self.options.get('mongodb_replication_lag'):
logging.debug(u' ---> [%-30s] ~BR~FYSkipping computing scores: ~SB%s seconds~SN of mongodb lag' % (
feed.log_title[:30], self.options.get('mongodb_replication_lag')))
@timelimit(10)
def calculate_feed_scores_with_stories(self, user_subs, stories):
for sub in user_subs:
            silent = self.options['verbose'] < 2
sub.calculate_feed_scores(silent=silent, stories=stories)
def add_jobs(self, feeds_queue, feeds_count=1):
""" adds a feed processing job to the pool
"""
self.feeds_queue = feeds_queue
self.feeds_count = feeds_count
def run_jobs(self):
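        """Run the queued feeds: inline when single_threaded, otherwise one
        multiprocessing.Process per thread slot."""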
if self.options['single_threaded']:
return self.process_feed_wrapper(self.feeds_queue[0])
else:
for i in range(self.num_threads):
feed_queue = self.feeds_queue[i]
self.workers.append(multiprocessing.Process(target=self.process_feed_wrapper,
args=(feed_queue,)))
for i in range(self.num_threads):
self.workers[i].start()
|
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import random
import re
import requests
import six
from tornado import gen, httpclient, ioloop, websocket
from .util import add_future
class RdioWebClient(object):
SERVER = "www.rdio.com"
API_VERSION = "1"
client_version = 20130124
class ApiFailureException(Exception):
pass
def __init__(self):
self.session_initialized = False
        self.player_id = "_rdioslave_" + ("%06d" % random.randint(0, 999999))
# user auth session
self.cookies = {}
self.authorization_key = None
self.user_key = None
# client state
self.ws = None
self.pubsub_data = None
####################
# Session
####################
def has_session(self):
if not self.session_initialized:
return False
try:
self.current_user_sync()
except self.ApiFailureException:
return False
return True
def init_session(self, username, password):
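        """Log in to Rdio with a username/password.
        Scrapes the authorizationKey from the sign-in page, calls signIn, then
        follows the redirect to pick up the session cookie and a fresh key.
        """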
def get_auth_key(page):
match = re.search(r'"authorizationKey": "([^"]*)"', page)
assert match, "could not find authorizationKey: rdio login changed!"
return match.group(1)
resp = requests.get("https://%s/account/signin/" % self.SERVER)
self.cookies['r'] = resp.cookies['r']
self.authorization_key = get_auth_key(resp.text)
result = self.sign_in_sync(username, password)
# redirect url sends us a cookie and then 302s to home
resp = requests.get(result['redirect_url'])
self.cookies['r'] = resp.history[0].cookies['r']
self.authorization_key = get_auth_key(resp.text)
resp = self.current_user_sync()
self.user_key = resp['key']
self.session_initialized = True
def write_session(self, file_path):
data = {
'cookies': self.cookies,
'authorization_key': self.authorization_key,
'user_key': self.user_key,
}
with open(file_path, "wt") as f:
f.write(json.dumps(data))
def read_session(self, file_path):
with open(file_path, "rt") as f:
data = json.loads(f.read())
# json unicodifies everything, so we need to convert back to str
for k, v in six.iteritems(data['cookies']):
self.cookies[k.encode('ascii')] = str(v.encode('ascii'))
self.authorization_key = data['authorization_key'].encode('ascii')
self.user_key = data['user_key'].encode('ascii')
self.session_initialized = True
assert self.has_session()
####################
# PubSub
####################
@gen.coroutine
def setup_pubsub(self, on_message):
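        """Look up the pubsub server, open a websocket to it, start the read
        loop (dispatching to `on_message`), and send the CONNECT frame."""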
self.pubsub_data = yield self.pubsub_info()
host = self.pubsub_data['servers'][0]
self.ws = yield websocket.websocket_connect("ws://%s" % host)
add_future(self.pubsub_read(on_message))
self.connect()
def connect(self):
assert self.ws
caps = {'player': {'canRemote': True, 'name': self.player_id}}
ws_msg = "CONNECT %s|%s" % (self.pubsub_data['token'], json.dumps(caps))
print("sending on websocket: %s" % ws_msg)
self.ws.write_message(ws_msg)
def pub(self, channel, message):
assert self.ws
if not isinstance(message, six.string_types):
message = json.dumps(message)
ws_msg = "PUB %s/%s|%s" % (self.user_key, channel, message)
print("sending on websocket: %s" % ws_msg)
self.ws.write_message(ws_msg)
def sub(self, channel):
assert self.ws
ws_msg = "SUB %s/%s" % (self.user_key, channel)
print("sending on websocket: %s" % ws_msg)
self.ws.write_message(ws_msg)
@gen.coroutine
def pubsub_read(self, on_message):
while True:
message = yield self.ws.read_message()
print("[PubSub] got message: %s" % message)
if message is None:
# Socket closed; set up pubsub again. Probably racy (we might
# try to pub while there's no websocket there).
yield self.setup_pubsub(on_message)
return
on_message(message)
####################
# API call helpers
####################
def _encode_params(self, params):
# This is kind of dubious
return requests.models.RequestEncodingMixin._encode_params(params)
def _construct_api_request(self, method, params, secure, gag_debug):
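        """Build a POST request for the web API: the method name, client
        version, and authorization key go in the form-encoded body, and the
        session cookies ride in the Cookie header."""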
if params is None:
params = {}
params['v'] = self.client_version
params['_authorization_key'] = self.authorization_key
params['method'] = method
protocol = "https" if secure else "http"
url = "%s://%s/api/%s/%s" % (protocol, self.SERVER, self.API_VERSION, method)
if not gag_debug:
print(json.dumps(params, indent=4))
request = httpclient.HTTPRequest(
url,
method="POST",
headers={
"Cookie": "; ".join(["%s=%s" % it for it in six.iteritems(self.cookies)]),
"Content-Type": "application/x-www-form-urlencoded",
},
body=self._encode_params(params),
)
return request
def _process_api_response(self, response):
if response.code != 200:
raise self.ApiFailureException(str(response.code))
response_parsed = json.loads(response.body)
if response_parsed['status'] != "ok":
raise self.ApiFailureException(json.dumps(response_parsed, indent=4))
return response_parsed['result']
@gen.coroutine
def call_api(self, method, params=None, secure=False, gag_debug=False):
request = self._construct_api_request(method, params, secure, gag_debug)
response = yield httpclient.AsyncHTTPClient().fetch(request)
raise gen.Return(self._process_api_response(response))
def call_api_sync(self, method, params=None, secure=False, gag_debug=False):
request = self._construct_api_request(method, params, secure, gag_debug)
response = httpclient.HTTPClient().fetch(request)
return self._process_api_response(response)
####################
# Rdio API
####################
def current_user_sync(self):
return self.call_api_sync("currentUser")
@gen.coroutine
def generate_station(self, station_key, exclude, extras=None):
params = {
"station_key": station_key,
"exclude": ",".join(exclude)
}
if extras is not None:
params["extras"] = ",".join(extras)
ret = yield self.call_api("generateStation", params)
raise gen.Return(ret)
@gen.coroutine
def get(self, keys, extras=None):
assert not isinstance(keys, six.string_types)
params = {
"keys": ",".join(keys),
}
if extras is not None:
params["extras"] = ",".join(extras)
ret = yield self.call_api("get", params)
raise gen.Return(ret)
@gen.coroutine
def get_playback_info(self, key, manual_play=True, type="flash",
player_name=None, requires_unlimited=False):
if player_name is None:
player_name = self.player_id
data = {
'key': key,
'manualPlay': manual_play,
'type': type,
'playerName': player_name,
'requiresUnlimited': requires_unlimited,
}
ret = yield self.call_api("getPlaybackInfo", data)
raise gen.Return(ret)
@gen.coroutine
def get_player_state(self):
ret = yield self.call_api("getPlayerState")
raise gen.Return(ret)
@gen.coroutine
def pubsub_info(self):
ret = yield self.call_api("pubsubInfo")
raise gen.Return(ret)
@gen.coroutine
def save_player_state(self, player_state=None, queue=None):
assert any((player_state is not None, queue is not None))
params = {}
if player_state is not None:
params["player_state"] = json.dumps(player_state)
if queue is not None:
params["queue"] = json.dumps(queue)
ret = yield self.call_api("savePlayerState", params)
raise gen.Return(ret)
@gen.coroutine
def add_start_event(self, source, key):
params = dict(source=source, key=key)
ret = yield self.call_api("addStartEvent", params)
raise gen.Return(ret)
def sign_in_sync(self, username, password, remember=1, next_url=""):
params = {
'username': username,
'password': password,
'remember': remember,
'nextUrl': next_url,
}
return self.call_api_sync("signIn", params, secure=True, gag_debug=True)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for statistical functions in MLlib.
"""
from pyspark import RDD
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
__all__ = ['MultivariateStatisticalSummary', 'ChiSqTestResult', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
"""
Trait for multivariate statistical summary of a data matrix.
"""
def mean(self):
return self.call("mean").toArray()
def variance(self):
return self.call("variance").toArray()
def count(self):
return self.call("count")
def numNonzeros(self):
return self.call("numNonzeros").toArray()
def max(self):
return self.call("max").toArray()
def min(self):
return self.call("min").toArray()
class ChiSqTestResult(JavaModelWrapper):
"""
:: Experimental ::
Object containing the test results for the chi-squared hypothesis test.
"""
@property
def method(self):
"""
Name of the test method
"""
return self._java_model.method()
@property
def pValue(self):
"""
The probability of obtaining a test statistic result at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
"""
return self._java_model.pValue()
@property
def degreesOfFreedom(self):
"""
Returns the degree(s) of freedom of the hypothesis test.
        Return type should be Number (e.g. Int, Double) or tuples of Numbers.
"""
return self._java_model.degreesOfFreedom()
@property
def statistic(self):
"""
Test statistic.
"""
return self._java_model.statistic()
@property
def nullHypothesis(self):
"""
Null hypothesis of the test.
"""
return self._java_model.nullHypothesis()
def __str__(self):
return self._java_model.toString()
class Statistics(object):
@staticmethod
def colStats(rdd):
"""
Computes column-wise summary statistics for the input RDD[Vector].
:param rdd: an RDD[Vector] for which column-wise summary statistics
are to be computed.
:return: :class:`MultivariateStatisticalSummary` object containing
column-wise summary statistics.
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
... Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8])])
>>> cStats = Statistics.colStats(rdd)
>>> cStats.mean()
array([ 4., 4., 0., 3.])
>>> cStats.variance()
array([ 4., 13., 0., 25.])
>>> cStats.count()
3L
>>> cStats.numNonzeros()
array([ 3., 2., 0., 3.])
>>> cStats.max()
array([ 6., 7., 0., 8.])
>>> cStats.min()
array([ 2., 0., 0., -2.])
"""
cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
return MultivariateStatisticalSummary(cStats)
@staticmethod
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
        to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
        :param x: an RDD of Vectors for which the correlation matrix is to be computed,
                  or an RDD of floats of the same cardinality as y when y is specified.
        :param y: an RDD of floats of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print str(pearsonCorr).replace('nan', 'NaN')
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print str(spearmanCorr).replace('nan', 'NaN')
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print "Method name as second argument without 'method=' shouldn't be allowed."
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method)
@staticmethod
def chiSqTest(observed, expected=None):
"""
:: Experimental ::
        If `observed` is a Vector, conduct Pearson's chi-squared goodness
        of fit test of the observed data against the expected distribution,
        or against the uniform distribution (by default), with each category
having an expected frequency of `1 / len(observed)`.
(Note: `observed` cannot contain negative values)
        If `observed` is a matrix, conduct Pearson's independence test on the
input contingency matrix, which cannot contain negative entries or
columns or rows that sum up to 0.
If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
test for every feature against the label across the input RDD.
For each feature, the (feature, label) pairs are converted into a
contingency matrix for which the chi-squared statistic is computed.
All label and feature values must be categorical.
:param observed: it could be a vector containing the observed categorical
counts/relative frequencies, or the contingency matrix
(containing either counts or relative frequencies),
or an RDD of LabeledPoint containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
:param expected: Vector containing the expected categorical counts/relative
frequencies. `expected` is rescaled if the `expected` sum
differs from the `observed` sum.
:return: ChiSquaredTest object containing the test statistic, degrees
of freedom, p-value, the method used, and the null hypothesis.
>>> from pyspark.mllib.linalg import Vectors, Matrices
>>> observed = Vectors.dense([4, 6, 5])
>>> pearson = Statistics.chiSqTest(observed)
>>> print pearson.statistic
0.4
>>> pearson.degreesOfFreedom
2
>>> print round(pearson.pValue, 4)
0.8187
>>> pearson.method
u'pearson'
>>> pearson.nullHypothesis
u'observed follows the same distribution as expected.'
>>> observed = Vectors.dense([21, 38, 43, 80])
>>> expected = Vectors.dense([3, 5, 7, 20])
>>> pearson = Statistics.chiSqTest(observed, expected)
>>> print round(pearson.pValue, 4)
0.0027
>>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
>>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
>>> print round(chi.statistic, 4)
21.9958
>>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
... LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
... LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
... LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
>>> rdd = sc.parallelize(data, 4)
>>> chi = Statistics.chiSqTest(rdd)
>>> print chi[0].statistic
0.75
>>> print chi[1].statistic
1.5
"""
if isinstance(observed, RDD):
if not isinstance(observed.first(), LabeledPoint):
raise ValueError("observed should be an RDD of LabeledPoint")
jmodels = callMLlibFunc("chiSqTest", observed)
return [ChiSqTestResult(m) for m in jmodels]
if isinstance(observed, Matrix):
jmodel = callMLlibFunc("chiSqTest", observed)
else:
if expected and len(expected) != len(observed):
                raise ValueError("`expected` should have the same length as `observed`")
jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
return ChiSqTestResult(jmodel)
def _test():
import doctest
from pyspark import SparkContext
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
|
from collections import defaultdict
from hashlib import sha1
from itertools import chain
from operator import itemgetter
import os
from os import listdir
import sys
from funcy import (merge, imap, group_by, is_mapping, repeat,
constantly, icat, autocurry)
from dxr.filters import LINE
from dxr.indexers import (FileToIndex as FileToIndexBase,
TreeToIndex as TreeToIndexBase,
QUALIFIED_LINE_NEEDLE, unsparsify, FuncSig)
from dxr.lines import Ref
from dxr.plugins.clang.condense import condense_file, condense_global
from dxr.plugins.clang.menus import (FunctionRef, VariableRef, TypeRef,
NamespaceRef, NamespaceAliasRef, MacroRef, IncludeRef, TypedefRef)
from dxr.plugins.clang.needles import all_needles
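# Line-level needle mappings for the clang plugin: one QUALIFIED_LINE_NEEDLE
# property per kind of C/C++ occurrence we index (definitions, references,
# declarations, macros, namespaces, warnings, calls, and inheritance/override
# relationships).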
mappings = {
LINE: {
'properties': {
'c_function': QUALIFIED_LINE_NEEDLE,
'c_function_ref': QUALIFIED_LINE_NEEDLE,
'c_function_decl': QUALIFIED_LINE_NEEDLE,
'c_type_ref': QUALIFIED_LINE_NEEDLE,
'c_type_decl': QUALIFIED_LINE_NEEDLE,
'c_type': QUALIFIED_LINE_NEEDLE,
'c_var': QUALIFIED_LINE_NEEDLE,
'c_var_ref': QUALIFIED_LINE_NEEDLE,
'c_var_decl': QUALIFIED_LINE_NEEDLE,
'c_macro': QUALIFIED_LINE_NEEDLE,
'c_macro_ref': QUALIFIED_LINE_NEEDLE,
'c_namespace': QUALIFIED_LINE_NEEDLE,
'c_namespace_ref': QUALIFIED_LINE_NEEDLE,
'c_namespace_alias': QUALIFIED_LINE_NEEDLE,
'c_namespace_alias_ref': QUALIFIED_LINE_NEEDLE,
'c_warning': QUALIFIED_LINE_NEEDLE,
'c_warning_opt': QUALIFIED_LINE_NEEDLE,
'c_call': QUALIFIED_LINE_NEEDLE,
'c_bases': QUALIFIED_LINE_NEEDLE,
'c_derived': QUALIFIED_LINE_NEEDLE,
'c_member': QUALIFIED_LINE_NEEDLE,
'c_overrides': QUALIFIED_LINE_NEEDLE,
# At a base method's site, record all the methods that override
# it. Then we can search for any of those methods and turn up the
# base one:
'c_overridden': QUALIFIED_LINE_NEEDLE
}
}
}
class FileToIndex(FileToIndexBase):
"""C and C++ indexer using clang compiler plugin"""
def __init__(self, path, contents, plugin_name, tree, overrides, overriddens, parents, children, csv_names, temp_folder):
super(FileToIndex, self).__init__(path, contents, plugin_name, tree)
self.overrides = overrides
self.overriddens = overriddens
self.parents = parents
self.children = children
self.condensed = condense_file(temp_folder, path,
overrides, overriddens,
parents, children,
csv_names)
def needles_by_line(self):
return all_needles(
self.condensed,
self.overrides,
self.overriddens,
self.parents,
self.children)
def refs(self):
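        """Yield (start offset, end offset, Ref) triples for every condensed
        entity we know how to link: functions, variables, types, typedefs,
        namespaces, namespace aliases, macros, and includes."""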
def getter_or_empty(y):
return lambda x: x.get(y, [])
# Ref subclasses and the thing-getters that provide input to their
# from_condensed() methods:
classes_and_getters = [
(FunctionRef, [getter_or_empty('function'),
kind_getter('decldef', 'function'),
# Refs are not structured much like functions, but
# they have a qualname key, which is all FunctionRef
# requires, so we can just chain kind_getters
# together with other getters.
kind_getter('ref', 'function')]),
(VariableRef, [getter_or_empty('variable'),
kind_getter('ref', 'variable')]),
(TypeRef, [getter_or_empty('type'),
kind_getter('ref', 'type'),
not_kind_getter('decldef', 'function')]),
(TypedefRef, [getter_or_empty('typedef'),
kind_getter('ref', 'typedef')]),
(NamespaceRef, [getter_or_empty('namespace'),
kind_getter('ref', 'namespace')]),
(NamespaceAliasRef, [getter_or_empty('namespace_alias'),
kind_getter('ref', 'namespace_alias')]),
(MacroRef, [getter_or_empty('macro'),
kind_getter('ref', 'macro')]),
(IncludeRef, [getter_or_empty('include')])]
for ref_class, getters in classes_and_getters:
for prop in chain.from_iterable(g(self.condensed) for g in getters):
if 'span' in prop:
start, end = prop['span']
yield (self.char_offset(start.row, start.col),
self.char_offset(end.row, end.col),
ref_class.from_condensed(self.tree, prop))
@unsparsify
def annotations_by_line(self):
icon = "background-image: url('{0}/static/icons/warning.png');".format(
self.tree.config.www_root) # TODO: DRY
getter = itemgetter('msg', 'opt', 'span')
for msg, opt, span in imap(getter, self.condensed.get('warnings', [])):
if opt:
msg = "{0}[{1}]".format(msg, opt)
annotation = {
'title': msg,
'class': "note note-warning",
'style': icon
}
yield annotation, span
def links(self):
"""Yield a section for each class, type, enum, etc., as well as one
for macro definitions.
"""
def get_scopes_to_members():
"""Return a hash of qualified-scope-of-type -> set-of-members."""
ret = defaultdict(list)
for member in chain(self.condensed['function'],
self.condensed['variable']):
try:
scope, _ = member['qualname'].rsplit('::', 1)
except ValueError:
# There was no ::, so this wasn't a member of anything.
pass
else:
ret[scope].append(member)
return ret
scopes_to_members = get_scopes_to_members()
# Spin around the types (enums, classes, unions, etc.):
for type in self.condensed['type']:
if type['name']:
# First, link to the type definition itself:
links = [(type['kind'],
type['name'],
'#%s' % type['span'].start.row)]
# Look up the stuff with that scope in the hash, and spit out
# names and line numbers, sorting by line number.
members = list(scopes_to_members[type['qualname']])
members.sort(key=lambda m: m['span'].start.row)
links.extend(('method' if isinstance(m['type'], FuncSig)
else 'field', # icon
m['name'],
'#%s' % m['span'].start.row)
for m in members if m['name'])
yield 30, type['name'], links
# Add all macros to the macro section:
links = [('macro', t['name'], '#%s' % t['span'].start.row)
for t in self.condensed['macro']]
if links:
yield 100, 'Macros', links
@autocurry
def kind_getter(field, kind, condensed):
"""Reach into a field and filter based on the kind."""
return (ref for ref in condensed.get(field, []) if ref.get('kind') == kind)
@autocurry
def not_kind_getter(field, kind, condensed):
"""Reach into a field and filter out those with given kind."""
return (ref for ref in condensed.get(field, []) if ref.get('kind') != kind)
class TreeToIndex(TreeToIndexBase):
def pre_build(self):
self._temp_folder = os.path.join(self.tree.temp_folder,
'plugins',
self.plugin_name)
def environment(self, vars_):
"""Set up environment variables to trigger analysis dumps from clang.
        We'll store all the harvested metadata in the plugin's temporary folder.
"""
tree = self.tree
plugin_folder = os.path.dirname(__file__)
flags = [
'-load', os.path.join(plugin_folder, 'libclang-index-plugin.so'),
'-add-plugin', 'dxr-index',
'-plugin-arg-dxr-index', tree.source_folder
]
flags_str = " ".join(imap('-Xclang {}'.format, flags))
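        # For illustration (paths here are hypothetical): with the plugin living
        # at /plugins/clang, CC below ends up as
        #   clang -Xclang -load -Xclang /plugins/clang/libclang-index-plugin.so
        #         -Xclang -add-plugin -Xclang dxr-index
        #         -Xclang -plugin-arg-dxr-index -Xclang <tree.source_folder>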
env = {
'CC': "clang %s" % flags_str,
'CXX': "clang++ %s" % flags_str,
'DXR_CLANG_FLAGS': flags_str,
'DXR_CXX_CLANG_OBJECT_FOLDER': tree.object_folder,
'DXR_CXX_CLANG_TEMP_FOLDER': self._temp_folder,
}
env['DXR_CC'] = env['CC']
env['DXR_CXX'] = env['CXX']
return merge(vars_, env)
def post_build(self):
def csv_map():
"""Map input files to the output CSVs corresponding to them.
Return {path sha1: [file names (minus '.csv' extension)]}.
This saves a lot of globbing later, which can add up to hours over
the course of tens of thousands of files, depending on IO speed. An
alternative approach might be a radix tree of folders: less RAM,
more IO. Try that and bench it sometime.
"""
ret = defaultdict(list)
for csv_name in listdir(self._temp_folder):
if csv_name.endswith('.csv'):
path_hash, content_hash, ext = csv_name.split('.')
# Removing ".csv" saves at least 2MB per worker on 700K files:
ret[path_hash].append(csv_name[:-4])
return ret
self._csv_map = csv_map()
self._overrides, self._overriddens, self._parents, self._children = condense_global(self._temp_folder,
chain.from_iterable(self._csv_map.itervalues()))
def file_to_index(self, path, contents):
return FileToIndex(path,
contents,
self.plugin_name,
self.tree,
self._overrides,
self._overriddens,
self._parents,
self._children,
self._csv_map[sha1(path).hexdigest()],
self._temp_folder)
|
|
import asyncio, discord, os, re, psutil, platform, time, sys, fnmatch, subprocess, speedtest, json, struct, shutil, tempfile
from PIL import Image
from discord.ext import commands
from Cogs import Utils, Settings, DisplayName, ReadableTime, GetImage, ProgressBar, UserTime, Message, DL
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Bot(bot, settings, sys.argv[0], 'python'))
# This is the Bot module - it contains things like nickname, status, etc
class Bot(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings, path = None, pypath = None):
self.bot = bot
self.settings = settings
self.startTime = int(time.time())
self.path = path
self.pypath = pypath
self.regex = re.compile(r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?")
self.is_current = False
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = False
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
await self.bot.wait_until_ready()
self.is_current = True
self.bot.loop.create_task(self.status_loop())
async def status_loop(self):
# Helper method to loop through and ensure the status remains
while not self.bot.is_closed():
try:
if not self.is_current:
# Bail if we're not the current instance
return
await self._update_status()
except Exception as e:
print(str(e))
await asyncio.sleep(3600) # runs only every 60 minutes (3600 seconds)
async def onserverjoin(self, server):
# Iterate the blocked list and see if we are blocked
serverList = self.settings.getGlobalStat('BlockedServers',[])
for serv in serverList:
serverName = str(serv).lower()
try:
serverID = int(serv)
except Exception:
serverID = None
if serverName == server.name.lower() or serverID == server.id:
# Found it
try:
await server.leave()
except:
pass
return True
# Check for owner name and id quick
# Name *MUST* be case-sensitive and have the discriminator for safety
namecheck = server.owner.name + "#" + str(server.owner.discriminator)
if serv == namecheck or serverID == server.owner.id:
# Got the owner
try:
await server.leave()
except:
pass
return True
return False
@commands.command(pass_context=True)
async def botinfo(self, ctx):
"""Lists some general stats about the bot."""
bot_member = self.bot.user if not ctx.guild else ctx.guild.get_member(self.bot.user.id)
color = bot_member if isinstance(bot_member,discord.Member) else None
message = await Message.EmbedText(title="Gathering info...", color=color).send(ctx)
# Get guild count
guild_count = "{:,}".format(len(self.bot.guilds))
# Try to do this more efficiently, and faster
total_members = [x.id for x in self.bot.get_all_members()]
unique_members = set(total_members)
if len(total_members) == len(unique_members):
member_count = "{:,}".format(len(total_members))
else:
member_count = "{:,} ({:,} unique)".format(len(total_members), len(unique_members))
# Get commands/cogs count
cog_amnt = 0
empty_cog = 0
for cog in self.bot.cogs:
visible = []
for c in self.bot.get_cog(cog).get_commands():
if c.hidden:
continue
visible.append(c)
if not len(visible):
empty_cog +=1
# Skip empty cogs
continue
cog_amnt += 1
cog_count = "{:,} cog".format(cog_amnt)
# Easy way to append "s" if needed:
if not len(self.bot.cogs) == 1:
cog_count += "s"
if empty_cog:
cog_count += " [{:,} without commands]".format(empty_cog)
visible = []
for command in self.bot.commands:
if command.hidden:
continue
visible.append(command)
command_count = "{:,}".format(len(visible))
# Get localized created time
local_time = UserTime.getUserTime(ctx.author, self.settings, bot_member.created_at)
created_at = "{} {}".format(local_time['time'], local_time['zone'])
# Get localized joined time if in a server
if isinstance(bot_member,discord.Member):
local_time = UserTime.getUserTime(ctx.author, self.settings, bot_member.joined_at)
joined_at = "{} {}".format(local_time['time'], local_time['zone'])
# Get the current prefix
prefix = await self.bot.command_prefix(self.bot, ctx.message)
prefix = ", ".join([x for x in prefix if not x == "<@!{}> ".format(self.bot.user.id)])
# Get the owners
ownerList = self.settings.getGlobalStat('Owner',[])
owners = "Unclaimed..."
if len(ownerList):
userList = []
for owner in ownerList:
# Get the owner's name
user = self.bot.get_user(int(owner))
if not user:
userString = "Unknown User ({})".format(owner)
else:
userString = "{}#{}".format(user.name, user.discriminator)
userList.append(userString)
owners = ', '.join(userList)
# Get bot's avatar url
avatar = bot_member.avatar_url
if not len(avatar):
avatar = bot_member.default_avatar_url
# Build the embed
fields = [
{"name":"Members","value":member_count,"inline":True},
{"name":"Servers","value":guild_count,"inline":True},
{"name":"Commands","value":command_count + " (in {})".format(cog_count),"inline":True},
{"name":"Created","value":created_at,"inline":True},
{"name":"Owners","value":owners,"inline":True},
{"name":"Prefixes","value":prefix,"inline":True},
{"name":"Shard Count","value":self.bot.shard_count,"inline":True}
]
if isinstance(bot_member,discord.Member):
fields.append({"name":"Joined","value":joined_at,"inline":True})
# Get status
status_text = ":green_heart:"
if bot_member.status == discord.Status.offline:
status_text = ":black_heart:"
elif bot_member.status == discord.Status.dnd:
status_text = ":heart:"
elif bot_member.status == discord.Status.idle:
status_text = ":yellow_heart:"
fields.append({"name":"Status","value":status_text,"inline":True})
if bot_member.activity and bot_member.activity.name:
play_list = [ "Playing", "Streaming", "Listening to", "Watching" ]
try:
play_string = play_list[bot_member.activity.type]
except:
play_string = "Playing"
fields.append({"name":play_string,"value":str(bot_member.activity.name),"inline":True})
if bot_member.activity.type == 1:
# Add the URL too
fields.append({"name":"Stream URL","value":"[Watch Now]({})".format(bot_member.activity.url),"inline":True})
# Update the embed
await Message.Embed(
title=DisplayName.name(bot_member) + " Info",
color=color,
description="Current Bot Information",
fields=fields,
thumbnail=avatar
).edit(ctx, message)
@commands.command(pass_context=True)
async def ping(self, ctx):
"""Feeling lonely?"""
before_typing = time.monotonic()
await ctx.trigger_typing()
after_typing = time.monotonic()
ms = int((after_typing - before_typing) * 1000)
msg = '*{}*, ***PONG!*** (~{}ms)'.format(ctx.message.author.mention, ms)
await ctx.send(msg,allowed_mentions=discord.AllowedMentions.all())
@commands.command(pass_context=True)
async def nickname(self, ctx, *, name : str = None):
"""Set the bot's nickname (admin-only)."""
if not await Utils.is_admin_reply(ctx): return
# Let's get the bot's member in the current server
botName = "{}#{}".format(self.bot.user.name, self.bot.user.discriminator)
botMember = ctx.message.guild.get_member_named(botName)
await botMember.edit(nick=name)
@commands.command(pass_context=True)
async def hostinfo(self, ctx):
"""List info about the bot's host environment."""
message = await ctx.channel.send('Gathering info...')
# cpuCores = psutil.cpu_count(logical=False)
# cpuThred = psutil.cpu_count()
cpuThred = os.cpu_count()
cpuUsage = psutil.cpu_percent(interval=1)
memStats = psutil.virtual_memory()
memPerc = memStats.percent
memUsed = memStats.used
memTotal = memStats.total
memUsedGB = "{0:.1f}".format(((memUsed / 1024) / 1024) / 1024)
memTotalGB = "{0:.1f}".format(((memTotal/1024)/1024)/1024)
currentOS = platform.platform()
system = platform.system()
release = platform.release()
version = platform.version()
processor = platform.processor()
botMember = DisplayName.memberForID(self.bot.user.id, ctx.message.guild)
botName = DisplayName.name(botMember)
currentTime = int(time.time())
timeString = ReadableTime.getReadableTimeBetween(self.startTime, currentTime)
pythonMajor = sys.version_info.major
pythonMinor = sys.version_info.minor
pythonMicro = sys.version_info.micro
pythonRelease = sys.version_info.releaselevel
pyBit = struct.calcsize("P") * 8
process = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], shell=False, stdout=subprocess.PIPE)
git_head_hash = process.communicate()[0].strip()
threadString = 'thread'
if not cpuThred == 1:
threadString += 's'
msg = '***{}\'s*** **Home:**\n'.format(botName)
msg += '```\n'
msg += 'OS : {}\n'.format(currentOS)
if not self.settings.getGlobalStat("HideHostname",False):
msg += 'Hostname : {}\n'.format(platform.node())
msg += 'Language : Python {}.{}.{} {} ({} bit)\n'.format(pythonMajor, pythonMinor, pythonMicro, pythonRelease, pyBit)
msg += 'Commit : {}\n\n'.format(git_head_hash.decode("utf-8"))
msg += ProgressBar.center('{}% of {} {}'.format(cpuUsage, cpuThred, threadString), 'CPU') + '\n'
msg += ProgressBar.makeBar(int(round(cpuUsage))) + "\n\n"
msg += ProgressBar.center('{} ({}%) of {}GB used'.format(memUsedGB, memPerc, memTotalGB), 'RAM') + '\n'
msg += ProgressBar.makeBar(int(round(memPerc))) + "\n\n"
msg += '{} uptime```'.format(timeString)
await message.edit(content=msg)
@commands.command()
async def hidehostname(self, ctx, *, yes_no = None):
"""Queries or turns on/off hostname hiding in the hostinfo command (owner-only)."""
if not await Utils.is_owner_reply(ctx): return
await ctx.send(Utils.yes_no_setting(
ctx,
"Hostname hiding in `hostinfo`".format(ctx.prefix),
"HideHostname",
yes_no,
default=False,
is_global=True
))
@commands.command(pass_context=True)
async def getimage(self, ctx, *, image):
"""Tests downloading - owner only"""
# Only allow owner to modify the limits
if not await Utils.is_owner_reply(ctx): return
mess = await Message.Embed(title="Test", description="Downloading file...").send(ctx)
file_path = await GetImage.download(image)
mess = await Message.Embed(title="Test", description="Uploading file...").edit(ctx, mess)
await Message.EmbedText(title="Image", file=file_path).edit(ctx, mess)
GetImage.remove(file_path)
@commands.command(pass_context=True)
async def speedtest(self, ctx):
"""Run a network speed test (owner only)."""
if not await Utils.is_owner_reply(ctx): return
message = await ctx.send('Running speed test...')
try:
st = speedtest.Speedtest()
st.get_best_server()
l = asyncio.get_event_loop()
msg = '**Speed Test Results:**\n'
msg += '```\n'
await message.edit(content="Running speed test...\n- Downloading...")
d = await self.bot.loop.run_in_executor(None, st.download)
msg += ' Ping: {} ms\nDownload: {} Mb/s\n'.format(round(st.results.ping, 2), round(d/1024/1024, 2))
await message.edit(content="Running speed test...\n- Downloading...\n- Uploading...")
u = await self.bot.loop.run_in_executor(None, st.upload)
msg += ' Upload: {} Mb/s```'.format(round(u/1024/1024, 2))
await message.edit(content=msg)
except Exception as e:
await message.edit(content="Speedtest Error: {}".format(str(e)))
@commands.command(pass_context=True)
async def adminunlim(self, ctx, *, yes_no : str = None):
"""Sets whether or not to allow unlimited xp to admins (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Admin unlimited xp","AdminUnlimited",yes_no))
@commands.command(pass_context=True)
async def basadmin(self, ctx, *, yes_no : str = None):
"""Sets whether or not to treat bot-admins as admins with regards to xp (admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Bot-admin as admin","BotAdminAsAdmin",yes_no))
@commands.command(pass_context=True)
async def joinpm(self, ctx, *, yes_no : str = None):
"""Sets whether or not to pm the rules to new users when they join (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"New user pm","JoinPM",yes_no))
@commands.command(pass_context=True)
async def avatar(self, ctx, filename = None):
"""Sets the bot's avatar (owner only)."""
if not await Utils.is_owner_reply(ctx): return
if filename is None and not len(ctx.message.attachments):
m = await ctx.send("Removing avatar...")
try:
await self.bot.user.edit(avatar=None)
except discord.errors.HTTPException as e:
return await m.edit(content="Looks like I can't do that right now. Try again later!")
return await m.edit(content='Avatar removed!')
# Check if attachment
if filename == None:
filename = ctx.message.attachments[0].url
# Let's check if the "url" is actually a user
test_user = DisplayName.memberForName(filename, ctx.guild)
if test_user:
# Got a user!
filename = test_user.avatar_url if len(test_user.avatar_url) else test_user.default_avatar_url
# Ensure string
filename = str(filename)
# Check if we created a temp folder for this image
isTemp = False
status = await ctx.send('Checking if url (and downloading if valid)...')
# File name is *something* - let's first check it as a url, then a file
extList = ["jpg", "jpeg", "png", "gif", "tiff", "tif", "webp"]
if GetImage.get_ext(filename).lower() in extList:
# URL has an image extension
f = await GetImage.download(filename)
if f:
# we got a download - let's reset and continue
filename = f
isTemp = True
if not os.path.isfile(filename):
if not os.path.isfile('./{}'.format(filename)):
return await status.edit(content='*{}* doesn\'t exist absolutely, or in my working directory.'.format(filename))
else:
# Local file name
filename = './{}'.format(filename)
# File exists - check if image
img = Image.open(filename)
ext = img.format
if not ext:
# File isn't a valid image
return await status.edit(content='*{}* isn\'t a valid image format.'.format(filename))
wasConverted = False
# Is an image PIL understands
        if not ext.lower() == "png":
# Not a PNG - let's convert
await status.edit(content='Converting to png...')
filename = '{}.png'.format(filename)
img.save(filename)
wasConverted = True
# We got it - crop and go from there
w, h = img.size
dw = dh = 0
if w > h:
# Wide
dw = int((w-h)/2)
elif h > w:
# Tall
dh = int((h-w)/2)
# Run the crop
img.crop((dw, dh, w-dw, h-dh)).save(filename)
await status.edit(content='Uploading and applying avatar...')
with open(filename, 'rb') as f:
newAvatar = f.read()
try:
await self.bot.user.edit(avatar=newAvatar)
except discord.errors.HTTPException as e:
return await status.edit(content="Looks like I can't do that right now. Try again later!")
# Cleanup - try removing with shutil.rmtree, then with os.remove()
await status.edit(content='Cleaning up...')
if isTemp:
GetImage.remove(filename)
else:
if wasConverted:
os.remove(filename)
await status.edit(content='Avatar set!')
# Needs rewrite!
@commands.command(pass_context=True)
async def reboot(self, ctx):
"""Reboots the bot (owner only)."""
if not await Utils.is_owner_reply(ctx): return
# Save the return channel and flush settings
self.settings.setGlobalStat("ReturnChannel",ctx.channel.id)
# Flush settings asynchronously here
await ctx.invoke(self.settings.flush)
await ctx.send("Rebooting...")
# Logout, stop the event loop, close the loop, quit
try:
task_list = asyncio.Task.all_tasks()
except AttributeError:
task_list = asyncio.all_tasks()
for task in task_list:
try:
task.cancel()
except:
continue
try:
await self.bot.close()
self.bot.loop.stop()
self.bot.loop.close()
except:
pass
# Kill this process
os._exit(2)
@commands.command(pass_context=True)
async def shutdown(self, ctx):
"""Shuts down the bot (owner only)."""
if not await Utils.is_owner_reply(ctx): return
# Flush settings asynchronously here
await ctx.invoke(self.settings.flush)
await ctx.send("Shutting down...")
# Logout, stop the event loop, close the loop, quit
try:
task_list = asyncio.Task.all_tasks()
except AttributeError:
task_list = asyncio.all_tasks()
for task in task_list:
try:
task.cancel()
except:
continue
try:
await self.bot.close()
self.bot.loop.stop()
self.bot.loop.close()
except:
pass
# Kill this process
os._exit(3)
@commands.command(pass_context=True)
async def servers(self, ctx):
"""Lists the number of servers I'm connected to!"""
await ctx.send("I am a part of *{}* server{}!".format(len(self.bot.guilds),"" if len(self.bot.guilds) == 1 else "s"))
async def _update_status(self):
# Helper method to update the status based on the server dict
# Get ready - play game!
game = self.settings.getGlobalStat("Game", None)
url = self.settings.getGlobalStat("Stream", None)
t = self.settings.getGlobalStat("Type", 0)
status = self.settings.getGlobalStat("Status", None)
# Set status
if status == "2":
s = discord.Status.idle
elif status == "3":
s = discord.Status.dnd
elif status == "4":
s = discord.Status.invisible
else:
# Online when in doubt
s = discord.Status.online
dgame = discord.Activity(name=game, url=url, type=t) if game else None
await self.bot.change_presence(status=s, activity=dgame)
@commands.command(pass_context=True)
async def pres(self, ctx, playing_type="0", status_type="online", game=None, url=None):
"""Changes the bot's presence (owner-only).
Playing type options are:
0. Playing (or None without game)
1. Streaming (requires valid twitch url)
2. Listening
3. Watching
Status type options are:
1. Online
2. Idle
3. DnD
4. Invisible
If any of the passed entries have spaces, they must be in quotes."""
if not await Utils.is_owner_reply(ctx): return
# Check playing type
play = None
play_string = ""
if playing_type.lower() in [ "0", "play", "playing" ]:
play = 0
play_string = "Playing"
elif playing_type.lower() in [ "1", "stream", "streaming" ]:
play = 1
play_string = "Streaming"
if url == None or not any("twitch.tv" in x.lower() for x in Utils.get_urls(url)):
# Guess what - you failed!! :D
return await ctx.send("You need a valid twitch.tv url to set a streaming status!")
elif playing_type.lower() in [ "2", "listen", "listening" ]:
play = 2
play_string = "Listening"
elif playing_type.lower() in [ "3", "watch", "watching" ]:
play = 3
play_string = "Watching"
# Verify we got something
if play == None:
# NOooooooooaooOOooOOooope.
return await ctx.send("Playing type is invalid!")
# Clear the URL if we're not streaming
if not play == 1:
url = None
# Check status type
stat = None
stat_string = ""
if status_type.lower() in [ "1", "online", "here", "green" ]:
stat = "1"
stat_string = "Online"
elif status_type.lower() in [ "2", "idle", "away", "gone", "yellow" ]:
stat = "2"
stat_string = "Idle"
elif status_type.lower() in [ "3", "dnd", "do not disturb", "don't disturb", "busy", "red" ]:
stat = "3"
stat_string = "Do Not Disturb"
elif status_type.lower() in [ "4", "offline", "invisible", "ghost", "gray", "black" ]:
stat = "4"
stat_string = "Invisible"
# Verify we got something
if stat == None:
# OHMYGODHOWHARDISITTOFOLLOWDIRECTIONS?!?!?
return await ctx.send("Status type is invalid!")
# Here, we assume that everything is A OK. Peachy keen.
# Set the shiz and move along
self.settings.setGlobalStat("Game",game)
self.settings.setGlobalStat("Stream",url)
self.settings.setGlobalStat("Status",stat)
self.settings.setGlobalStat("Type",play)
# Actually update our shit
await self._update_status()
# Let's formulate a sexy little response concoction
inline = True
await Message.Embed(
title="Presence Update",
color=ctx.author,
fields=[
{ "name" : "Game", "value" : str(game), "inline" : inline },
{ "name" : "Status", "value" : stat_string, "inline" : inline },
{ "name" : "Type", "value" : play_string, "inline" : inline },
{ "name" : "URL", "value" : str(url), "inline" : inline }
]
).send(ctx)
@commands.command(pass_context=True)
async def status(self, ctx, status = None):
"""Gets or sets the bot's online status (owner-only).
Options are:
1. Online
2. Idle
3. DnD
4. Invisible"""
if not await Utils.is_owner_reply(ctx): return
if status == None:
botmem = ctx.guild.get_member(self.bot.user.id)
return await ctx.send("I'm currently set to *{}!*".format(botmem.status))
stat_string = "1"
if status == "1" or status.lower() == "online":
s = discord.Status.online
stat_name = "online"
elif status == "2" or status.lower() == "idle" or status.lower() == "away" or status.lower() == "afk":
stat_string = "2"
s = discord.Status.idle
stat_name = "idle"
elif status == "3" or status.lower() == "dnd" or status.lower() == "do not disturb" or status.lower() == "don't disturb":
stat_string = "3"
s = discord.Status.dnd
stat_name = "dnd"
elif status == "4" or status.lower() == "offline" or status.lower() == "invisible":
stat_string = "4"
s = discord.Status.invisible
stat_name = "invisible"
else:
return await ctx.send("That is not a valid status.")
self.settings.setGlobalStat("Status",stat_string)
await self._update_status()
await ctx.send("Status changed to *{}!*".format(stat_name))
async def set_status(self, ctx, status, status_name="Playing", status_type=0, status_url=None):
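        # Common backend for playgame/watchgame/listengame/streamgame below:
        # validates streaming URLs, stores Game/Stream/Type in the global
        # settings, then refreshes the presence via _update_status().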
# Only allow owner
if not await Utils.is_owner_reply(ctx): return
if status == status_url == None:
self.settings.setGlobalStat('Game',None)
self.settings.setGlobalStat('Stream',None)
self.settings.setGlobalStat('Type',0)
msg = 'Removing my {} status...'.format(status_name.lower())
message = await ctx.send(msg)
await self._update_status()
return await message.edit(content='{} status removed!'.format(status_name))
if status_type == 1:
if not status:
return await ctx.send("You need to provide a url if streaming!")
if not any("twitch.tv" in x.lower() for x in Utils.get_urls(ctx)):
return await ctx.send("You need to provide a valid twitch.tv url for streaming!")
self.settings.setGlobalStat('Game',status)
self.settings.setGlobalStat('Stream',status_url)
self.settings.setGlobalStat('Type',status_type)
msg = 'Setting my {} status to *{}*...'.format(status_name.lower(), status)
message = await ctx.send(Utils.suppressed(ctx,msg))
await self._update_status()
await message.edit(content='{} status set to **{}**{}!'.format(status_name,Utils.suppressed(ctx,status)," at `{}`".format(status_url) if status_url else ""))
@commands.command(pass_context=True)
async def playgame(self, ctx, *, game : str = None):
"""Sets the playing status of the bot (owner-only)."""
await self.set_status(ctx,game,"Playing",0)
@commands.command(pass_context=True)
async def watchgame(self, ctx, *, game : str = None):
"""Sets the watching status of the bot (owner-only)."""
await self.set_status(ctx,game,"Watching",3)
@commands.command(pass_context=True)
async def listengame(self, ctx, *, game : str = None):
"""Sets the listening status of the bot (owner-only)."""
await self.set_status(ctx,game,"Listening",2)
@commands.command(pass_context=True)
async def streamgame(self, ctx, url = None, *, game : str = None):
"""Sets the streaming status of the bot, requires the url and the game (owner-only)."""
await self.set_status(ctx,game,"Streaming",1,url)
@commands.command(pass_context=True)
async def setbotparts(self, ctx, *, parts : str = None):
"""Set the bot's parts - can be a url, formatted text, or nothing to clear."""
if not await Utils.is_owner_reply(ctx): return
if not parts:
parts = ""
self.settings.setGlobalUserStat(self.bot.user, "Parts", parts)
msg = '*{}\'s* parts have been set to:\n{}'.format(DisplayName.serverNick(self.bot.user, ctx.guild), parts)
await ctx.send(Utils.suppressed(ctx,msg))
@commands.command(pass_context=True)
async def source(self, ctx):
"""Link the github source."""
source = "https://github.com/corpnewt/CorpBot.py"
msg = '**My insides are located at:**\n\n{}'.format(source)
await ctx.send(msg)
@commands.command(pass_context=True)
async def cloc(self, ctx):
"""Outputs the total count of lines of code in the currently installed repo."""
# Script pulled and edited from https://github.com/kyco/python-count-lines-of-code/blob/python3/cloc.py
message = await Message.EmbedText(title="Shuffling papers...", color=ctx.author).send(ctx)
bot_member = self.bot.user if not ctx.guild else ctx.guild.get_member(self.bot.user.id)
# Get our current working directory - should be the bot's home
path = os.getcwd()
# Set up some lists
extensions = []
code_count = []
ext_dict = {
"py":"Python (.py)",
"bat":"Windows Batch (.bat)",
"sh":"Shell Script (.sh)",
"command":"Command Script (.command)"}
# Get the extensions - include our include list
extensions = self.get_extensions(path, list(ext_dict))
for run in extensions:
extension = "*."+run
temp = 0
for root, dir, files in os.walk(path):
for items in fnmatch.filter(files, extension):
value = root + "/" + items
temp += sum(1 for line in open(value, 'rb'))
code_count.append(temp)
# Set up our output
fields = [{"name":ext_dict.get(extensions[x],extensions[x]),"value":"{:,} line{}".format(code_count[x],"" if code_count[x]==1 else "s")} for x in range(len(code_count))]
return await Message.Embed(
title="Counted Lines of Code",
description="Some poor soul took the time to sloppily write the following to bring me life...",
fields=fields,
thumbnail=bot_member.avatar_url if bot_member.avatar_url else bot_member.default_avatar_url
).edit(ctx,message)
# Helper function to get extensions
def get_extensions(self, path, excl):
extensions = []
for root, dir, files in os.walk(path):
for items in fnmatch.filter(files, "*"):
temp_extensions = items.rfind(".")
ext = items[temp_extensions+1:]
if ext not in extensions:
if ext in excl:
extensions.append(ext)
return extensions
|
|
import sh
import glob
import os.path
from contextlib import ExitStack
from . import util
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InvalidConfigError(Error):
"""Exception raised when there is a critical configuration error.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
def get_common_args_and_env(config, remote, repo_name):
args = {}
env = {}
if 'ssh_command' in config:
env['BORG_RSH'] = config['ssh_command']
passphrase = util.lookup(config, ['secret', remote, repo_name, 'passphrase'],
default=config.get('passphrase'))
if passphrase is not None:
env['BORG_PASSPHRASE'] = passphrase
env['BORG_DISPLAY_PASSPHRASE'] = "n"
if 'log_level' in config:
level = config['log_level']
if level in ('critical', 'error', 'warning', 'info', 'debug', 'verbose'):
args[level] = True
else:
raise InvalidConfigError('"%s" is not a legal log level. Expected "critical", '
'"error", "warning", "info", "debug" or "verbose".' % level)
if 'umask' in config:
args['umask'] = config['umask']
if 'remote_borg_path' in config:
args['remote-path'] = config['remote_borg_path']
if 'location' not in config:
raise InvalidConfigError('No location specified for remote "%s".' % remote)
return args, env
def execution_context(config):
"""Return a suitable context manager for calling borg. If the sudo setting is
true, it will be the sudo context manager from the sh package. If the sudo_user
setting is also set, that user will be passed to sudo using the --user option.
If the sudo setting is not set to true, a dummy context manager which does not
do anything will be returned.
Arguments:
config -- a dictionary-like configuration object which will be used to
select which context manager will be returned
"""
if config.get('sudo', False):
user = config.get('sudo_user', None)
if user is not None:
return sh.contrib.sudo(u=user, _with=True)
else:
return sh.contrib.sudo
else:
# If the sudo setting is not true, return a dummy context manager.
return ExitStack()
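# Illustrative mapping (follows directly from the code above; the user name
# 'backup' is a hypothetical placeholder):
#   {'sudo': True, 'sudo_user': 'backup'} -> sh.contrib.sudo(u='backup', _with=True)
#   {'sudo': True}                        -> sh.contrib.sudo
#   {} or {'sudo': False}                 -> ExitStack() (no-op context manager)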
def hook(config, args_tail=[]):
try:
command = config['command']
except KeyError:
raise InvalidConfigError('The "command" option is required for hooks.')
args = list(config.get('args', []))  # copy so the config's own list is not mutated
args.extend(args_tail)
with execution_context(config):
return sh.Command(command)(*args, _env={}, _fg=True)
def init(config, remote, repo_name):
"""Call borg to initialize a repository. Any relevant options specified in the
config object will be passed to borg.
Arguments:
config -- a dictionary-like object with the needed configuration for the
source and remote involved
remote -- the name of the remote
repo_name -- the name of the repository to initialize
"""
args, env = get_common_args_and_env(config, remote, repo_name)
if 'encryption' in config:
encryption = config['encryption']
if encryption in ('none', 'keyfile', 'repokey'):
args['encryption'] = encryption
else:
raise InvalidConfigError('"%s" is not a valid encryption mode. Expected "none", '
'"keyfile" or "repokey".' % encryption)
if config.get('append_only', False):
args['append-only'] = True
location = config['location']
repo_path = os.path.expanduser(location + repo_name)
with execution_context(config):
sh.borg.init(repo_path, _fg=True, _env=env, **args)
def create(config, remote, repo_name, archive):
"""Call borg to create an archive (perform a backup). Any relevant options specified
in the config object will be passed to borg.
Arguments:
config -- a dictionary-like object with the needed configuration for the
source and remote involved
remote -- the name of the remote to backup to
repo_name -- the name of the repository to create the archive in
archive -- the name of the archive to create (this needs to be unique within
the repository)
"""
args = []
kwargs, env = get_common_args_and_env(config, remote, repo_name)
if config.get('stats', False):
kwargs['stats'] = True
if config.get('progress', False):
kwargs['progress'] = True
if 'exclude_file' in config:
kwargs['exclude-from'] = os.path.expanduser(config['exclude_file'])
if config.get('exclude_caches', False):
kwargs['exclude-caches'] = True
if config.get('one_file_system', False):
kwargs['one-file-system'] = True
if config.get('dry_run', False):
kwargs['dry-run'] = True
if 'compression' in config:
kwargs['compression'] = config['compression']
location = config['location']
args.append(os.path.expanduser(location + repo_name) + "::" + archive)
# Add all paths to cmd, with ~ expanded and shell-like globbing (using * wildcards)
paths = []
for path in config.get('paths', []):
paths.extend(glob.glob(os.path.expanduser(path)))
if len(paths) > 0:
args.extend(paths)
else:
raise InvalidConfigError('There are no existing paths to backup to the repo "%s".' % repo_name)
with execution_context(config):
sh.borg.create(*args, _fg=True, _env=env, **kwargs)
def prune(config, remote, repo_name, prefix):
"""Call borg to prune a repository. Any relevant options specified in the
config object will be passed to borg.
Arguments:
config -- a dictionary-like object with the needed configuration for the
repo and remote involved
remote -- the name of the remote where the repo is
repo_name -- the name of the repository to prune
prefix -- if not None, only archives whose names start with this prefix are considered
"""
args = []
kwargs, env = get_common_args_and_env(config, remote, repo_name)
if config.get('stats', False):
kwargs['stats'] = True
if prefix is not None:
kwargs['prefix'] = prefix
if config.get('dry_run', False):
kwargs['dry-run'] = True
for attr in ('keep-within', 'keep-secondly', 'keep-minutely',
'keep-hourly', 'keep-daily', 'keep-weekly', 'keep-monthly',
'keep-yearly'):
cfg_attr = attr.replace('-', '_')
if cfg_attr in config:
kwargs[attr] = config[cfg_attr]
location = config['location']
args.append(os.path.expanduser(location + repo_name))
with execution_context(config):
print("Running borg prune with:", repr(env), repr(args), repr(kwargs))
sh.borg.prune(*args, _fg=True, _env=env, **kwargs)
def check(config, remote, repo_name, prefix):
"""Call borg to check a repository. Any relevant options specified in the
config object will be passed to borg.
Arguments:
config -- a dictionary-like object with the needed configuration for the
repo and remote involved
remote -- the name of the remote where the repo is
repo_name -- the name of the repository to check
prefix -- if not None, only archives whose names start with this prefix are considered
"""
args = []
kwargs, env = get_common_args_and_env(config, remote, repo_name)
if prefix is not None:
kwargs['prefix'] = prefix
if 'check_first' in config:
kwargs['first'] = config['check_first']
if 'check_last' in config:
kwargs['last'] = config['check_last']
if config.get('dry_run', False):
kwargs['dry-run'] = True
if config.get('repository_only', False):
kwargs['repository-only'] = True
if config.get('archives_only', False):
kwargs['archives-only'] = True
if config.get('verify_data', False):
kwargs['verify-data'] = True
location = config['location']
args.append(os.path.expanduser(location + repo_name))
with execution_context(config):
print("Running borg check with:", repr(env), repr(args), repr(kwargs))
sh.borg.check(*args, _fg=True, _env=env, **kwargs)
def extract(config):
raise NotImplementedError
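# Illustrative usage sketch (not part of this module; the remote name, repo
# name, archive name and location below are hypothetical placeholders). The
# config keys shown are the ones consumed by get_common_args_and_env() and
# create() above; running this requires borg and the sh package.
if __name__ == '__main__':
    example_config = {
        'location': 'backup@example.org:backups/',
        'paths': ['~'],
        'compression': 'lz4',
        'stats': True,
    }
    # Roughly equivalent to:
    #   borg create --stats --compression lz4 backup@example.org:backups/home::example-archive ~
    create(example_config, 'example-remote', 'home', 'example-archive')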
|
|
#!/usr/bin/env python
from __future__ import print_function
import os
import re
import sys
import json
import time
import binascii
import tarfile
import atexit
import logging
logger = logging.getLogger('knlog')
logger.setLevel(logging.INFO)
hdr = logging.FileHandler('/tmp/upload/fastdick.log', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s] %(message)s')
hdr.setFormatter(formatter)
logger.addHandler(hdr)
def print(fmt, **kwargs):
# Route print() output to the log file; keyword arguments such as end=
# are accepted for compatibility with print() callers but are not forwarded.
logger.info(fmt)
try:
import ssl
import hashlib
except ImportError as ex:
print("Error: cannot import module ssl or hashlib (%s)." % str(ex))
print("If you are using openwrt, run \"opkg install python-openssl\"")
os._exit(0)
try:
import zlib
except ImportError as ex:
print("Warning: cannot import module zlib (%s)." % str(ex))
# TODO: if a Python distribution without zlib is ever encountered, disable the gzip Accept-Encoding header
#xunlei use self-signed certificate; on py2.7.9+
if hasattr(ssl, '_create_unverified_context') and hasattr(ssl, '_create_default_https_context'):
ssl._create_default_https_context = ssl._create_unverified_context
#rsa_mod = 0xAC69F5CCC8BDE47CD3D371603748378C9CFAD2938A6B021E0E191013975AD683F5CBF9ADE8BD7D46B4D2EC2D78AF146F1DD2D50DC51446BB8880B8CE88D476694DFC60594393BEEFAA16F5DBCEBE22F89D640F5336E42F587DC4AFEDEFEAC36CF007009CCCE5C1ACB4FF06FBA69802A8085C2C54BADD0597FC83E6870F1E36FD
#rsa_pubexp = 0x010001
APP_VERSION = "2.4.1.3"
PROTOCOL_VERSION = 200
VASID_DOWN = 14 # vasid for downstream accel
VASID_UP = 33 # vasid for upstream accel
FALLBACK_MAC = '000000000000'
FALLBACK_PORTAL = "119.147.41.210:12180"
FALLBACK_UPPORTAL = "153.37.208.185:81"
UNICODE_WARNING_SHOWN = False
PY3K = sys.version_info[0] == 3
if not PY3K:
import urllib2
from urllib2 import URLError
from urllib import quote as url_quote
from cStringIO import StringIO as sio
#rsa_pubexp = long(rsa_pubexp)
else:
import urllib.request as urllib2
from urllib.error import URLError
from urllib.parse import quote as url_quote
from io import BytesIO as sio
account_session = '.swjsq.session'
account_file_plain = 'swjsq.account.txt'
shell_file = 'swjsq_wget.sh'
ipk_file = 'swjsq_0.0.1_all.ipk'
log_file = 'swjsq.log'
login_xunlei_intv = 600 # do not login twice in 10min
DEVICE = "SmallRice R1"
DEVICE_MODEL = "R1"
OS_VERSION = "5.0.1"
OS_API_LEVEL = "24"
OS_BUILD = "LRX22C"
header_xl = {
'Content-Type':'',
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip',
'User-Agent': 'android-async-http/xl-acc-sdk/version-2.1.1.177662'
}
header_api = {
'Content-Type':'',
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip',
'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android %s; %s Build/%s)' % (OS_VERSION, DEVICE_MODEL, OS_BUILD)
}
def get_mac(nic = '', to_splt = ':'):
if os.name == 'nt':
cmd = 'ipconfig /all'
splt = '-'
elif os.name == "posix":
if os.path.exists('/usr/bin/ip') or os.path.exists('/bin/ip'):
if nic:
cmd = 'ip link show dev %s' % nic
else:
# Unfortunately, loopback interface always comes first
# So we have to grep it out
cmd = 'ip link show up | grep -v loopback'
else:
cmd = 'ifconfig %s' % (nic or '-a')
splt = ':'
else:
return FALLBACK_MAC
try:
r = os.popen(cmd).read()
if r:
_ = re.findall('((?:[0-9A-Fa-f]{2}%s){5}[0-9A-Fa-f]{2})' % splt, r)
if not _:
return FALLBACK_MAC
else:
return _[0].replace(splt, to_splt)
except:
pass
return FALLBACK_MAC
def api_url(up = False):
portal = None
if up:
portals = (("", "up", 80), )
else:
portals = (("", "", 81), ("2", "", 81), ("", "", 82))
for cmb in portals:
try:
portal = json.loads(http_req("http://api%s.%sportal.swjsq.vip.xunlei.com:%d/v2/queryportal" % cmb))
except:
pass
else:
break
if not portal or portal['errno']:
print('Warning: get interface_ip failed, use fallback address')
if up:
return FALLBACK_UPPORTAL
else:
return FALLBACK_PORTAL
return '%s:%s' % (portal['interface_ip'], portal['interface_port'])
def long2hex(l):
return hex(l)[2:].upper().rstrip('L')
_real_print = print
logfd = open(log_file, 'ab')
def print(s, **kwargs):
line = "%s %s" % (time.strftime('%X', time.localtime(time.time())), s)
if PY3K:
logfd.write(line.encode('utf-8'))
else:
try:
logfd.write(line)
except UnicodeEncodeError:
logfd.write(line.encode('utf-8'))
if PY3K:
logfd.write(b'\n')
else:
logfd.write("\n")
_real_print(line, **kwargs)
def uprint(s, fallback = None, end = None):
global UNICODE_WARNING_SHOWN
while True:
try:
print(s, end = end)
except UnicodeEncodeError:
if UNICODE_WARNING_SHOWN:
print('Warning: locale of your system may not be utf8 compatible, output will be truncated')
UNICODE_WARNING_SHOWN = True
else:
break
try:
print(s.encode('utf-8'), end = end)
except UnicodeEncodeError:
if fallback:
print(fallback, end = end)
break
def http_req(url, headers = {}, body = None, encoding = 'utf-8'):
req = urllib2.Request(url)
for k in headers:
req.add_header(k, headers[k])
if sys.version.startswith('3') and isinstance(body, str):
body = bytes(body, encoding = 'ascii')
resp = urllib2.urlopen(req, data = body, timeout = 60)
buf = resp.read()
# check if response is gzip encoded
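# (b'\037\213' is the two-byte gzip magic number, i.e. 0x1f 0x8b)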
if buf.startswith(b'\037\213'):
try:
buf = zlib.decompress(buf, 16 + zlib.MAX_WBITS) # skip gzip headers
except Exception as ex:
print('Warning: malformed gzip response (%s).' % str(ex))
# buf is unchanged
ret = buf.decode(encoding)
if sys.version.startswith('3') and isinstance(ret, bytes):
ret = str(ret)
return ret
class fast_d1ck(object):
def __init__(self):
self.api_url = api_url(up = False)
self.api_up_url = api_url(up = True)
self.mac = get_mac(to_splt = '').upper() + '004V'
self.xl_uid = None
self.xl_session = None
self.xl_loginkey = None
self.xl_login_payload = None
self.last_login_xunlei = 0
self.do_down_accel = False
self.do_up_accel = False
self.state = 0
def load_xl(self, dt):
if 'sessionID' in dt:
self.xl_session = dt['sessionID']
if 'userID' in dt:
self.xl_uid = dt['userID']
if 'loginKey' in dt:
self.xl_loginkey = dt['loginKey']
def login_xunlei(self, uname, pwd):
_ = int(login_xunlei_intv - time.time() + self.last_login_xunlei)
if _ > 0:
print("sleep %ds to prevent login flood" % _)
time.sleep(_)
self.last_login_xunlei = time.time()
# pwd = rsa_encode(pwd_md5)
fake_device_id = hashlib.md5(("msfdc%s23333" % pwd).encode('utf-8')).hexdigest() # just generate a 32-character hex string
# sign = div.10?.device_id + md5(sha1(packageName + businessType + md5(a protocolVersion specific GUID)))
device_sign = "div101.%s%s" % (fake_device_id, hashlib.md5(
hashlib.sha1(("%scom.xunlei.vip.swjsq68c7f21687eed3cdb400ca11fc2263c998" % fake_device_id).encode('utf-8'))
.hexdigest().encode('utf-8')
).hexdigest())
_payload = {
"protocolVersion": str(PROTOCOL_VERSION),
"sequenceNo": "1000001",
"platformVersion": "2",
"sdkVersion": "177662",
"peerID": self.mac,
"businessType": "68",
"clientVersion": APP_VERSION,
"devicesign":device_sign,
"isCompressed": "0",
#"cmdID": 1,
"userName": uname,
"passWord": pwd,
#"loginType": 0, # normal account
"sessionID": "",
"verifyKey": "",
"verifyCode": "",
"appName": "ANDROID-com.xunlei.vip.swjsq",
#"rsaKey": {
# "e": "%06X" % rsa_pubexp,
# "n": long2hex(rsa_mod)
#},
#"extensionList": "",
"deviceModel": DEVICE_MODEL,
"deviceName": DEVICE,
"OSVersion": OS_VERSION
}
ct = http_req('https://mobile-login.xunlei.com:443/login', body=json.dumps(_payload), headers=header_xl, encoding='utf-8')
self.xl_login_payload = _payload
dt = json.loads(ct)
self.load_xl(dt)
return dt
def check_xunlei_vas(self, vasid):
# copy original payload to new dict
_payload = dict(self.xl_login_payload)
_payload.update({
"sequenceNo": "1000002",
"vasid": str(vasid),
"userID": str(self.xl_uid),
"sessionID": self.xl_session,
#"extensionList": [
# "payId", "isVip", "mobile", "birthday", "isSubAccount", "isAutoDeduct", "isYear", "imgURL",
# "vipDayGrow", "role", "province", "rank", "expireDate", "personalSign", "jumpKey", "allowScore",
# "nickName", "vipGrow", "isSpecialNum", "vipLevel", "order", "payName", "isRemind", "account",
# "sex", "vasType", "register", "todayScore", "city", "country"
#]
})
# delete unwanted kv pairs
for k in ('userName', 'passWord', 'verifyKey', 'verifyCode'):
del _payload[k]
ct = http_req('https://mobile-login.xunlei.com:443/getuserinfo', body=json.dumps(_payload), headers=header_xl, encoding='utf-8')
return json.loads(ct)
def renew_xunlei(self):
_ = int(login_xunlei_intv - time.time() + self.last_login_xunlei)
if _ > 0:
print("sleep %ds to prevent login flood" % _)
time.sleep(_)
self.last_login_xunlei = time.time()
_payload = dict(self.xl_login_payload)
_payload.update({
"sequenceNo": "1000001",
"userName": str(self.xl_uid), #wtf
"loginKey": self.xl_loginkey,
})
for k in ('passWord', 'verifyKey', 'verifyCode', "sessionID"):
del _payload[k]
ct = http_req('https://mobile-login.xunlei.com:443/loginkey', body=json.dumps(_payload), headers=header_xl, encoding='utf-8')
dt = json.loads(ct)
self.load_xl(dt)
return dt
def api(self, cmd, extras = '', no_session = False):
ret = {}
for _k1, api_url_k, _clienttype, _v in (('down', 'api_url', 'swjsq', 'do_down_accel'), ('up', 'api_up_url', 'uplink', 'do_up_accel')):
if not getattr(self, _v):
continue
while True:
# missing dial_account, (userid), os
api_url = getattr(self, api_url_k)
# TODO: phasing out time_and
url = 'http://%s/v2/%s?%sclient_type=android-%s-%s&peerid=%s&time_and=%d&client_version=android%s-%s&userid=%s&os=android-%s%s' % (
api_url,
cmd,
('sessionid=%s&' % self.xl_session) if not no_session else '',
_clienttype, APP_VERSION,
self.mac,
time.time() * 1000,
_clienttype, APP_VERSION,
self.xl_uid,
url_quote("%s.%s%s" % (OS_VERSION, OS_API_LEVEL, DEVICE_MODEL)),
('&%s' % extras) if extras else '',
)
try:
ret[_k1] = {}
ret[_k1] = json.loads(http_req(url, headers = header_api))
break
except URLError as ex:
uprint("Warning: error during %s api connection: %s, using portal: %s" % (_k1, str(ex), api_url))
if (_k1 == 'down' and api_url == FALLBACK_PORTAL) or (_k1 == 'up' and api_url == FALLBACK_UPPORTAL):
print("Error: can't connect to %s api" % _k1)
os._exit(5)
if _k1 == 'down':
setattr(self, api_url_k, FALLBACK_PORTAL)
elif _k1 == 'up':
setattr(self, api_url_k, FALLBACK_UPPORTAL)
return ret
def run(self, uname, pwd, save=True):
if uname[-2] == ':':
print('Error: sub account can not upgrade')
os._exit(3)
login_methods = [lambda : self.login_xunlei(uname, pwd)]
if self.xl_session:
login_methods.insert(0, self.renew_xunlei)
failed = True
for _lm in login_methods:
dt = _lm()
if dt['errorCode'] != "0" or not self.xl_session or not self.xl_loginkey:
uprint('Error: login xunlei failed, %s' % dt['errorDesc'], 'Error: login failed')
print(dt)
else:
failed = False
break
if failed:
os._exit(1)
print('Login xunlei succeeded')
yyyymmdd = time.strftime("%Y%m%d", time.localtime(time.time()))
if 'vipList' not in dt:
vipList = []
else:
vipList = dt['vipList']
# chaoji member
if vipList and vipList[0]['isVip'] == "1" and vipList[0]['vasType'] == "5" and vipList[0]['expireDate'] > yyyymmdd: # chaoji membership
self.do_down_accel = True
# self.do_up_accel = True
print('Expire date for chaoji member: %s' % vipList[0]['expireDate'])
# kuainiao down/up member
_vas_debug = []
for _vas, _name, _v in ((VASID_DOWN, 'fastdick', 'do_down_accel'), (VASID_UP, 'upstream acceleration', 'do_up_accel')):
if getattr(self, _v): # don't check again if vas is activated in other membership
continue
_dt = self.check_xunlei_vas(_vas)
if 'vipList' not in _dt or not _dt['vipList']:
continue
for vip in _dt['vipList']:
if vip['vasid'] == str(_vas):
_vas_debug.append(vip)
if vip['isVip'] == "1":
if vip['expireDate'] < yyyymmdd:
print('Warning: Your %s membership expires on %s' % (_name, vip['expireDate']))
else:
print('Expire date for %s: %s' % (_name, vip['expireDate']))
setattr(self, _v, True)
if not self.do_down_accel and not self.do_up_accel:
print('Error: You are neither xunlei fastdick member nor upstream acceleration member, buy buy buy!\nDebug: %s' % _vas_debug)
os._exit(2)
if save:
try:
os.remove(account_file_plain)
except:
pass
with open(account_session, 'w') as f:
f.write('%s\n%s' % (json.dumps(dt), json.dumps(self.xl_login_payload)))
api_ret = self.api('bandwidth', no_session = True)
_to_upgrade = []
for _k1, _k2, _name, _v in (
('down', 'downstream', 'fastdick', 'do_down_accel'),
('up', 'upstream', 'upstream acceleration', 'do_up_accel')):
if not getattr(self, _v):
continue
_ = api_ret[_k1]
if 'can_upgrade' not in _ or not _['can_upgrade']:
uprint('Warning: %s can not upgrade, so sad TAT: %s' % (_name, _['message']), 'Error: %s can not upgrade, so sad TAT' % _name)
setattr(self, _v, False)
else:
_to_upgrade.append('%s %dM -> %dM' % (
_k1,
_['bandwidth'][_k2]/1024,
_['max_bandwidth'][_k2]/1024,
))
if not self.do_down_accel and not self.do_up_accel:
print("Error: neither downstream nor upstream can be upgraded")
os._exit(3)
_avail = api_ret[list(api_ret.keys())[0]]
uprint('To Upgrade: %s%s %s' % ( _avail['province_name'], _avail['sp_name'], ", ".join(_to_upgrade)),
'To Upgrade: %s %s %s' % ( _avail['province'], _avail['sp'], ", ".join(_to_upgrade))
)
_dial_account = _avail['dial_account']
# _script_mtime = os.stat(os.path.realpath(__file__)).st_mtime
# if not os.path.exists(shell_file) or os.stat(shell_file).st_mtime < _script_mtime:
# self.make_wget_script(pwd, _dial_account)
# if not os.path.exists(ipk_file) or os.stat(ipk_file).st_mtime < _script_mtime:
# update_ipk()
#print(_)
def _atexit_func():
print("Sending recover request")
try:
self.api('recover', extras = "dial_account=%s" % _dial_account)
except KeyboardInterrupt:
print('Secondary ctrl+c pressed, exiting')
try:
logfd.close()
except:
pass
atexit.register(_atexit_func)
self.state = 0
while True:
has_error = False
try:
# self.state=1~17 keepalive, self.state++
# self.state=18 (3h) re-upgrade all, self.state-=18
# self.state=100 login, self.state:=18
if self.state == 100:
_dt_t = self.renew_xunlei()
if int(_dt_t['errorCode']):
time.sleep(60)
dt = self.login_xunlei(uname, pwd)
if int(dt['errorCode']):
self.state = 100
continue
else:
_dt_t = dt
self.state = 18
if self.state % 18 == 0:#3h
print('Initializing upgrade')
if self.state:# not first time
self.api('recover', extras = "dial_account=%s" % _dial_account)
time.sleep(5)
api_ret = self.api('upgrade', extras = "user_type=1&dial_account=%s" % _dial_account)
#print(_)
_upgrade_done = []
for _k1, _k2 in ('down', 'downstream'), ('up', 'upstream'):
if _k1 not in api_ret:
continue
if not api_ret[_k1]['errno']:
_upgrade_done.append("%s %dM" % (_k1, api_ret[_k1]['bandwidth'][_k2]/1024))
if _upgrade_done:
print("Upgrade done: %s" % ", ".join(_upgrade_done))
else:
# _dt_t = self.renew_xunlei()
# if _dt_t['errorCode']:
# self.state = 100
# continue
try:
api_ret = self.api('keepalive')
except Exception as ex:
print("keepalive exception: %s" % str(ex))
time.sleep(60)
self.state = 18
continue
for _k1, _k2, _name, _v in ('down', 'Downstream', 'fastdick', 'do_down_accel'), ('up', 'Upstream', 'upstream acceleration', 'do_up_accel'):
if _k1 in api_ret and api_ret[_k1]['errno']:
_ = api_ret[_k1]
print('%s error %s: %s' % (_k2, _['errno'], _['message']))
if _['errno'] in (513, 824):# TEST: re-upgrade when get 513 or 824 speedup closed
self.state = 100
elif _['errno'] == 812:
print('%s already upgraded, continuing' % _k2)
elif _['errno'] == 717 or _['errno'] == 718:# re-upgrade when get 'account auth session failed'
self.state = 100
elif _['errno'] == 518: # disable down/up when get qurey vip response user not has business property
print("Warning: membership expired? Disabling %s" % _name)
setattr(self, _v, False)
else:
has_error = True
if self.state == 100:
continue
except Exception as ex:
import traceback
_ = traceback.format_exc()
print(_)
has_error = True
if has_error:
# sleep 5 min and repeat the same state
time.sleep(290)#5 min
else:
self.state += 1
time.sleep(590)#10 min
def make_wget_script(self, pwd, dial_account):
# i=1~17 keepalive, renew session, i++
# i=18 (3h) re-upgrade, i:=0
# i=100 login, i:=18
xl_renew_payload = dict(self.xl_login_payload)
xl_renew_payload.update({
"sequenceNo": "1000001",
"userName": str(self.xl_uid), #wtf
"loginKey": "$loginkey",
})
for k in ('passWord', 'verifyKey', 'verifyCode', "sessionID"):
del xl_renew_payload[k]
with open(shell_file, 'wb') as f:
_ = '''#!/bin/ash
TEST_URL="https://baidu.com"
UA_XL="User-Agent: swjsq/0.0.1"
if [ ! -z "`wget --no-check-certificate -O - $TEST_URL 2>&1|grep "100%"`" ]; then
HTTP_REQ="wget -q --no-check-certificate -O - "
POST_ARG="--post-data="
else
command -v curl >/dev/null 2>&1 && curl -kI $TEST_URL >/dev/null 2>&1 || { echo >&2 "Xunlei-FastD1ck cannot find wget or curl installed with https(ssl) enabled in this system."; exit 1; }
HTTP_REQ="curl -ks"
POST_ARG="--data "
fi
uid='''+str(self.xl_uid)+'''
pwd='''+pwd+'''
nic=eth0
peerid='''+self.mac+'''
uid_orig=$uid
last_login_xunlei=0
login_xunlei_intv='''+str(login_xunlei_intv)+'''
day_of_month_orig=`date +%d`
orig_day_of_month=`echo $day_of_month_orig|grep -oE "[1-9]{1,2}"`
#portal=`$HTTP_REQ http://api.portal.swjsq.vip.xunlei.com:82/v2/queryportal`
#portal_ip=`echo $portal|grep -oE '([0-9]{1,3}[\.]){3}[0-9]{1,3}'`
#portal_port_temp=`echo $portal|grep -oE "port...[0-9]{1,5}"`
#portal_port=`echo $portal_port_temp|grep -oE '[0-9]{1,5}'`
portal_ip='''+self.api_url.split(":")[0]+'''
portal_port='''+self.api_url.split(":")[1]+'''
portal_up_ip='''+self.api_up_url.split(":")[0]+'''
portal_up_port='''+self.api_up_url.split(":")[1]+'''
if [ -z "$portal_ip" ]; then
sleep 30
portal=`$HTTP_REQ http://api.portal.swjsq.vip.xunlei.com:81/v2/queryportal`
portal_ip=`echo $portal|grep -oE '([0-9]{1,3}[\.]){3}[0-9]{1,3}'`
portal_port_temp=`echo $portal|grep -oE "port...[0-9]{1,5}"`
portal_port=`echo $portal_port_temp|grep -oE '[0-9]{1,5}'`
if [ -z "$portal_ip" ]; then
portal_ip="'''+FALLBACK_PORTAL.split(":")[0]+'''"
portal_port='''+FALLBACK_PORTAL.split(":")[1]+'''
fi
fi
log () {
echo `date +%X 2>/dev/null` $@
}
api_url="http://$portal_ip:$portal_port/v2"
api_up_url="http://$portal_up_ip:$portal_up_port/v2"
do_down_accel='''+str(int(self.do_down_accel))+'''
do_up_accel='''+str(int(self.do_up_accel))+'''
i=100
while true; do
if test $i -ge 100; then
tmstmp=`date "+%s"`
let slp=login_xunlei_intv-tmstmp+last_login_xunlei
if test $slp -ge 0; then
sleep $slp
fi
last_login_xunlei=$tmstmp
if [ ! -z "$loginkey" ]; then
log "renew xunlei"
ret=`$HTTP_REQ https://mobile-login.xunlei.com:443/loginkey $POST_ARG"'''+json.dumps(xl_renew_payload).replace('"','\\"')+'''" --header "$UA_XL"`
error_code=`echo $ret|grep -oE "errorCode...[0-9]+"|grep -oE "[0-9]+"`
if [[ -z $error_code || $error_code -ne 0 ]]; then
log "renew error code $error_code"
fi
session_temp=`echo $ret|grep -oE "sessionID...[A-F,0-9]{32}"`
session=`echo $session_temp|grep -oE "[A-F,0-9]{32}"`
if [ -z "$session" ]; then
log "renew session is empty"
sleep 60
else
log "session is $session"
fi
fi
if [ -z "$session" ]; then
log "login xunlei"
ret=`$HTTP_REQ https://mobile-login.xunlei.com:443/login $POST_ARG"'''+json.dumps(self.xl_login_payload).replace('"','\\"')+'''" --header "$UA_XL"`
session_temp=`echo $ret|grep -oE "sessionID...[A-F,0-9]{32}"`
session=`echo $session_temp|grep -oE "[A-F,0-9]{32}"`
uid_temp=`echo $ret|grep -oE "userID...[0-9]+"`
uid=`echo $uid_temp|grep -oE "[0-9]+"`
if [ -z "$session" ]; then
log "login session is empty"
uid=$uid_orig
else
log "session is $session"
fi
if [ -z "$uid" ]; then
#echo "uid is empty"
uid=$uid_orig
else
log "uid is $uid"
fi
fi
if [ -z "$session" ]; then
sleep 600
continue
fi
loginkey=`echo $ret|grep -oE "lk...[a-f,0-9,\.]{96}"`
i=18
fi
if test $i -eq 18; then
log "upgrade"
_ts=`date +%s`0000
if test $do_down_accel -eq 1; then
$HTTP_REQ "$api_url/upgrade?peerid=$peerid&userid=$uid&sessionid=$session&user_type=1&client_type=android-swjsq-'''+APP_VERSION+'''&time_and=$_ts&client_version=androidswjsq-'''+APP_VERSION+'''&os=android-'''+OS_VERSION+'.'+OS_API_LEVEL+DEVICE_MODEL+'''&dial_account='''+dial_account+'''"
fi
if test $do_up_accel -eq 1; then
$HTTP_REQ "$api_up_url/upgrade?peerid=$peerid&userid=$uid&sessionid=$session&user_type=1&client_type=android-uplink-'''+APP_VERSION+'''&time_and=$_ts&client_version=androiduplink-'''+APP_VERSION+'''&os=android-'''+OS_VERSION+'.'+OS_API_LEVEL+DEVICE_MODEL+'''&dial_account='''+dial_account+'''"
fi
i=1
sleep 590
continue
fi
sleep 1
day_of_month_orig=`date +%d`
day_of_month=`echo $day_of_month_orig|grep -oE "[1-9]{1,2}"`
if [[ -z $orig_day_of_month || $day_of_month -ne $orig_day_of_month ]]; then
log "recover"
orig_day_of_month=$day_of_month
_ts=`date +%s`0000
if test $do_down_accel -eq 1; then
$HTTP_REQ "$api_url/recover?peerid=$peerid&userid=$uid&sessionid=$session&client_type=android-swjsq-'''+APP_VERSION+'''&time_and=$_ts&client_version=androidswjsq-'''+APP_VERSION+'''&os=android-'''+OS_VERSION+'.'+OS_API_LEVEL+DEVICE_MODEL+'''&dial_account='''+dial_account+'''"
fi
if test $do_up_accel -eq 1; then
$HTTP_REQ "$api_up_url/recover?peerid=$peerid&userid=$uid&sessionid=$session&client_type=android-uplink-'''+APP_VERSION+'''&time_and=$_ts&client_version=androiduplink-'''+APP_VERSION+'''&os=android-'''+OS_VERSION+'.'+OS_API_LEVEL+DEVICE_MODEL+'''&dial_account='''+dial_account+'''"
fi
sleep 5
i=100
continue
fi
log "keepalive"
_ts=`date +%s`0000
if test $do_down_accel -eq 1; then
ret=`$HTTP_REQ "$api_url/keepalive?peerid=$peerid&userid=$uid&sessionid=$session&client_type=android-swjsq-'''+APP_VERSION+'''&time_and=$_ts&client_version=androidswjsq-'''+APP_VERSION+'''&os=android-'''+OS_VERSION+'.'+OS_API_LEVEL+DEVICE_MODEL+'''&dial_account='''+dial_account+'''"`
if [[ -z $ret ]]; then
sleep 60
i=18
continue
fi
if [ ! -z "`echo $ret|grep "not exist channel"`" ]; then
i=100
fi
if [ ! -z "`echo $ret|grep "user not has business property"`" ]; then
log "membership expired? disabling fastdick"
do_down_accel=0
fi
fi
if test $do_up_accel -eq 1; then
ret=`$HTTP_REQ "$api_up_url/keepalive?peerid=$peerid&userid=$uid&sessionid=$session&client_type=android-uplink-'''+APP_VERSION+'''&time_and=$_ts&client_version=androiduplink-'''+APP_VERSION+'''&os=android-'''+OS_VERSION+'.'+OS_API_LEVEL+DEVICE_MODEL+'''&dial_account='''+dial_account+'''"`
if [[ -z $ret ]]; then
sleep 60
i=18
continue
fi
if [ ! -z "`echo $ret|grep "not exist channel"`" ]; then
i=100
fi
if [ ! -z "`echo $ret|grep "user not has business property"`" ]; then
log "membership expired? disabling upstream acceleration"
do_up_accel=0
fi
fi
if test $i -ne 100; then
let i=i+1
sleep 590
fi
done
'''.replace("\r", "")
if PY3K:
_ = _.encode("utf-8")
f.write(_)
def update_ipk():
def _sio(s = None):
if not s:
return sio()
if PY3K:
return sio(bytes(s, "ascii"))
else:
return sio(s)
def flen(fobj):
pos = fobj.tell()
fobj.seek(0)
_ = len(fobj.read())
fobj.seek(pos)
return _
def add_to_tar(tar, name, sio_obj, perm = 420):
info = tarfile.TarInfo(name = name)
info.size = flen(sio_obj)
info.mode = perm
sio_obj.seek(0)
tar.addfile(info, sio_obj)
if os.path.exists(ipk_file):
os.remove(ipk_file)
ipk_fobj = tarfile.open(name = ipk_file, mode = 'w:gz')
data_stream = sio()
data_fobj = tarfile.open(fileobj = data_stream, mode = 'w:gz')
# /usr/bin/swjsq
data_content = open(shell_file, 'rb')
add_to_tar(data_fobj, './bin/swjsq', data_content, perm = 511)
# /etc/init.d/swjsq
data_content = _sio('''#!/bin/sh /etc/rc.common
START=90
STOP=15
USE_PROCD=1
start_service()
{
procd_open_instance
procd_set_param respawn ${respawn_threshold:-3600} ${respawn_timeout:-5} ${respawn_retry:-5}
procd_set_param command /bin/swjsq
procd_set_param stdout 1
procd_set_param stderr 1
procd_close_instance
}
''')
add_to_tar(data_fobj, './etc/init.d/swjsq', data_content, perm = 511)
# wrap up
data_fobj.close()
add_to_tar(ipk_fobj, './data.tar.gz', data_stream)
data_stream.close()
control_stream = sio()
control_fobj = tarfile.open(fileobj = control_stream, mode = 'w:gz')
control_content = _sio('''Package: swjsq
Version: 0.0.1
Depends: libc
Source: none
Section: net
Maintainer: fffonion
Architecture: all
Installed-Size: %d
Description: Xunlei Fast Dick
''' % flen(data_content))
add_to_tar(control_fobj, './control', control_content)
control_fobj.close()
add_to_tar(ipk_fobj, './control.tar.gz', control_stream)
control_stream.close()
data_content.close()
control_content.close()
debian_binary_stream = _sio('2.0\n')
add_to_tar(ipk_fobj, './debian-binary', debian_binary_stream)
debian_binary_stream.close()
ipk_fobj.close()
if __name__ == '__main__':
# change to script directory
if getattr(sys, 'frozen', False):
_wd = os.path.dirname(os.path.realpath(sys.executable))
else:
_wd = sys.path[0]
os.chdir(_wd)
ins = fast_d1ck()
try:
if os.path.exists(account_file_plain):
uid, pwd = open(account_file_plain).read().strip().split(',')
ins.run(uid, pwd)
elif os.path.exists(account_session):
with open(account_session) as f:
session = json.loads(f.readline())
ins.xl_login_payload = json.loads(f.readline())
ins.load_xl(session)
ins.run(ins.xl_login_payload['userName'], ins.xl_login_payload['passWord'])
elif 'XUNLEI_UID' in os.environ and 'XUNLEI_PASSWD' in os.environ:
uid = os.environ['XUNLEI_UID']
pwd = os.environ['XUNLEI_PASSWD']
ins.run(uid, pwd)
else:
_real_print('Please use the XUNLEI_UID=<uid>/XUNLEI_PASSWD=<pass> environment variables or create the config file "%s" with the account and password separated by a comma (,). E.g.:\nyonghuming,mima' % account_file_plain)
except KeyboardInterrupt:
pass
|
|
#!/usr/bin/env python
#==============================================================================
#title :lml-prepare.py
#description :Prepares a SOS database to receive observation data.
# Inserts sensor (procedure), resulttemplate, featureofinterest,
# based on station,sensor,unit configuration.
# Uses the SOS server json and kvp api (config in database),
# database configuration in 'sos_config.py'.
#author :Wouter Boasson
#date :20140731
#version :1.1
#usage :python lml-prepare.py
#notes :Running the script is pretty harmless, the SOS server with
# database constraints prevent duplicates in the database.
#python_version :2.7.x
#==============================================================================
import os, sys, time
import urllib, urllib2, socket
import json
import uuid
import psycopg2, psycopg2.extras
from datetime import datetime
from datetime import timedelta
import xml.etree.ElementTree as ET
from sos_config import *
print """Prepare SOS-service for LML-data.
(c) Wouter Boasson, 2014
"""
VERBOSE = False
DRYRUN = False
#VERBOSE = True
#DRYRUN = True
# Global variables + example
# The actual values will be retrieved from the database (table: <schema>.configuration)
#SOSSERVER = "http://your.sosserver.eu/sos/"
#SOSJSON = "sos/json"
#SOSKVP = "sos/kvp?"
#AUTHTOKEN = "Your auth token"
#HTTPTIMEOUT = 5 (in seconds)
#RETRYWAIT = 0.05 (in seconds)
now = datetime.now()
# Read configuration keys from database
def GetConfigFromDb(pgcur, key):
sql = "SELECT configvalue FROM configuration WHERE key = %s;"
pgcur.execute(sql, (key, ))
return pgcur.fetchone()[0]
# Http GET request to the SOS KVP service endpoint
# (needed to delete temporary observations)
def HttpGet(myrequest):
fullpath = SOSKVP + myrequest
socket.setdefaulttimeout(HTTPTIMEOUT)
headers = {"Authorization": AUTHTOKEN}
req = urllib2.Request(SOSSERVER + fullpath, None, headers)
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError as e:
print ' the server couldn\'t fulfill the request.'
print ' error code:', e.code, fullpath
return "ERROR:HTTP " + str(e.code) + " " + SOSSERVER + fullpath
except urllib2.URLError as e:
print ' we failed to reach a server.'
print ' reason:', e.reason, fullpath
return "ERROR:URL " + str(e.reason) + " " + SOSSERVER + fullpath
except socket.timeout as e:
print ' server timeout', fullpath
return "ERROR:CONNECTION " + str(e) + " " + SOSSERVER + fullpath
else:
result = response.read()
return result
# POST request to the SOS JSON service
# (in use for inserting sensor, resulttemplate, featureofinterest, observation)
def HttpPostData(postdata):
socket.setdefaulttimeout(HTTPTIMEOUT)
try:
headers = {"Content-type": "application/json", "Accept": "application/json", "Authorization": AUTHTOKEN}
req = urllib2.Request(SOSSERVER + SOSJSON, postdata, headers)
response = urllib2.urlopen(req)
return response.read()
except urllib2.HTTPError as e:
print ' error: ' + str(e.code)
return "ERROR:HTTP " + str(e.code)
# Insert error message in database
# (logging)
def LogMsg(pgcur, operation, filename, msg):
msglevel = msg[:msg.find(":")]
sql = "INSERT INTO message_log(msgtimestamp, operation, filename, msglevel, msg, ts_created) values (clock_timestamp(), %s, %s, %s, %s, now());"
pgcur.execute(sql, (operation, filename, msglevel, msg[msg.find(":") + 1:]))
# Get a template
# (templates for sensor, resulttemplate and featureofinterest and observation are
# stored in the database)
def GetTemplate(pgcur, tpltype, template):
sql = "SELECT contents FROM templates WHERE templatetype = %s AND templatename = %s"
pgcur.execute(sql, (tpltype, template))
return str(pgcur.fetchone()[0]).replace("\r", "\n") # just in case, when created on a mac
# Get the SensorML (produced by a database function)
# (SensorMl is also constructed from a template, but that's done in the database, easier
# because a lot of data have to be pulled together)
def GetSensorMl(pgcur, template, publishstatcode, component, inputname, outputname):
sql = "select process_sensor_tpl from " + SCHEMA + ".process_sensor_tpl(%s, %s, %s, %s, %s);"
pgcur.execute(sql, (template, publishstatcode, component, inputname, outputname))
return str(pgcur.fetchone()[0]).replace("\r", "\n") # just in case, when created on a mac
# ###################################################################################################################
# 'Main'
# Open global database connection
conn_string = "host='"+DBHOST+"' port='"+DBPORT+"' dbname='"+DATABASE+"' user='"+DBUSER+"' password='" + DBPWD + "'"
print "Connecting to database..."
conn = psycopg2.connect(conn_string)
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute("SET search_path = " + SCHEMA + ",public;")
print "Connected!\n"
# Get configuration (see top of file for examples)
SOSSERVER = GetConfigFromDb(cursor, 'sos.server.httpaddress')
SOSJSON = GetConfigFromDb(cursor, 'sos.server.api.json')
SOSKVP = GetConfigFromDb(cursor, 'sos.server.api.kvp')
AUTHTOKEN = GetConfigFromDb(cursor, 'sos.server.authtoken')
HTTPTIMEOUT = float(GetConfigFromDb(cursor, 'http.timeout'))
RETRYWAIT = float(GetConfigFromDb(cursor, 'http.retrywait'))
# Set XML namespaces
nsp = {'sml': 'http://www.opengis.net/sensorML/1.0.1'
, 'swe': 'http://www.opengis.net/swe/1.0.1'}
# Loop through the active and to publish sensors
sql = "SELECT * FROM vw_statsensunit WHERE publish_sos = true AND activityend is null;"
cursor.execute(sql)
result = cursor.fetchall()
tempIds = [] # list of temporary ids that need to be removed (observations are created to get the units inserted)
print 'Creating sensors and features of interest (by inserting temporary observations)'
c = 0
for r in result:
print r['publishstationcode'] + ' (' + r['name'] + '): ' + r['sensorcode']
# Get the SensorML, and initiate XML parser
sensorml = GetSensorMl(cursor, 'SensorML.basic', r['publishstationcode'], r['sensorcode'], 'air', r['sensorcode'] + '_value')
try:
root = ET.fromstring(sensorml)
except:
print sensorml
sys.exit()
# ObservableProperty read from the SensorML (so they always match :-) )
observableproperty = root.findall(".//sml:outputs/sml:OutputList/sml:output/swe:Category", nsp)[0].attrib['definition']
# Offering
offering = root.findall(".//*[@definition='urn:ogc:def:identifier:OGC:offeringID']/swe:value", nsp)[0].text
sensorid = root.findall(".//*[@definition='urn:ogc:def:identifier:OGC:1.0:uniqueID']/sml:value", nsp)[0].text
# FeatureOfInterest
foiId = root.findall(".//*/sml:capabilities[@name='featuresOfInterest']/swe:SimpleDataRecord/swe:field[@name='featureOfInterestID']/swe:Text/swe:value", nsp)[0].text
foiName = r["name"]
if r["municipality"] != '':
foiName = foiName + ", " + r['municipality']
foiSampledfeat = root.findall(".//sml:inputs/sml:InputList/sml:input/swe:ObservableProperty", nsp)[0].attrib['definition']
# X, Y, Z
posY = root.findall(".//*/sml:position[@name='sensorPosition']/swe:Position/swe:location/swe:Vector/swe:coordinate[@name='northing']/swe:Quantity/swe:value", nsp)[0].text
posX = root.findall(".//*/sml:position[@name='sensorPosition']/swe:Position/swe:location/swe:Vector/swe:coordinate[@name='easting']/swe:Quantity/swe:value", nsp)[0].text
posZ = root.findall(".//*/sml:position[@name='sensorPosition']/swe:Position/swe:location/swe:Vector/swe:coordinate[@name='altitude']/swe:Quantity/swe:value", nsp)[0].text
# Observation
unit = r["m_unit"] # from unit table, everything else can be default, will be thrown away
# Process the insertsensor template
insertsensor = GetTemplate(cursor, "JSON", "InsertSensor.SamplingPoint.Measurement")
insertsensor = insertsensor.replace("$procedure.sensorml$", sensorml.replace("\"","\\\""))
insertsensor = insertsensor.replace("$sensor.observableproperty.output.id$", observableproperty)
insertsensor = insertsensor.replace("\r","").replace("\n","")
if VERBOSE:
print insertsensor
if not DRYRUN:
j = HttpPostData(insertsensor)
try:
print ' => ' + json.loads(j)['request'] + ' ' + json.loads(j)['assignedProcedure']
except:
print ' => error, probably already present'
# Process the insertresulttemplate template
insertresulttpl = GetTemplate(cursor, "JSON", "InsertResultTemplate")
insertresulttpl = insertresulttpl.replace('$sensor.id$', sensorid)
insertresulttpl = insertresulttpl.replace('$sensor.resulttemplate.id$', sensorid + '/template/basic')
insertresulttpl = insertresulttpl.replace('$sensor.offering$', offering)
insertresulttpl = insertresulttpl.replace('$sensor.observableproperty.output.id$', observableproperty)
insertresulttpl = insertresulttpl.replace('$sensor.featureofinterest.id$', foiId)
insertresulttpl = insertresulttpl.replace("$sensor.featureofinterest.name$", foiName)
insertresulttpl = insertresulttpl.replace('$sensor.output.name$', 'value')
insertresulttpl = insertresulttpl.replace('$sensor.observableproperty.output.id$', observableproperty)
insertresulttpl = insertresulttpl.replace("$sensor.pos.northing$", posY)
insertresulttpl = insertresulttpl.replace("$sensor.pos.easting$", posX)
insertresulttpl = insertresulttpl.replace("$sensor.pos.altitude$", posZ)
insertresulttpl = insertresulttpl.replace("$observation.unit$", unit)
if VERBOSE:
print insertresulttpl
if not DRYRUN:
j = HttpPostData(insertresulttpl)
try:
print ' => ' + json.loads(j)['request'] + ' ' + json.loads(j)['acceptedTemplate']
except:
print ' => error, probably already present'
# Process the insertobservation template
tempid = sensorid + '/' + str(uuid.uuid4())
tempIds.append(tempid)
insertobservation = GetTemplate(cursor, 'JSON', 'InsertObservation')
insertobservation = insertobservation.replace("$sensor.offering.id$", offering)
insertobservation = insertobservation.replace("$sensor.observation.id$", tempid)
insertobservation = insertobservation.replace("$sensor.id$", sensorid)
insertobservation = insertobservation.replace("$sensor.observableproperty.output.id$", observableproperty)
insertobservation = insertobservation.replace("$sensor.featureofinterest.id$", foiId)
insertobservation = insertobservation.replace("$sensor.featureofinterest.name$", foiName)
insertobservation = insertobservation.replace("$sensor.featureofinterest.sampled$", foiSampledfeat)
insertobservation = insertobservation.replace("$sensor.pos.northing$", posY)
insertobservation = insertobservation.replace("$sensor.pos.easting$", posX)
insertobservation = insertobservation.replace("$sensor.pos.altitude$", posZ)
insertobservation = insertobservation.replace("$observation.time$", "2000-01-01T00:00:00+00:00")
insertobservation = insertobservation.replace("$result.time$", "2000-01-01T00:00:00+00:00")
insertobservation = insertobservation.replace("$observation.unit$", unit)
insertobservation = insertobservation.replace("$observation.value$", "0")
if VERBOSE:
print insertobservation
if not DRYRUN:
j = HttpPostData(insertobservation)
try:
print ' => ' + json.loads(j)['request'] + ' ' + sensorid
except:
print ' => error, probably already present'
c += 1
print "Deleting temporary observations (" + str(c) + ")"
n = 0
for t in tempIds:
deleterequest = "service=SOS&version=2.0.0&request=DeleteObservation&observation=" + urllib.quote_plus(t)
if not DRYRUN:
HttpGet(deleterequest)
if VERBOSE:
print deleterequest
if n >= 5:
sys.stderr.write(".")
n = 0
n += 1
print "Done."
print ""
print "Note: in order to fully remove the temporary observations, clean up"
print "'Deleted observations' using the SOS Admin interface."
conn.rollback() # Whatever happened, we do not want to write to the database.
conn.close()
# The end
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from metrics import Metric
from telemetry.core import bitmap
from telemetry.value import scalar
class SpeedIndexMetric(Metric):
"""The speed index metric is one way of measuring page load speed.
It is meant to approximate user perception of page load speed, and it
is based on the amount of time that it takes to paint to the visual
portion of the screen. It includes paint events that occur after the
onload event, and it doesn't include time loading things off-screen.
This speed index metric is based on WebPageTest.org (WPT).
For more info see: http://goo.gl/e7AH5l
"""
def __init__(self):
super(SpeedIndexMetric, self).__init__()
self._impl = None
@classmethod
def CustomizeBrowserOptions(cls, options):
options.AppendExtraBrowserArgs('--disable-infobars')
def Start(self, _, tab):
"""Start recording events.
This method should be called in the WillNavigateToPage method of
a PageTest, so that all the events can be captured. If it's called
in DidNavigateToPage, that will be too late.
"""
self._impl = (VideoSpeedIndexImpl() if tab.video_capture_supported else
PaintRectSpeedIndexImpl())
self._impl.Start(tab)
def Stop(self, _, tab):
"""Stop timeline recording."""
assert self._impl, 'Must call Start() before Stop()'
assert self.IsFinished(tab), 'Must wait for IsFinished() before Stop()'
self._impl.Stop(tab)
# Optional argument chart_name is not in base class Metric.
# pylint: disable=W0221
def AddResults(self, tab, results, chart_name=None):
"""Calculate the speed index and add it to the results."""
index = self._impl.CalculateSpeedIndex(tab)
# Release the tab so that it can be disconnected.
self._impl = None
results.AddValue(scalar.ScalarValue(
results.current_page, '%s.speed_index' % chart_name, 'ms', index))
def IsFinished(self, tab):
"""Decide whether the timeline recording should be stopped.
When the timeline recording is stopped determines which paint events
are used in the speed index metric calculation. In general, the recording
should continue if there has just been some data received, because
this suggests that painting may continue.
A page may repeatedly request resources in an infinite loop; a timeout
should be placed in any measurement that uses this metric, e.g.:
def IsDone():
return self._speedindex.IsFinished(tab)
util.WaitFor(IsDone, 60)
Returns:
True if 2 seconds have passed since last resource received, false
otherwise.
"""
return tab.HasReachedQuiescence()
class SpeedIndexImpl(object):
def Start(self, tab):
raise NotImplementedError()
def Stop(self, tab):
raise NotImplementedError()
def GetTimeCompletenessList(self, tab):
"""Returns a list of time to visual completeness tuples.
In the WPT PHP implementation, this is also called 'visual progress'.
"""
raise NotImplementedError()
def CalculateSpeedIndex(self, tab):
"""Calculate the speed index.
The speed index number conceptually represents the number of milliseconds
that the page was "visually incomplete". If the page were 0% complete for
1000 ms, then the score would be 1000; if it were 0% complete for 100 ms
then 90% complete (ie 10% incomplete) for 900 ms, then the score would be
1.0*100 + 0.1*900 = 190.
Returns:
A single number, milliseconds of visual incompleteness.
"""
time_completeness_list = self.GetTimeCompletenessList(tab)
prev_completeness = 0.0
speed_index = 0.0
prev_time = time_completeness_list[0][0]
for time, completeness in time_completeness_list:
# Add the incremental value for the interval just before this event.
elapsed_time = time - prev_time
incompleteness = (1.0 - prev_completeness)
speed_index += elapsed_time * incompleteness
# Update variables for next iteration.
prev_completeness = completeness
prev_time = time
return int(speed_index)
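# Illustrative trace of CalculateSpeedIndex (made-up numbers, mirroring the
# docstring example above): for time_completeness_list = [(0, 0.0),
# (100, 0.9), (1000, 1.0)] the loop adds 100 * (1 - 0.0) = 100 for the first
# interval and 900 * (1 - 0.9) = 90 for the second, giving a speed index of 190.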
class VideoSpeedIndexImpl(SpeedIndexImpl):
def __init__(self):
super(VideoSpeedIndexImpl, self).__init__()
self._time_completeness_list = None
def Start(self, tab):
assert tab.video_capture_supported
# Blank out the current page so it doesn't count towards the new page's
# completeness.
tab.Highlight(bitmap.WHITE)
# TODO(tonyg): Bitrate is arbitrary here. Experiment with screen capture
# overhead vs. speed index accuracy and set the bitrate appropriately.
tab.StartVideoCapture(min_bitrate_mbps=4)
def Stop(self, tab):
# Ignore white because Chrome may blank out the page during load and we want
# that to count as 0% complete. Relying on this fact, we also blank out the
# previous page to white. The tolerance of 8 experimentally does well with
# video capture at 4mbps. We should keep this as low as possible with
# supported video compression settings.
video_capture = tab.StopVideoCapture()
histograms = [(time, bmp.ColorHistogram(ignore_color=bitmap.WHITE,
tolerance=8))
for time, bmp in video_capture.GetVideoFrameIter()]
start_histogram = histograms[0][1]
final_histogram = histograms[-1][1]
total_distance = start_histogram.Distance(final_histogram)
def FrameProgress(histogram):
if total_distance == 0:
if histogram.Distance(final_histogram) == 0:
return 1.0
else:
return 0.0
return 1 - histogram.Distance(final_histogram) / total_distance
self._time_completeness_list = [(time, FrameProgress(hist))
for time, hist in histograms]
def GetTimeCompletenessList(self, tab):
assert self._time_completeness_list, 'Must call Stop() first.'
return self._time_completeness_list
class PaintRectSpeedIndexImpl(SpeedIndexImpl):
def __init__(self):
super(PaintRectSpeedIndexImpl, self).__init__()
def Start(self, tab):
tab.StartTimelineRecording()
def Stop(self, tab):
tab.StopTimelineRecording()
def GetTimeCompletenessList(self, tab):
events = tab.timeline_model.GetAllEvents()
viewport = self._GetViewportSize(tab)
paint_events = self._IncludedPaintEvents(events)
time_area_dict = self._TimeAreaDict(paint_events, viewport)
total_area = sum(time_area_dict.values())
assert total_area > 0.0, 'Total paint event area must be greater than 0.'
completeness = 0.0
time_completeness_list = []
# TODO(tonyg): This sets the start time to the start of the first paint
# event. That can't be correct. The start time should be navigationStart.
# Since the previous screen is not cleared at navigationStart, we should
# probably assume the completeness is 0 until the first paint and add the
# time of navigationStart as the start. We need to confirm what WPT does.
time_completeness_list.append(
(tab.timeline_model.GetAllEvents()[0].start, completeness))
for time, area in sorted(time_area_dict.items()):
completeness += float(area) / total_area
# Visual progress is rounded to the nearest percentage point as in WPT.
time_completeness_list.append((time, round(completeness, 2)))
return time_completeness_list
def _GetViewportSize(self, tab):
"""Returns dimensions of the viewport."""
return tab.EvaluateJavaScript('[ window.innerWidth, window.innerHeight ]')
def _IncludedPaintEvents(self, events):
"""Get all events that are counted in the calculation of the speed index.
There's one category of paint event that's filtered out: paint events
that occur before the first 'ResourceReceiveResponse' and 'Layout' events.
Previously in the WPT speed index, paint events that contain children paint
events were also filtered out.
"""
def FirstLayoutTime(events):
"""Get the start time of the first layout after a resource received."""
has_received_response = False
for event in events:
if event.name == 'ResourceReceiveResponse':
has_received_response = True
elif has_received_response and event.name == 'Layout':
return event.start
assert False, 'There were no layout events after resource receive events.'
first_layout_time = FirstLayoutTime(events)
paint_events = [e for e in events
if e.start >= first_layout_time and e.name == 'Paint']
return paint_events
def _TimeAreaDict(self, paint_events, viewport):
"""Make a dict from time to adjusted area value for events at that time.
The adjusted area value of each paint event is determined by how many paint
events cover the same rectangle, and whether it's a full-window paint event.
"Adjusted area" can also be thought of as "points" of visual completeness --
each rectangle has a certain number of points and these points are
distributed amongst the paint events that paint that rectangle.
Args:
paint_events: A list of paint events
viewport: A tuple (width, height) of the window.
Returns:
A dictionary of times of each paint event (in milliseconds) to the
adjusted area that the paint event is worth.
"""
width, height = viewport
fullscreen_area = width * height
def ClippedArea(rectangle):
"""Returns rectangle area clipped to viewport size."""
_, x0, y0, x1, y1 = rectangle
clipped_width = max(0, min(width, x1) - max(0, x0))
clipped_height = max(0, min(height, y1) - max(0, y0))
return clipped_width * clipped_height
grouped = self._GroupEventByRectangle(paint_events)
event_area_dict = collections.defaultdict(int)
for rectangle, events in grouped.items():
# The area points for each rectangle are divided up among the paint
# events in that rectangle.
area = ClippedArea(rectangle)
update_count = len(events)
adjusted_area = float(area) / update_count
# Paint events for the largest-area rectangle are counted as 50%.
if area == fullscreen_area:
adjusted_area /= 2
for event in events:
# The end time for an event is used for that event's time.
event_time = event.end
event_area_dict[event_time] += adjusted_area
return event_area_dict
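# Worked example for _TimeAreaDict (hypothetical numbers): with a 1000x800
# viewport, a 200x100 rectangle painted by two events contributes
# 20000 / 2 = 10000 adjusted-area points at each event's end time; if the
# rectangle covered the whole viewport, each event would instead get
# (800000 / 2) / 2 = 200000, because full-window paints are counted at 50%.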
def _GetRectangle(self, paint_event):
"""Get the specific rectangle on the screen for a paint event.
Each paint event belongs to a frame (as in html <frame> or <iframe>).
This, together with location and dimensions, comprises a rectangle.
In the WPT source, this 'rectangle' is also called a 'region'.
"""
def GetBox(quad):
"""Gets top-left and bottom-right coordinates from paint event.
In the timeline data from devtools, paint rectangle dimensions are
represented x-y coordinates of four corners, clockwise from the top-left.
See: function WebInspector.TimelinePresentationModel.quadFromRectData
in file src/out/Debug/obj/gen/devtools/TimelinePanel.js.
"""
x0, y0, _, _, x1, y1, _, _ = quad
return (x0, y0, x1, y1)
assert paint_event.name == 'Paint'
frame = paint_event.args['frameId']
return (frame,) + GetBox(paint_event.args['data']['clip'])
def _GroupEventByRectangle(self, paint_events):
"""Group all paint events according to the rectangle that they update."""
result = collections.defaultdict(list)
for event in paint_events:
assert event.name == 'Paint'
result[self._GetRectangle(event)].append(event)
return result
|
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from functools import partial
from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln
from scipy._lib._util import _lazywhere, rng_integers
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
r"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is:
.. math::
f(k) = \binom{n}{k} p^k (1-p)^{n-k}
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n >= 0) & (p >= 0) & (p <= 1)
def _get_support(self, n, p):
return self.a, n
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
# binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
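# Illustrative usage (comment-only sketch; assumes the public scipy.stats API):
#   >>> from scipy import stats
#   >>> stats.binom.pmf(1, n=2, p=0.5)   # C(2,1) * 0.5**1 * 0.5**1 -> 0.5
#   >>> stats.binom.cdf(1, n=2, p=0.5)   # 0.25 + 0.5 -> 0.75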
class bernoulli_gen(binom_gen):
r"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is:
.. math::
f(k) = \begin{cases}1-p &\text{if } k = 0\\
p &\text{if } k = 1\end{cases}
for :math:`k` in :math:`\{0, 1\}`.
`bernoulli` takes :math:`p` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _get_support(self, p):
# Overrides binom_gen._get_support.
return self.a, self.b
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
# bernoulli.pmf(k) = 1-p if k = 0
# = p if k = 1
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
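# Sketch (illustrative): `bernoulli` delegates to `binom` with n fixed at 1,
# so bernoulli.pmf(1, p) == binom.pmf(1, n=1, p=p) == p and
# bernoulli.pmf(0, p) == 1 - p.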
class betabinom_gen(rv_discrete):
r"""A beta-binomial discrete random variable.
%(before_notes)s
Notes
-----
The beta-binomial distribution is a binomial distribution with a
probability of success `p` that follows a beta distribution.
The probability mass function for `betabinom` is:
.. math::
f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}
for ``k`` in ``{0, 1,..., n}``, :math:`n \geq 0`, :math:`a > 0`,
:math:`b > 0`, where :math:`B(a, b)` is the beta function.
`betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.
References
----------
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
%(after_notes)s
.. versionadded:: 1.4.0
See Also
--------
beta, binom
%(example)s
"""
def _rvs(self, n, a, b):
p = self._random_state.beta(a, b, self._size)
return self._random_state.binomial(n, p, self._size)
def _get_support(self, n, a, b):
return 0, n
def _argcheck(self, n, a, b):
return (n >= 0) & (a > 0) & (b > 0)
def _logpmf(self, x, n, a, b):
k = floor(x)
combiln = -log(n + 1) - betaln(n - k + 1, k + 1)
return combiln + betaln(k + a, n - k + b) - betaln(a, b)
def _pmf(self, x, n, a, b):
return exp(self._logpmf(x, n, a, b))
def _stats(self, n, a, b, moments='mv'):
e_p = a / (a + b)
e_q = 1 - e_p
mu = n * e_p
var = n * (a + b + n) * e_p * e_q / (a + b + 1)
g1, g2 = None, None
if 's' in moments:
g1 = 1.0 / sqrt(var)
g1 *= (a + b + 2 * n) * (b - a)
g1 /= (a + b + 2) * (a + b)
if 'k' in moments:
g2 = a + b
g2 *= (a + b - 1 + 6 * n)
g2 += 3 * a * b * (n - 2)
g2 += 6 * n ** 2
g2 -= 3 * e_p * b * n * (6 - n)
g2 -= 18 * e_p * e_q * n ** 2
g2 *= (a + b) ** 2 * (1 + a + b)
g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
g2 -= 3
return mu, var, g1, g2
betabinom = betabinom_gen(name='betabinom')
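# Worked check (assumed numbers): for n=10, a=b=1 the beta prior is uniform, so
# betabinom(10, 1, 1) is discrete uniform on {0, ..., 10} with pmf 1/11,
# mean n*a/(a+b) = 5 and variance n*a*b*(a+b+n) / ((a+b)**2 * (a+b+1)) = 10,
# consistent with _stats above.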
class nbinom_gen(rv_discrete):
r"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Negative binomial distribution describes a sequence of i.i.d. Bernoulli
trials, repeated until a predefined, non-random number of successes occurs.
The probability mass function of the number of failures for `nbinom` is:
.. math::
f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k
for :math:`k \ge 0`.
`nbinom` takes :math:`n` and :math:`p` as shape parameters, where :math:`n`
is the number of successes and :math:`p` is the probability of a single
success.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
# nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
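# Sketch (illustrative): nbinom.pmf(k, n, p) is the probability of k failures
# before the n-th success; for n=1 it reduces to a geometric count of failures,
# nbinom.pmf(k, 1, p) == p * (1 - p)**k.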
class geom_gen(rv_discrete):
r"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is:
.. math::
f(k) = (1-p)^{k-1} p
for :math:`k \ge 1`.
`geom` takes :math:`p` as shape parameter.
%(after_notes)s
See Also
--------
planck
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log1p(-q) / log1p(-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
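# Sketch (assumed numbers): `geom` counts the trial of the first success, so
# geom.pmf(3, p=0.5) = (1 - 0.5)**2 * 0.5 = 0.125 and
# geom.cdf(3, p=0.5) = 1 - (1 - 0.5)**3 = 0.875.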
class hypergeom_gen(rv_discrete):
r"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
`M` is the total number of objects, `n` is the total number of Type I objects.
The random variate represents the number of Type I objects in `N` drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}
{\binom{M}{N}}
for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
coefficients are defined as,
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _get_support(self, M, n, N):
return np.maximum(N-(M-n), 0), np.minimum(n, N)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
betaln(tot+1, 1))
return result
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
res = []
for quant, tot, good, draw in zip(k, M, n, N):
if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
# Fewer terms to sum if we calculate log(1-cdf)
res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
else:
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
def _logcdf(self, k, M, n, N):
res = []
for quant, tot, good, draw in zip(k, M, n, N):
if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5):
# Fewer terms to sum if we calculate log(1-sf)
res.append(log1p(-exp(self.logsf(quant, tot, good, draw))))
else:
# Integration over probability mass function using logsumexp
k2 = np.arange(0, quant + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
r"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is:
.. math::
f(k) = - \frac{p^k}{k \log(1-p)}
for :math:`k \ge 1`.
`logser` takes :math:`p` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
# logser.pmf(k) = - p**k / (k*log(1-p))
return -np.power(p, k) * 1.0 / k / special.log1p(-p)
def _stats(self, p):
r = special.log1p(-p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
r"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is:
.. math::
f(k) = \exp(-\mu) \frac{\mu^k}{k!}
for :math:`k \ge 0`.
`poisson` takes :math:`\mu` as shape parameter.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
# poisson.pmf(k) = exp(-mu) * mu**k / k!
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
r"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is:
.. math::
f(k) = (1-\exp(-\lambda)) \exp(-\lambda k)
for :math:`k \ge 0` and :math:`\lambda > 0`.
`planck` takes :math:`\lambda` as shape parameter. The Planck distribution
can be written as a geometric distribution (`geom`) with
:math:`p = 1 - \exp(-\lambda)` shifted by `loc = -1`.
%(after_notes)s
See Also
--------
geom
%(example)s
"""
def _argcheck(self, lambda_):
return lambda_ > 0
def _pmf(self, k, lambda_):
return -expm1(-lambda_)*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return -expm1(-lambda_*(k+1))
def _sf(self, x, lambda_):
return exp(self._logsf(x, lambda_))
def _logsf(self, x, lambda_):
k = floor(x)
return -lambda_*(k+1)
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(*(self._get_support(lambda_)))
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _rvs(self, lambda_):
# use relation to geometric distribution for sampling
p = -expm1(-lambda_)
return self._random_state.geometric(p, size=self._size) - 1.0
def _stats(self, lambda_):
mu = 1/expm1(lambda_)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
C = -expm1(-lambda_)
return lambda_*exp(-lambda_)/C - log(C)
planck = planck_gen(a=0, name='planck', longname='A discrete exponential ')
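# Sketch of the geometric relation noted in the docstring (assumed numbers):
# with lambda_=0.5, p = -expm1(-0.5) ~= 0.3935, and
# planck.pmf(k, 0.5) == geom.pmf(k + 1, p) for k = 0, 1, 2, ...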
class boltzmann_gen(rv_discrete):
r"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is:
.. math::
f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N))
for :math:`k = 0,..., N-1`.
`boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_, N):
return (lambda_ > 0) & (N > 0)
def _get_support(self, lambda_, N):
return self.a, N - 1
def _pmf(self, k, lambda_, N):
# boltzmann.pmf(k) =
# (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann', a=0,
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
r"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is:
.. math::
f(k) = \frac{1}{high - low}
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
return (high > low)
def _get_support(self, low, high):
return low, high-1
def _pmf(self, k, low, high):
# randint.pmf(k) = 1./(high - low)
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high):
"""An array of *size* random integers >= ``low`` and < ``high``."""
if np.asarray(low).size == 1 and np.asarray(high).size == 1:
# no need to vectorize in that case
return rng_integers(self._random_state, low, high, size=self._size)
if self._size is not None:
# NumPy's RandomState.randint() doesn't broadcast its arguments.
# Use `broadcast_to()` to extend the shapes of low and high
# up to self._size. Then we can use the numpy.vectorize'd
# randint without needing to pass it a `size` argument.
low = np.broadcast_to(low, self._size)
high = np.broadcast_to(high, self._size)
randint = np.vectorize(partial(rng_integers, self._random_state),
otypes=[np.int_])
return randint(low, high)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
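# Sketch (assumed numbers): randint(low=1, high=7) models a fair six-sided die:
# pmf 1/6 on k = 1..6, mean (1 + 7 - 1)/2 = 3.5 and variance
# (6**2 - 1)/12 ~= 2.917, matching _stats above.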
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
r"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is:
.. math::
f(k, a) = \frac{1}{\zeta(a) k^a}
for :math:`k \ge 1`.
`zipf` takes :math:`a` as shape parameter. :math:`\zeta` is the
Riemann zeta function (`scipy.special.zeta`).
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
# zipf.pmf(k, a) = 1/(zeta(a) * k**a)
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
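# Sketch (assumed numbers): zipf.pmf(k, a=2) = 1/(zeta(2) * k**2)
# = 6/(pi**2 * k**2); per _munp above, the moment of order n is infinite
# whenever n >= a - 1, e.g. the mean is finite only for a > 2.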
class dlaplace_gen(rv_discrete):
r"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is:
.. math::
f(k) = \tanh(a/2) \exp(-a |k|)
for integers :math:`k` and :math:`a > 0`.
`dlaplace` takes :math:`a` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
# dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)),
log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
def _rvs(self, a):
# The discrete Laplace is equivalent to the two-sided geometric
# distribution with PMF:
# f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k)
# Reference:
# https://www.sciencedirect.com/science/
# article/abs/pii/S0378375804003519
# Furthermore, the two-sided geometric distribution is
# equivalent to the difference between two iid geometric
# distributions.
# Reference (page 179):
# https://pdfs.semanticscholar.org/61b3/
# b99f466815808fd0d03f5d2791eea8b541a1.pdf
# Thus, we can leverage the following:
# 1) alpha = e^-a
# 2) probability_of_success = 1 - alpha (Bernoulli trial)
probOfSuccess = -np.expm1(-np.asarray(a))
x = self._random_state.geometric(probOfSuccess, size=self._size)
y = self._random_state.geometric(probOfSuccess, size=self._size)
return x - y
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
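# Sketch of the sampling identity used in _rvs (assumed numbers): with a=1.0,
# alpha = exp(-1) ~= 0.3679, and the difference of two independent
# geometric(p = 1 - alpha ~= 0.6321) variates has the same distribution as
# dlaplace(a=1.0).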
class skellam_gen(rv_discrete):
r"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with
expected values :math:`\lambda_1` and :math:`\lambda_2`. Then,
:math:`k_1 - k_2` follows a Skellam distribution with parameters
:math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and
:math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where
:math:`\rho` is the correlation coefficient between :math:`k_1` and
:math:`k_2`. If the two Poisson-distributed r.v. are independent then
:math:`\rho = 0`.
Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive.
For details see: https://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1 - _ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
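# Sketch (assumed numbers): for independent Poisson counts with mu1=3 and
# mu2=1, skellam(3, 1) describes their difference, with mean 3 - 1 = 2 and
# variance 3 + 1 = 4, matching _stats above.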
class yulesimon_gen(rv_discrete):
r"""A Yule-Simon discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `yulesimon` is:
.. math::
f(k) = \alpha B(k, \alpha+1)
for :math:`k=1,2,3,...`, where :math:`\alpha>0`.
Here :math:`B` refers to the `scipy.special.beta` function.
The sampling of random variates is based on pg 553, Section 6.3 of [1]_.
Our notation maps to the referenced logic via :math:`\alpha=a-1`.
For details see the wikipedia entry [2]_.
References
----------
.. [1] Devroye, Luc. "Non-uniform Random Variate Generation",
(1986) Springer, New York.
.. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution
%(after_notes)s
%(example)s
"""
def _rvs(self, alpha):
E1 = self._random_state.standard_exponential(self._size)
E2 = self._random_state.standard_exponential(self._size)
ans = ceil(-E1 / log1p(-exp(-E2 / alpha)))
return ans
def _pmf(self, x, alpha):
return alpha * special.beta(x, alpha + 1)
def _argcheck(self, alpha):
return (alpha > 0)
def _logpmf(self, x, alpha):
return log(alpha) + special.betaln(x, alpha + 1)
def _cdf(self, x, alpha):
return 1 - x * special.beta(x, alpha + 1)
def _sf(self, x, alpha):
return x * special.beta(x, alpha + 1)
def _logsf(self, x, alpha):
return log(x) + special.betaln(x, alpha + 1)
def _stats(self, alpha):
mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1))
mu2 = np.where(alpha > 2,
alpha**2 / ((alpha - 2.0) * (alpha - 1)**2),
np.inf)
mu2 = np.where(alpha <= 1, np.nan, mu2)
g1 = np.where(alpha > 3,
sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)),
np.inf)
g1 = np.where(alpha <= 2, np.nan, g1)
g2 = np.where(alpha > 4,
(alpha + 3) + (alpha**3 - 49 * alpha - 22) / (alpha *
(alpha - 4) * (alpha - 3)), np.inf)
g2 = np.where(alpha <= 2, np.nan, g2)
return mu, mu2, g1, g2
yulesimon = yulesimon_gen(name='yulesimon', a=1)
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from parameterized import parameterized
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models import Connection
from airflow.security import permissions
from airflow.utils.session import provide_session
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_connections
@pytest.fixture(scope="module")
def configured_app(minimal_app_for_api):
app = minimal_app_for_api
create_user(
app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
],
)
create_user(app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
yield app
delete_user(app, username="test") # type: ignore
delete_user(app, username="test_no_permissions") # type: ignore
class TestConnectionEndpoint:
@pytest.fixture(autouse=True)
def setup_attrs(self, configured_app) -> None:
self.app = configured_app
self.client = self.app.test_client() # type:ignore
# we want only the connection created here for this test
clear_db_connections(False)
def teardown_method(self) -> None:
clear_db_connections()
def _create_connection(self, session):
connection_model = Connection(conn_id='test-connection-id', conn_type='test_type')
session.add(connection_model)
session.commit()
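# Typical flow in the endpoint test classes below (summary sketch, mirroring
# the real tests): seed the DB via self._create_connection(session) or explicit
# Connection(...) models plus session.commit(), call the API through
# self.client.<verb>(..., environ_overrides={'REMOTE_USER': 'test'}), then
# assert on response.status_code and response.json.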
class TestDeleteConnection(TestConnectionEndpoint):
def test_delete_should_respond_204(self, session):
connection_model = Connection(conn_id='test-connection', conn_type='test_type')
session.add(connection_model)
session.commit()
conn = session.query(Connection).all()
assert len(conn) == 1
response = self.client.delete(
"/api/v1/connections/test-connection", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 204
connection = session.query(Connection).all()
assert len(connection) == 0
def test_delete_should_respond_404(self):
response = self.client.delete(
"/api/v1/connections/test-connection", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
assert response.json == {
'detail': "The Connection with connection_id: `test-connection` was not found",
'status': 404,
'title': 'Connection not found',
'type': EXCEPTIONS_LINK_MAP[404],
}
def test_should_raises_401_unauthenticated(self):
response = self.client.delete("/api/v1/connections/test-connection")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"/api/v1/connections/test-connection-id", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
class TestGetConnection(TestConnectionEndpoint):
def test_should_respond_200(self, session):
connection_model = Connection(
conn_id='test-connection-id',
conn_type='mysql',
description='test description',
host='mysql',
login='login',
schema='testschema',
port=80,
extra="{'param': 'value'}",
)
session.add(connection_model)
session.commit()
result = session.query(Connection).all()
assert len(result) == 1
response = self.client.get(
"/api/v1/connections/test-connection-id", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == {
"connection_id": "test-connection-id",
"conn_type": 'mysql',
"description": "test description",
"host": 'mysql',
"login": 'login',
'schema': 'testschema',
'port': 80,
'extra': "{'param': 'value'}",
}
def test_should_respond_404(self):
response = self.client.get(
"/api/v1/connections/invalid-connection", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
assert {
'detail': "The Connection with connection_id: `invalid-connection` was not found",
'status': 404,
'title': 'Connection not found',
'type': EXCEPTIONS_LINK_MAP[404],
} == response.json
def test_should_raises_401_unauthenticated(self):
response = self.client.get("/api/v1/connections/test-connection-id")
assert_401(response)
class TestGetConnections(TestConnectionEndpoint):
def test_should_respond_200(self, session):
connection_model_1 = Connection(conn_id='test-connection-id-1', conn_type='test_type')
connection_model_2 = Connection(conn_id='test-connection-id-2', conn_type='test_type')
connections = [connection_model_1, connection_model_2]
session.add_all(connections)
session.commit()
result = session.query(Connection).all()
assert len(result) == 2
response = self.client.get("/api/v1/connections", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json == {
'connections': [
{
"connection_id": "test-connection-id-1",
"conn_type": 'test_type',
"description": None,
"host": None,
"login": None,
'schema': None,
'port': None,
},
{
"connection_id": "test-connection-id-2",
"conn_type": 'test_type',
"description": None,
"host": None,
"login": None,
'schema': None,
'port': None,
},
],
'total_entries': 2,
}
def test_should_respond_200_with_order_by(self, session):
connection_model_1 = Connection(conn_id='test-connection-id-1', conn_type='test_type')
connection_model_2 = Connection(conn_id='test-connection-id-2', conn_type='test_type')
connections = [connection_model_1, connection_model_2]
session.add_all(connections)
session.commit()
result = session.query(Connection).all()
assert len(result) == 2
response = self.client.get(
"/api/v1/connections?order_by=-connection_id", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
# A leading '-' in order_by means descending order.
assert response.json == {
'connections': [
{
"connection_id": "test-connection-id-2",
"conn_type": 'test_type',
"description": None,
"host": None,
"login": None,
'schema': None,
'port': None,
},
{
"connection_id": "test-connection-id-1",
"conn_type": 'test_type',
"description": None,
"host": None,
"login": None,
'schema': None,
'port': None,
},
],
'total_entries': 2,
}
def test_should_raises_401_unauthenticated(self):
response = self.client.get("/api/v1/connections")
assert_401(response)
class TestGetConnectionsPagination(TestConnectionEndpoint):
@parameterized.expand(
[
("/api/v1/connections?limit=1", ['TEST_CONN_ID1']),
("/api/v1/connections?limit=2", ['TEST_CONN_ID1', "TEST_CONN_ID2"]),
(
"/api/v1/connections?offset=5",
[
"TEST_CONN_ID6",
"TEST_CONN_ID7",
"TEST_CONN_ID8",
"TEST_CONN_ID9",
"TEST_CONN_ID10",
],
),
(
"/api/v1/connections?offset=0",
[
"TEST_CONN_ID1",
"TEST_CONN_ID2",
"TEST_CONN_ID3",
"TEST_CONN_ID4",
"TEST_CONN_ID5",
"TEST_CONN_ID6",
"TEST_CONN_ID7",
"TEST_CONN_ID8",
"TEST_CONN_ID9",
"TEST_CONN_ID10",
],
),
("/api/v1/connections?limit=1&offset=5", ["TEST_CONN_ID6"]),
("/api/v1/connections?limit=1&offset=1", ["TEST_CONN_ID2"]),
(
"/api/v1/connections?limit=2&offset=2",
["TEST_CONN_ID3", "TEST_CONN_ID4"],
),
]
)
@provide_session
def test_handle_limit_offset(self, url, expected_conn_ids, session):
connections = self._create_connections(10)
session.add_all(connections)
session.commit()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 10
conn_ids = [conn["connection_id"] for conn in response.json["connections"] if conn]
assert conn_ids == expected_conn_ids
def test_should_respect_page_size_limit_default(self, session):
connection_models = self._create_connections(200)
session.add_all(connection_models)
session.commit()
response = self.client.get("/api/v1/connections", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 200
assert len(response.json["connections"]) == 100
def test_invalid_order_by_raises_400(self, session):
connection_models = self._create_connections(200)
session.add_all(connection_models)
session.commit()
response = self.client.get(
"/api/v1/connections?order_by=invalid", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
assert (
response.json['detail'] == "Ordering with 'invalid' is disallowed or"
" the attribute does not exist on the model"
)
def test_limit_of_zero_should_return_default(self, session):
connection_models = self._create_connections(200)
session.add_all(connection_models)
session.commit()
response = self.client.get("/api/v1/connections?limit=0", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 200
assert len(response.json["connections"]) == 100
@conf_vars({("api", "maximum_page_limit"): "150"})
def test_should_return_conf_max_if_req_max_above_conf(self, session):
connection_models = self._create_connections(200)
session.add_all(connection_models)
session.commit()
response = self.client.get("/api/v1/connections?limit=180", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert len(response.json['connections']) == 150
def _create_connections(self, count):
return [
Connection(conn_id='TEST_CONN_ID' + str(i), conn_type='TEST_CONN_TYPE' + str(i))
for i in range(1, count + 1)
]
class TestPatchConnection(TestConnectionEndpoint):
@parameterized.expand(
[
({"connection_id": "test-connection-id", "conn_type": 'test_type', "extra": "{'key': 'var'}"},),
({"extra": "{'key': 'var'}"},),
]
)
@provide_session
def test_patch_should_respond_200(self, payload, session):
self._create_connection(session)
response = self.client.patch(
"/api/v1/connections/test-connection-id", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
def test_patch_should_respond_200_with_update_mask(self, session):
self._create_connection(session)
test_connection = "test-connection-id"
payload = {
"connection_id": test_connection,
"conn_type": 'test_type_2',
"extra": "{'key': 'var'}",
'login': "login",
"port": 80,
}
response = self.client.patch(
"/api/v1/connections/test-connection-id?update_mask=port,login",
json=payload,
environ_overrides={'REMOTE_USER': "test"},
)
assert response.status_code == 200
connection = session.query(Connection).filter_by(conn_id=test_connection).first()
assert connection.password is None
assert response.json == {
"connection_id": test_connection, # not updated
"conn_type": 'test_type', # Not updated
"description": None, # Not updated
"extra": None, # Not updated
'login': "login", # updated
"port": 80, # updated
"schema": None,
"host": None,
}
@parameterized.expand(
[
(
{
"connection_id": 'test-connection-id',
"conn_type": 'test_type_2',
"extra": "{'key': 'var'}",
'login': "login",
"port": 80,
},
'update_mask=ports, login', # 'ports' is unknown
"'ports' is unknown or cannot be updated.",
),
(
{
"connection_id": 'test-connection-id',
"conn_type": 'test_type_2',
"extra": "{'key': 'var'}",
'login': "login",
"port": 80,
},
'update_mask=port, login, conn_id', # conn_id is unknown
"'conn_id' is unknown or cannot be updated.",
),
(
{
"connection_id": 'test-connection-id',
"conn_type": 'test_type_2',
"extra": "{'key': 'var'}",
'login': "login",
"port": 80,
},
'update_mask=port, login, connection_id', # connection_id cannot be updated
"'connection_id' is unknown or cannot be updated.",
),
(
{
"connection_id": "test-connection", # trying to change connection_id
"conn_type": "test-type",
"login": "login",
},
'', # not necessary
"The connection_id cannot be updated.",
),
]
)
@provide_session
def test_patch_should_respond_400_for_invalid_fields_in_update_mask(
self, payload, update_mask, error_message, session
):
self._create_connection(session)
response = self.client.patch(
f"/api/v1/connections/test-connection-id?{update_mask}",
json=payload,
environ_overrides={'REMOTE_USER': "test"},
)
assert response.status_code == 400
assert response.json['detail'] == error_message
@parameterized.expand(
[
(
{
"connection_id": "test-connection-id",
"conn_type": "test-type",
"extra": 0, # expected string
},
"0 is not of type 'string' - 'extra'",
),
(
{
"connection_id": "test-connection-id",
"conn_type": "test-type",
"extras": "{}", # extras not a known field e.g typo
},
"extras",
),
(
{
"connection_id": "test-connection-id",
"conn_type": "test-type",
"invalid_field": "invalid field", # unknown field
"_password": "{}", # _password not a known field
},
"_password",
),
]
)
@provide_session
def test_patch_should_respond_400_for_invalid_update(self, payload, error_message, session):
self._create_connection(session)
response = self.client.patch(
"/api/v1/connections/test-connection-id", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
assert error_message in response.json['detail']
def test_patch_should_respond_404_not_found(self):
payload = {"connection_id": "test-connection-id", "conn_type": "test-type", "port": 90}
response = self.client.patch(
"/api/v1/connections/test-connection-id", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
assert {
'detail': "The Connection with connection_id: `test-connection-id` was not found",
'status': 404,
'title': 'Connection not found',
'type': EXCEPTIONS_LINK_MAP[404],
} == response.json
def test_should_raises_401_unauthenticated(self, session):
self._create_connection(session)
response = self.client.patch(
"/api/v1/connections/test-connection-id",
json={"connection_id": "test-connection-id", "conn_type": 'test_type', "extra": "{'key': 'var'}"},
)
assert_401(response)
class TestPostConnection(TestConnectionEndpoint):
def test_post_should_respond_200(self, session):
payload = {"connection_id": "test-connection-id", "conn_type": 'test_type'}
response = self.client.post(
"/api/v1/connections", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
connection = session.query(Connection).all()
assert len(connection) == 1
assert connection[0].conn_id == 'test-connection-id'
def test_post_should_respond_400_for_invalid_payload(self):
payload = {
"connection_id": "test-connection-id",
} # conn_type missing
response = self.client.post(
"/api/v1/connections", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
assert response.json == {
'detail': "{'conn_type': ['Missing data for required field.']}",
'status': 400,
'title': 'Bad Request',
'type': EXCEPTIONS_LINK_MAP[400],
}
def test_post_should_respond_409_already_exist(self):
payload = {"connection_id": "test-connection-id", "conn_type": 'test_type'}
response = self.client.post(
"/api/v1/connections", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
# Another request
response = self.client.post(
"/api/v1/connections", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 409
assert response.json == {
'detail': 'Connection already exist. ID: test-connection-id',
'status': 409,
'title': 'Conflict',
'type': EXCEPTIONS_LINK_MAP[409],
}
def test_should_raises_401_unauthenticated(self):
response = self.client.post(
"/api/v1/connections", json={"connection_id": "test-connection-id", "conn_type": 'test_type'}
)
assert_401(response)
class TestConnection(TestConnectionEndpoint):
def test_should_respond_200(self):
payload = {"connection_id": "test-connection-id", "conn_type": 'sqlite'}
response = self.client.post(
"/api/v1/connections/test", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == {
'status': True,
'message': 'Connection successfully tested',
}
def test_post_should_respond_400_for_invalid_payload(self):
payload = {
"connection_id": "test-connection-id",
} # conn_type missing
response = self.client.post(
"/api/v1/connections/test", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
assert response.json == {
'detail': "{'conn_type': ['Missing data for required field.']}",
'status': 400,
'title': 'Bad Request',
'type': EXCEPTIONS_LINK_MAP[400],
}
def test_should_raises_401_unauthenticated(self):
response = self.client.post(
"/api/v1/connections/test", json={"connection_id": "test-connection-id", "conn_type": 'test_type'}
)
assert_401(response)
|
|
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
from . import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of15']
class port_stats_prop(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = port_stats_prop.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = port_stats_prop()
obj.type = reader.read("!H")[0]
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("port_stats_prop {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class ethernet(port_stats_prop):
type = 0
def __init__(self, rx_frame_err=None, rx_over_err=None, rx_crc_err=None, collisions=None):
if rx_frame_err != None:
self.rx_frame_err = rx_frame_err
else:
self.rx_frame_err = 0
if rx_over_err != None:
self.rx_over_err = rx_over_err
else:
self.rx_over_err = 0
if rx_crc_err != None:
self.rx_crc_err = rx_crc_err
else:
self.rx_crc_err = 0
if collisions != None:
self.collisions = collisions
else:
self.collisions = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append('\x00' * 4)
packed.append(struct.pack("!Q", self.rx_frame_err))
packed.append(struct.pack("!Q", self.rx_over_err))
packed.append(struct.pack("!Q", self.rx_crc_err))
packed.append(struct.pack("!Q", self.collisions))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = ethernet()
_type = reader.read("!H")[0]
assert(_type == 0)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
reader.skip(4)
obj.rx_frame_err = reader.read("!Q")[0]
obj.rx_over_err = reader.read("!Q")[0]
obj.rx_crc_err = reader.read("!Q")[0]
obj.collisions = reader.read("!Q")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.rx_frame_err != other.rx_frame_err: return False
if self.rx_over_err != other.rx_over_err: return False
if self.rx_crc_err != other.rx_crc_err: return False
if self.collisions != other.collisions: return False
return True
def pretty_print(self, q):
q.text("ethernet {")
with q.group():
with q.indent(2):
q.breakable()
q.text("rx_frame_err = ");
q.text("%#x" % self.rx_frame_err)
q.text(","); q.breakable()
q.text("rx_over_err = ");
q.text("%#x" % self.rx_over_err)
q.text(","); q.breakable()
q.text("rx_crc_err = ");
q.text("%#x" % self.rx_crc_err)
q.text(","); q.breakable()
q.text("collisions = ");
q.text("%#x" % self.collisions)
q.breakable()
q.text('}')
port_stats_prop.subtypes[0] = ethernet
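# Round-trip sketch (illustrative; assumes loxi.generic_util.OFReader is the
# buffer reader used elsewhere in pyloxi):
#   buf = ethernet(rx_crc_err=3, collisions=7).pack()
#   obj = port_stats_prop.unpack(loxi.generic_util.OFReader(buf))
#   # dispatch via subtypes[0] returns an equal ethernet instance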
class experimenter(port_stats_prop):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, exp_type=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if exp_type != None:
self.exp_type = exp_type
else:
self.exp_type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.exp_type))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.experimenter = reader.read("!L")[0]
obj.exp_type = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.exp_type != other.exp_type: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("exp_type = ");
q.text("%#x" % self.exp_type)
q.breakable()
q.text('}')
port_stats_prop.subtypes[65535] = experimenter
class experimenter_intel(experimenter):
type = 65535
experimenter = 43521
exp_type = 1
def __init__(self, rx_1_to_64_packets=None, rx_65_to_127_packets=None, rx_128_to_255_packets=None, rx_256_to_511_packets=None, rx_512_to_1023_packets=None, rx_1024_to_1522_packets=None, rx_1523_to_max_packets=None, tx_1_to_64_packets=None, tx_65_to_127_packets=None, tx_128_to_255_packets=None, tx_256_to_511_packets=None, tx_512_to_1023_packets=None, tx_1024_to_1522_packets=None, tx_1523_to_max_packets=None, tx_multicast_packets=None, rx_broadcast_packets=None, tx_broadcast_packets=None, rx_undersized_errors=None, rx_oversize_errors=None, rx_fragmented_errors=None, rx_jabber_errors=None):
if rx_1_to_64_packets != None:
self.rx_1_to_64_packets = rx_1_to_64_packets
else:
self.rx_1_to_64_packets = 0
if rx_65_to_127_packets != None:
self.rx_65_to_127_packets = rx_65_to_127_packets
else:
self.rx_65_to_127_packets = 0
if rx_128_to_255_packets != None:
self.rx_128_to_255_packets = rx_128_to_255_packets
else:
self.rx_128_to_255_packets = 0
if rx_256_to_511_packets != None:
self.rx_256_to_511_packets = rx_256_to_511_packets
else:
self.rx_256_to_511_packets = 0
if rx_512_to_1023_packets != None:
self.rx_512_to_1023_packets = rx_512_to_1023_packets
else:
self.rx_512_to_1023_packets = 0
if rx_1024_to_1522_packets != None:
self.rx_1024_to_1522_packets = rx_1024_to_1522_packets
else:
self.rx_1024_to_1522_packets = 0
if rx_1523_to_max_packets != None:
self.rx_1523_to_max_packets = rx_1523_to_max_packets
else:
self.rx_1523_to_max_packets = 0
if tx_1_to_64_packets != None:
self.tx_1_to_64_packets = tx_1_to_64_packets
else:
self.tx_1_to_64_packets = 0
if tx_65_to_127_packets != None:
self.tx_65_to_127_packets = tx_65_to_127_packets
else:
self.tx_65_to_127_packets = 0
if tx_128_to_255_packets != None:
self.tx_128_to_255_packets = tx_128_to_255_packets
else:
self.tx_128_to_255_packets = 0
if tx_256_to_511_packets != None:
self.tx_256_to_511_packets = tx_256_to_511_packets
else:
self.tx_256_to_511_packets = 0
if tx_512_to_1023_packets != None:
self.tx_512_to_1023_packets = tx_512_to_1023_packets
else:
self.tx_512_to_1023_packets = 0
if tx_1024_to_1522_packets != None:
self.tx_1024_to_1522_packets = tx_1024_to_1522_packets
else:
self.tx_1024_to_1522_packets = 0
if tx_1523_to_max_packets != None:
self.tx_1523_to_max_packets = tx_1523_to_max_packets
else:
self.tx_1523_to_max_packets = 0
if tx_multicast_packets != None:
self.tx_multicast_packets = tx_multicast_packets
else:
self.tx_multicast_packets = 0
if rx_broadcast_packets != None:
self.rx_broadcast_packets = rx_broadcast_packets
else:
self.rx_broadcast_packets = 0
if tx_broadcast_packets != None:
self.tx_broadcast_packets = tx_broadcast_packets
else:
self.tx_broadcast_packets = 0
if rx_undersized_errors != None:
self.rx_undersized_errors = rx_undersized_errors
else:
self.rx_undersized_errors = 0
if rx_oversize_errors != None:
self.rx_oversize_errors = rx_oversize_errors
else:
self.rx_oversize_errors = 0
if rx_fragmented_errors != None:
self.rx_fragmented_errors = rx_fragmented_errors
else:
self.rx_fragmented_errors = 0
if rx_jabber_errors != None:
self.rx_jabber_errors = rx_jabber_errors
else:
self.rx_jabber_errors = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.exp_type))
packed.append('\x00' * 4)
packed.append(struct.pack("!Q", self.rx_1_to_64_packets))
packed.append(struct.pack("!Q", self.rx_65_to_127_packets))
packed.append(struct.pack("!Q", self.rx_128_to_255_packets))
packed.append(struct.pack("!Q", self.rx_256_to_511_packets))
packed.append(struct.pack("!Q", self.rx_512_to_1023_packets))
packed.append(struct.pack("!Q", self.rx_1024_to_1522_packets))
packed.append(struct.pack("!Q", self.rx_1523_to_max_packets))
packed.append(struct.pack("!Q", self.tx_1_to_64_packets))
packed.append(struct.pack("!Q", self.tx_65_to_127_packets))
packed.append(struct.pack("!Q", self.tx_128_to_255_packets))
packed.append(struct.pack("!Q", self.tx_256_to_511_packets))
packed.append(struct.pack("!Q", self.tx_512_to_1023_packets))
packed.append(struct.pack("!Q", self.tx_1024_to_1522_packets))
packed.append(struct.pack("!Q", self.tx_1523_to_max_packets))
packed.append(struct.pack("!Q", self.tx_multicast_packets))
packed.append(struct.pack("!Q", self.rx_broadcast_packets))
packed.append(struct.pack("!Q", self.tx_broadcast_packets))
packed.append(struct.pack("!Q", self.rx_undersized_errors))
packed.append(struct.pack("!Q", self.rx_oversize_errors))
packed.append(struct.pack("!Q", self.rx_fragmented_errors))
packed.append(struct.pack("!Q", self.rx_jabber_errors))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = experimenter_intel()
_type = reader.read("!H")[0]
assert(_type == 65535)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 43521)
_exp_type = reader.read("!L")[0]
assert(_exp_type == 1)
reader.skip(4)
obj.rx_1_to_64_packets = reader.read("!Q")[0]
obj.rx_65_to_127_packets = reader.read("!Q")[0]
obj.rx_128_to_255_packets = reader.read("!Q")[0]
obj.rx_256_to_511_packets = reader.read("!Q")[0]
obj.rx_512_to_1023_packets = reader.read("!Q")[0]
obj.rx_1024_to_1522_packets = reader.read("!Q")[0]
obj.rx_1523_to_max_packets = reader.read("!Q")[0]
obj.tx_1_to_64_packets = reader.read("!Q")[0]
obj.tx_65_to_127_packets = reader.read("!Q")[0]
obj.tx_128_to_255_packets = reader.read("!Q")[0]
obj.tx_256_to_511_packets = reader.read("!Q")[0]
obj.tx_512_to_1023_packets = reader.read("!Q")[0]
obj.tx_1024_to_1522_packets = reader.read("!Q")[0]
obj.tx_1523_to_max_packets = reader.read("!Q")[0]
obj.tx_multicast_packets = reader.read("!Q")[0]
obj.rx_broadcast_packets = reader.read("!Q")[0]
obj.tx_broadcast_packets = reader.read("!Q")[0]
obj.rx_undersized_errors = reader.read("!Q")[0]
obj.rx_oversize_errors = reader.read("!Q")[0]
obj.rx_fragmented_errors = reader.read("!Q")[0]
obj.rx_jabber_errors = reader.read("!Q")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.rx_1_to_64_packets != other.rx_1_to_64_packets: return False
if self.rx_65_to_127_packets != other.rx_65_to_127_packets: return False
if self.rx_128_to_255_packets != other.rx_128_to_255_packets: return False
if self.rx_256_to_511_packets != other.rx_256_to_511_packets: return False
if self.rx_512_to_1023_packets != other.rx_512_to_1023_packets: return False
if self.rx_1024_to_1522_packets != other.rx_1024_to_1522_packets: return False
if self.rx_1523_to_max_packets != other.rx_1523_to_max_packets: return False
if self.tx_1_to_64_packets != other.tx_1_to_64_packets: return False
if self.tx_65_to_127_packets != other.tx_65_to_127_packets: return False
if self.tx_128_to_255_packets != other.tx_128_to_255_packets: return False
if self.tx_256_to_511_packets != other.tx_256_to_511_packets: return False
if self.tx_512_to_1023_packets != other.tx_512_to_1023_packets: return False
if self.tx_1024_to_1522_packets != other.tx_1024_to_1522_packets: return False
if self.tx_1523_to_max_packets != other.tx_1523_to_max_packets: return False
if self.tx_multicast_packets != other.tx_multicast_packets: return False
if self.rx_broadcast_packets != other.rx_broadcast_packets: return False
if self.tx_broadcast_packets != other.tx_broadcast_packets: return False
if self.rx_undersized_errors != other.rx_undersized_errors: return False
if self.rx_oversize_errors != other.rx_oversize_errors: return False
if self.rx_fragmented_errors != other.rx_fragmented_errors: return False
if self.rx_jabber_errors != other.rx_jabber_errors: return False
return True
def pretty_print(self, q):
q.text("experimenter_intel {")
with q.group():
with q.indent(2):
q.breakable()
q.text("rx_1_to_64_packets = ");
q.text("%#x" % self.rx_1_to_64_packets)
q.text(","); q.breakable()
q.text("rx_65_to_127_packets = ");
q.text("%#x" % self.rx_65_to_127_packets)
q.text(","); q.breakable()
q.text("rx_128_to_255_packets = ");
q.text("%#x" % self.rx_128_to_255_packets)
q.text(","); q.breakable()
q.text("rx_256_to_511_packets = ");
q.text("%#x" % self.rx_256_to_511_packets)
q.text(","); q.breakable()
q.text("rx_512_to_1023_packets = ");
q.text("%#x" % self.rx_512_to_1023_packets)
q.text(","); q.breakable()
q.text("rx_1024_to_1522_packets = ");
q.text("%#x" % self.rx_1024_to_1522_packets)
q.text(","); q.breakable()
q.text("rx_1523_to_max_packets = ");
q.text("%#x" % self.rx_1523_to_max_packets)
q.text(","); q.breakable()
q.text("tx_1_to_64_packets = ");
q.text("%#x" % self.tx_1_to_64_packets)
q.text(","); q.breakable()
q.text("tx_65_to_127_packets = ");
q.text("%#x" % self.tx_65_to_127_packets)
q.text(","); q.breakable()
q.text("tx_128_to_255_packets = ");
q.text("%#x" % self.tx_128_to_255_packets)
q.text(","); q.breakable()
q.text("tx_256_to_511_packets = ");
q.text("%#x" % self.tx_256_to_511_packets)
q.text(","); q.breakable()
q.text("tx_512_to_1023_packets = ");
q.text("%#x" % self.tx_512_to_1023_packets)
q.text(","); q.breakable()
q.text("tx_1024_to_1522_packets = ");
q.text("%#x" % self.tx_1024_to_1522_packets)
q.text(","); q.breakable()
q.text("tx_1523_to_max_packets = ");
q.text("%#x" % self.tx_1523_to_max_packets)
q.text(","); q.breakable()
q.text("tx_multicast_packets = ");
q.text("%#x" % self.tx_multicast_packets)
q.text(","); q.breakable()
q.text("rx_broadcast_packets = ");
q.text("%#x" % self.rx_broadcast_packets)
q.text(","); q.breakable()
q.text("tx_broadcast_packets = ");
q.text("%#x" % self.tx_broadcast_packets)
q.text(","); q.breakable()
q.text("rx_undersized_errors = ");
q.text("%#x" % self.rx_undersized_errors)
q.text(","); q.breakable()
q.text("rx_oversize_errors = ");
q.text("%#x" % self.rx_oversize_errors)
q.text(","); q.breakable()
q.text("rx_fragmented_errors = ");
q.text("%#x" % self.rx_fragmented_errors)
q.text(","); q.breakable()
q.text("rx_jabber_errors = ");
q.text("%#x" % self.rx_jabber_errors)
q.breakable()
q.text('}')
experimenter.subtypes[43521] = experimenter_intel
class optical(port_stats_prop):
type = 1
def __init__(self, flags=None, tx_freq_lmda=None, tx_offset=None, tx_grid_span=None, rx_freq_lmda=None, rx_offset=None, rx_grid_span=None, tx_pwr=None, rx_pwr=None, bias_current=None, temperature=None):
if flags is not None:
self.flags = flags
else:
self.flags = 0
if tx_freq_lmda is not None:
self.tx_freq_lmda = tx_freq_lmda
else:
self.tx_freq_lmda = 0
if tx_offset is not None:
self.tx_offset = tx_offset
else:
self.tx_offset = 0
if tx_grid_span is not None:
self.tx_grid_span = tx_grid_span
else:
self.tx_grid_span = 0
if rx_freq_lmda is not None:
self.rx_freq_lmda = rx_freq_lmda
else:
self.rx_freq_lmda = 0
if rx_offset is not None:
self.rx_offset = rx_offset
else:
self.rx_offset = 0
if rx_grid_span is not None:
self.rx_grid_span = rx_grid_span
else:
self.rx_grid_span = 0
if tx_pwr is not None:
self.tx_pwr = tx_pwr
else:
self.tx_pwr = 0
if rx_pwr is not None:
self.rx_pwr = rx_pwr
else:
self.rx_pwr = 0
if bias_current is not None:
self.bias_current = bias_current
else:
self.bias_current = 0
if temperature is not None:
self.temperature = temperature
else:
self.temperature = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append('\x00' * 4)
packed.append(struct.pack("!L", self.flags))
packed.append(struct.pack("!L", self.tx_freq_lmda))
packed.append(struct.pack("!L", self.tx_offset))
packed.append(struct.pack("!L", self.tx_grid_span))
packed.append(struct.pack("!L", self.rx_freq_lmda))
packed.append(struct.pack("!L", self.rx_offset))
packed.append(struct.pack("!L", self.rx_grid_span))
packed.append(struct.pack("!H", self.tx_pwr))
packed.append(struct.pack("!H", self.rx_pwr))
packed.append(struct.pack("!H", self.bias_current))
packed.append(struct.pack("!H", self.temperature))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = optical()
_type = reader.read("!H")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
reader.skip(4)
obj.flags = reader.read("!L")[0]
obj.tx_freq_lmda = reader.read("!L")[0]
obj.tx_offset = reader.read("!L")[0]
obj.tx_grid_span = reader.read("!L")[0]
obj.rx_freq_lmda = reader.read("!L")[0]
obj.rx_offset = reader.read("!L")[0]
obj.rx_grid_span = reader.read("!L")[0]
obj.tx_pwr = reader.read("!H")[0]
obj.rx_pwr = reader.read("!H")[0]
obj.bias_current = reader.read("!H")[0]
obj.temperature = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.flags != other.flags: return False
if self.tx_freq_lmda != other.tx_freq_lmda: return False
if self.tx_offset != other.tx_offset: return False
if self.tx_grid_span != other.tx_grid_span: return False
if self.rx_freq_lmda != other.rx_freq_lmda: return False
if self.rx_offset != other.rx_offset: return False
if self.rx_grid_span != other.rx_grid_span: return False
if self.tx_pwr != other.tx_pwr: return False
if self.rx_pwr != other.rx_pwr: return False
if self.bias_current != other.bias_current: return False
if self.temperature != other.temperature: return False
return True
def pretty_print(self, q):
q.text("optical {")
with q.group():
with q.indent(2):
q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("tx_freq_lmda = ");
q.text("%#x" % self.tx_freq_lmda)
q.text(","); q.breakable()
q.text("tx_offset = ");
q.text("%#x" % self.tx_offset)
q.text(","); q.breakable()
q.text("tx_grid_span = ");
q.text("%#x" % self.tx_grid_span)
q.text(","); q.breakable()
q.text("rx_freq_lmda = ");
q.text("%#x" % self.rx_freq_lmda)
q.text(","); q.breakable()
q.text("rx_offset = ");
q.text("%#x" % self.rx_offset)
q.text(","); q.breakable()
q.text("rx_grid_span = ");
q.text("%#x" % self.rx_grid_span)
q.text(","); q.breakable()
q.text("tx_pwr = ");
q.text("%#x" % self.tx_pwr)
q.text(","); q.breakable()
q.text("rx_pwr = ");
q.text("%#x" % self.rx_pwr)
q.text(","); q.breakable()
q.text("bias_current = ");
q.text("%#x" % self.bias_current)
q.text(","); q.breakable()
q.text("temperature = ");
q.text("%#x" % self.temperature)
q.breakable()
q.text('}')
port_stats_prop.subtypes[1] = optical
|
|
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.contrib.messages.storage.fallback import FallbackStorage
from django.http import HttpRequest
from django.test import TestCase
from campaigns.models import *
from campaigns.forms import *
from campaigns.views import *
class Base(TestCase):
def setUp(self):
User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
self.user = authenticate(username='john', password='johnpassword')
def tearDown(self):
User.objects.get(id = self.user.id).delete()
class HomePageTest(Base):
def test_does_root_url_resolves_the_home_page(self):
called = self.client.get('/')
self.assertTemplateUsed(called, 'home.html')
def test_does_logout_redirects_to_the_home_page(self):
response = self.client.get('/logout')
self.assertRedirects(response, '/')
class HallPopulationTest(Base):
def test_does_populate_halls_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title = 'a', description = 'b')
hall = Hall.objects.create(name='abv', capacity=50, campaign = campaign)
called = self.client.get('/campaigns/%d/halls' % campaign.id)
self.assertTemplateUsed(called, 'populate_halls.html')
def test_does_populate_halls_renders_show_campaign_if_there_isnt_enough_capacity(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title = 'a', description = 'b')
self.assertEqual(campaign.hall_set.count(), 0)
response = self.client.get('/campaigns/%d/halls' % campaign.id)
self.assertTemplateUsed(response, 'show_campaign.html')
def build_POST_request(user, args_dict):
"""Build a POST HttpRequest for `user` with `args_dict` as its POST data,
attaching a fallback message storage so that views which use
django.contrib.messages can run outside the full middleware stack."""
request = HttpRequest()
request.method = "POST"
request.user = user
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
for key in args_dict:
request.POST[key] = args_dict[key]
return request
class HallsViewsTest(Base):
def test_does_show_campaign_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title = 'a', description = 'b')
hall = Hall.objects.create(name='a', capacity = 10, campaign_id = campaign.id)
called = self.client.get('/campaigns/%d/halls/%d/' % (campaign.id, hall.id))
self.assertTemplateUsed(called, 'show_hall.html')
def test_does_create_hall_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title = 'a', description = 'b')
called = self.client.get('/campaigns/%d/halls/new' % campaign.id)
self.assertTemplateUsed(called, 'create_hall.html')
def test_does_create_hall_creates_new_hall_object_with_POST_request(self):
campaign = Campaign.objects.create(title = 'a', description = 'b')
self.assertEqual(Hall.objects.count(), 0)
create_hall(
build_POST_request(
self.user,
{'name': 'hall1', 'capacity': '10'}
),
campaign.id
)
self.assertEqual(Hall.objects.count(), 1)
def test_does_edit_hall_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title = 'a', description = 'b')
hall = Hall.objects.create(name='abv', capacity=50, campaign = campaign)
called = self.client.get('/campaigns/%d/halls/%d/edit' % (campaign.id, hall.id))
self.assertTemplateUsed(called, 'edit_hall.html')
def test_does_edit_hall_saves_edit_fields_correctly(self):
campaign = Campaign.objects.create(title = 'a', description = 'b')
self.assertEqual(Hall.objects.count(), 0)
create_hall(
build_POST_request(self.user, {'name': 'h1', 'capacity': '10'}),
campaign.id
)
self.assertEqual(Hall.objects.count(), 1)
hall = Hall.objects.first()
edit_hall(
build_POST_request(self.user, {'name': 'h2', 'capacity': '20'}),
campaign.id,
hall.id
)
self.assertEqual(Hall.objects.count(), 1)
hall = Hall.objects.first()
self.assertEqual(hall.name, 'h2')
self.assertEqual(hall.capacity, 20)
def test_does_delete_hall_deletes_the_right_hall(self):
campaign = Campaign.objects.create(title = 'a', description = 'b')
self.assertEqual(Hall.objects.count(), 0)
create_hall(
build_POST_request(self.user, {'name': 'h1', 'capacity': '10'}),
campaign.id
)
self.assertEqual(Hall.objects.count(), 1)
hall = Hall.objects.first()
delete_hall(build_POST_request(self.user, {}), campaign.id, hall.id)
self.assertEqual(Hall.objects.count(), 0)
def test_does_delete_hall_doesnt_works_with_other_than_POST_requests(self):
request = HttpRequest()
request.method = "GET"
request.user = self.user
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
campaign = Campaign.objects.create(title = 'a', description = 'b')
self.assertEqual(Hall.objects.count(), 0)
create_hall(
build_POST_request(self.user, {'name': 'h1', 'capacity': '10'}),
campaign.id
)
self.assertEqual(Hall.objects.count(), 1)
hall = Hall.objects.first()
delete_hall(request, campaign.id, hall.id)
self.assertEqual(Hall.objects.count(), 1)
class CampaignsViewsTest(Base):
def test_does_create_campaign_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
called = self.client.get('/campaigns/new')
self.assertTemplateUsed(called, 'create_campaign.html')
# TODO: check the docs for self.client.post and try making this test with that method
def test_does_create_campaign_saves_objects_with_POST_requests(self):
self.assertEqual(Campaign.objects.count(), 0)
create_campaign(build_POST_request(self.user, {'title': 'C1', 'description': 'C1Descr'}))
campaign = Campaign.objects.first()
self.assertEqual(Campaign.objects.count(), 1)
self.assertEqual(campaign.title, 'C1')
self.assertEqual(campaign.description, 'C1Descr')
def test_create_campaign_dont_saves_empty_objects(self):
self.assertEqual(Campaign.objects.count(), 0)
create_campaign(build_POST_request(self.user, {'title': '', 'description': ''}))
self.assertEqual(Campaign.objects.count(), 0)
def test_create_campaign_redirects_to_show_campaign_on_success(self):
self.client.login(username='john', password='johnpassword')
self.assertEqual(Campaign.objects.count(), 0)
response = self.client.post(
'/campaigns/new',
data={'title': 'asd', 'description': 'asdf'}
)
campaign = Campaign.objects.first()
self.assertEqual(Campaign.objects.count(), 1)
self.assertRedirects(response, '/campaigns/%d/' % campaign.id)
def test_does_show_campaign_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title = 'a', description = 'b')
called = self.client.get('/campaigns/%d/' % (campaign.id,))
self.assertTemplateUsed(called, 'show_campaign.html')
def test_does_show_campaign_redirects_home_if_campaign_is_None(self):
response = self.client.get('/campaigns/%d/' % (100))
self.assertRedirects(response, '/')
def test_does_show_campaign_list_title_and_description_if_campaign_exist(self):
self.client.login(username='john', password='johnpassword')
self.assertEqual(Campaign.objects.count(),0)
campaign = Campaign.objects.create(title = 'alright', description = 'base')
self.assertEqual(Campaign.objects.count(),1)
response = self.client.get('/campaigns/%d/' % (campaign.id))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'alright')
self.assertContains(response, 'base')
def test_does_show_campaign_lists_all_students_enrolled_in_it(self):
pass
def test_does_list_campaigns_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
called = self.client.get('/campaigns')
self.assertTemplateUsed(called, 'list_campaigns.html')
def test_does_list_campaigns_renders_all_campaigns(self):
self.assertEqual(Campaign.objects.count(), 0)
Campaign.objects.create(title = 'first', description='first_d')
Campaign.objects.create(title = 'second', description='second_d')
self.assertEqual(Campaign.objects.count(), 2)
request = HttpRequest()
request.user = self.user
response = list_campaigns(request)
self.assertContains(response,'first')
self.assertContains(response,'second_d')
def test_does_delete_campaign_deletes_the_right_campaign(self):
self.assertEqual(Campaign.objects.count(), 0)
Campaign.objects.create(title = 'first', description='first_d')
self.assertEqual(Campaign.objects.count(), 1)
delete_campaign(
build_POST_request(self.user, {}),
Campaign.objects.first().id
)
self.assertEqual(Campaign.objects.count(), 0)
def test_does_delete_campaign_works_only_with_POST_requests(self):
self.assertEqual(Campaign.objects.count(), 0)
Campaign.objects.create(title = 'first', description='first_d')
self.assertEqual(Campaign.objects.count(), 1)
request = HttpRequest()
request.method = "GET"
request.user = self.user
delete_campaign(request, Campaign.objects.first().id)
self.assertEqual(Campaign.objects.count(), 1)
sample_student_dict = {
'egn': '0011223344',
'first_name': 'Asen',
'second_name': 'Asenov',
'third_name': 'Asenski',
'address': 'ul. Random Randomizer',
'parent_name': 'Asen Asenov',
'parent_number': '0123456789',
'previous_school': 'SOU "Random Randomizer"',
'bel_school': '3',
'physics_school': '4',
'maths_exam': '4',
'maths_tues_exam': '5',
'bel_exam': '5',
'first_choice': 'SP',
'second_choice': 'KM'
}
class StudentViewTest(Base):
def test_does_create_student_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title='a', description='b')
campaign.save()
called = self.client.get('/campaigns/%d/students/new' % campaign.id)
self.assertTemplateUsed(called, 'create_student.html')
def test_does_create_student_saves_new_student_on_POST_request(self):
campaign = Campaign.objects.create(title='a', description='b')
self.assertEqual(campaign.student_set.count(), 0)
response = create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
self.assertEqual(campaign.student_set.count(), 1)
self.assertEqual(
campaign,
Student.objects.first().campaign
)
self.assertEqual(campaign.student_set.first().entry_number, 1)
self.assertEqual(campaign.student_set.first().first_name, 'Asen')
self.assertEqual(campaign.student_set.first().egn, '0011223344')
def test_does_create_student_gives_students_appropriate_entry_numbers(self):
campaign = Campaign.objects.create(title='a', description='b')
self.assertEqual(Student.objects.count(), 0)
create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
self.assertEqual(Student.objects.count(), 3)
students = Student.objects.all()
for i,s in enumerate(students):
self.assertEqual(s.entry_number, i+1)
def test_does_create_student_redirects_to_the_campaign_he_belongs_to(self):
self.client.login(username='john', password='johnpassword')
campaign = Campaign.objects.create(title='a', description='b')
response = self.client.post(
'/campaigns/%d/students/new' % campaign.id,
data={
'first_name': 'asen', 'second_name': 'asenov',
'third_name': 'asenski', 'egn': '1234567890',
'previous_school': 'adsd', 'parent_name': 'adsad',
'address': 'asda', 'bel_school': 4,
'physics_school': 5, 'bel_exam': 3,
'maths_exam': 4, 'maths_tues_exam': 5,
'first_choice': 'sp', 'second_choice': 'km'
}
)
self.assertRedirects(response, '/campaigns/%d/' % campaign.id)
def test_does_show_student_resolves_the_right_url(self):
self.client.login(username='john', password='johnpassword')
self.assertEqual(Campaign.objects.count(), 0)
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
student = Student.objects.create(
campaign=campaign, first_name='Pesho', second_name='Petrov',
third_name='Popov', egn = '1234567891', entry_number=1
)
response = self.client.get('/campaigns/%d/students/%d/' % (campaign.id, student.id))
self.assertTemplateUsed(response, 'show_student.html')
def test_does_show_student_lists_appropriate_fields(self):
self.assertEqual(Campaign.objects.count(), 0)
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
student = Student.objects.create(
campaign=campaign, first_name='Pesho', second_name='Petrov',
third_name='Popov', egn = '1234567891', entry_number=1
)
request = HttpRequest()
request.user = self.user
response = show_student(request, campaign.id, student.id)
self.assertContains(response, 'Pesho')
self.assertContains(response, 'Petrov')
self.assertContains(response, 'Popov')
self.assertContains(response, '1234567891')
def test_does_edit_student_resolves_the_right_url_fields(self):
self.client.login(username='john', password='johnpassword')
self.assertEqual(Campaign.objects.count(), 0)
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
student = Student.objects.create(
campaign=campaign, first_name='Pesho', second_name='Petrov',
third_name='Popov', egn = '1234567891', entry_number=1
)
response = self.client.get('/campaigns/%d/students/%d/edit' % (campaign.id, student.id))
self.assertTemplateUsed(response, 'edit_student.html')
def test_does_edit_student_redirects_to_the_campaigns_url_if_ids_does_not_exist(self):
self.client.login(username='john', password='johnpassword')
self.assertEqual(Campaign.objects.count(), 0)
self.assertEqual(Student.objects.count(), 0)
response = self.client.get('/campaigns/%d/students/%d/edit' % (0, 0))
self.assertRedirects(response, '/campaigns')
def test_does_edit_student_saves_the_edited_fields_correctly(self):
self.assertEqual(Campaign.objects.count(), 0)
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
student = Student.objects.create(
campaign=campaign, first_name='Pesho', second_name='Petrov',
third_name='Popov', egn = '1234567891', entry_number=1
)
response = edit_student(build_POST_request(self.user, sample_student_dict), campaign.id, student.id)
student = Student.objects.get(id = student.id)
self.assertTemplateUsed(response, 'edit_student.html')
self.assertEqual(student.first_name, 'Asen')
self.assertEqual(student.second_name, 'Asenov')
self.assertEqual(student.third_name, 'Asenski')
def test_does_create_student_evaluates_the_given_grades_and_saves_the_result(self):
self.assertEqual(Campaign.objects.count(), 0)
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
saved_student = Student.objects.first()
self.assertEqual(saved_student.grades_evaluated, 36.0)
def test_does_edit_student_reevaluates_the_given_grades_and_saves_the_result(self):
self.assertEqual(Campaign.objects.count(), 0)
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
saved_student = Student.objects.first()
request = build_POST_request(self.user, sample_student_dict)
request.POST['bel_school'] = 4
request.POST['physics_school'] = 5
edit_student(request, campaign.id, saved_student.id)
saved_student = Student.objects.get(id = saved_student.id)
self.assertEqual(saved_student.grades_evaluated, 38.0)
def test_does_delete_student_deletes_the_right_student(self):
self.client.login(username='john', password='johnpassword')
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
self.assertEqual(Student.objects.count(), 1)
student = Student.objects.last()
request = build_POST_request(self.user, {})
delete_student(request, student.campaign_id, student.id)
self.assertEqual(Student.objects.count(), 0)
def test_does_delete_student_deletes_only_if_the_request_method_is_POST(self):
self.client.login(username='john', password='johnpassword')
self.assertEqual(Student.objects.count(), 0)
campaign = Campaign.objects.create(title='a', description='b')
create_student(
build_POST_request(self.user, sample_student_dict),
campaign.id
)
self.assertEqual(Student.objects.count(), 1)
student = Student.objects.last()
request = HttpRequest()
request.method = "GET"
request.user = self.user
delete_student(request, student.campaign_id, student.id)
self.assertEqual(Student.objects.count(), 1)
|
|
"""
Interact with the world:
- swing the arm, sneak, sprint, jump with a horse, leave the bed
- look around
- dig/place/use blocks
- use the held (active) item
- use/attack entities
- steer vehicles
- edit and sign books
By default, the client sends swing and look packets like the vanilla client.
This can be disabled by setting the ``auto_swing`` and ``auto_look`` flags.
"""
from spockbot.mcdata import constants
from spockbot.mcp import nbt
from spockbot.mcp.proto import MC_SLOT
from spockbot.plugins.base import PluginBase, pl_announce
from spockbot.vector import Vector3
@pl_announce('Interact')
class InteractPlugin(PluginBase):
requires = ('ClientInfo', 'Inventory', 'Net', 'Channels')
def __init__(self, ploader, settings):
super(InteractPlugin, self).__init__(ploader, settings)
ploader.provides('Interact', self)
self.sneaking = False
self.sprinting = False
self.dig_pos_dict = {'x': 0, 'y': 0, 'z': 0}
self.auto_swing = True # move arm when clicking
self.auto_look = True # look at clicked things
def swing_arm(self):
self.net.push_packet('PLAY>Animation', {})
def _entity_action(self, action, jump_boost=100):
entity_id = self.clientinfo.eid
self.net.push_packet('PLAY>Entity Action', {
'eid': entity_id,
'action': action,
'jump_boost': jump_boost,
})
def leave_bed(self):
self._entity_action(constants.ENTITY_ACTION_LEAVE_BED)
def sneak(self, sneak=True):
self._entity_action(constants.ENTITY_ACTION_SNEAK
if sneak else constants.ENTITY_ACTION_UNSNEAK)
self.sneaking = sneak
def unsneak(self):
self.sneak(False)
def sprint(self, sprint=True):
self._entity_action(constants.ENTITY_ACTION_START_SPRINT if sprint
else constants.ENTITY_ACTION_STOP_SPRINT)
self.sprinting = sprint
def unsprint(self):
self.sprint(False)
def jump_horse(self, jump_boost=100):
self._entity_action(constants.ENTITY_ACTION_JUMP_HORSE, jump_boost)
def open_inventory(self):
self._entity_action(constants.ENTITY_ACTION_OPEN_INVENTORY)
def look(self, yaw=0.0, pitch=0.0):
"""
Turn the head. Both angles are in degrees.
"""
self.clientinfo.position.pitch = pitch
self.clientinfo.position.yaw = yaw
def look_rel(self, d_yaw=0.0, d_pitch=0.0):
self.look(self.clientinfo.position.yaw + d_yaw,
self.clientinfo.position.pitch + d_pitch)
def look_at_rel(self, delta):
self.look(*delta.yaw_pitch)
def look_at(self, pos):
delta = pos - self.clientinfo.position
delta.y -= constants.PLAYER_HEIGHT
if delta.x or delta.z:
self.look_at_rel(delta)
else:
self.look(self.clientinfo.position.yaw, delta.yaw_pitch.pitch)
def _send_dig_block(self, status, pos=None, face=constants.FACE_Y_POS):
if status == constants.DIG_START:
self.dig_pos_dict = pos.get_dict().copy()
self.net.push_packet('PLAY>Player Digging', {
'status': status,
'location': self.dig_pos_dict,
'face': face,
})
def start_digging(self, pos):
if self.auto_look:
self.look_at(pos) # TODO look at block center
self._send_dig_block(constants.DIG_START, pos)
if self.auto_swing:
self.swing_arm()
# TODO send swing animation until done or stopped
def cancel_digging(self):
self._send_dig_block(constants.DIG_CANCEL)
def finish_digging(self):
self._send_dig_block(constants.DIG_FINISH)
def dig_block(self, pos):
"""
Not cancelable.
"""
self.start_digging(pos)
self.finish_digging()
def _send_click_block(self, pos, face=1, cursor_pos=Vector3(8, 8, 8)):
self.net.push_packet('PLAY>Player Block Placement', {
'location': pos.get_dict(),
'direction': face,
'held_item': self.inventory.active_slot.get_dict(),
'cur_pos_x': int(cursor_pos.x),
'cur_pos_y': int(cursor_pos.y),
'cur_pos_z': int(cursor_pos.z),
})
def click_block(self, pos, face=1, cursor_pos=Vector3(8, 8, 8),
look_at_block=True, swing=True):
"""
Click on a block.
Examples: push button, open window, make redstone ore glow
Args:
face (int): side of the block on which the block is placed on
cursor_pos (Vector3): where to click inside the block,
each dimension 0-15
"""
if look_at_block and self.auto_look:
# TODO look at cursor_pos
self.look_at(pos)
self._send_click_block(pos, face, cursor_pos)
if swing and self.auto_swing:
self.swing_arm()
def place_block(self, pos, face=1, cursor_pos=Vector3(8, 8, 8),
sneak=True, look_at_block=True, swing=True):
"""
Place a block next to ``pos``.
If the block at ``pos`` is air, place at ``pos``.
"""
sneaking_before = self.sneaking
if sneak:
self.sneak()
self.click_block(pos, face, cursor_pos, look_at_block, swing)
if sneak:
self.sneak(sneaking_before)
def use_bucket(self, pos): # TODO
"""
Using buckets is different from placing blocks.
See "Special note on using buckets"
in http://wiki.vg/Protocol#Player_Block_Placement
"""
raise NotImplementedError(self.use_bucket.__doc__)
def activate_item(self):
"""
Use (hold right-click) the item in the active slot.
Examples: pull the bow, start eating once, throw an egg.
"""
self._send_click_block(pos=Vector3(-1, 255, -1),
face=-1,
cursor_pos=Vector3(-1, -1, -1))
def deactivate_item(self):
"""
Stop using (release right-click) the item in the active slot.
Examples: shoot the bow, stop eating.
"""
self._send_dig_block(constants.DIG_DEACTIVATE_ITEM)
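# Illustrative pairing of activate_item/deactivate_item (assumes the active
# slot holds a bow and that the caller imports `time`):
#     interact.activate_item()    # start drawing the bow
#     time.sleep(1.0)
#     interact.deactivate_item()  # release, firing the arrow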
def use_entity(self, entity, cursor_pos=None,
action=constants.INTERACT_ENTITY):
"""
Uses (right-click) an entity to open its window.
Setting ``cursor_pos`` sets ``action`` to "interact at".
"""
if self.auto_look:
self.look_at(Vector3(entity)) # TODO look at cursor_pos
if cursor_pos is not None:
action = constants.INTERACT_ENTITY_AT
packet = {'target': entity.eid, 'action': action}
if action == constants.INTERACT_ENTITY_AT:
packet['target_x'] = cursor_pos.x
packet['target_y'] = cursor_pos.y
packet['target_z'] = cursor_pos.z
self.net.push_packet('PLAY>Use Entity', packet)
if self.auto_swing:
self.swing_arm()
def attack_entity(self, entity):
self.use_entity(entity, action=constants.ATTACK_ENTITY)
def mount_vehicle(self, entity):
self.use_entity(entity)
def steer_vehicle(self, sideways=0.0, forward=0.0,
jump=False, unmount=False):
flags = 0
if jump:
flags += 1
if unmount:
flags += 2
self.net.push_packet('PLAY>Steer Vehicle', {
'sideways': sideways,
'forward': forward,
'flags': flags,
})
def unmount_vehicle(self):
self.steer_vehicle(unmount=True)
def jump_vehicle(self):
self.steer_vehicle(jump=True)
def write_book(self, text, author="", title="", sign=False):
"""Write text to the current book in hand, optionally sign the book"""
book = self._setup_book()
if book is None:
return False
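# The generator below splits `text` into BOOK_CHARS_PER_PAGE-sized chunks;
# for example (illustrative), if BOOK_CHARS_PER_PAGE were 256, a
# 600-character text would yield pages of 256, 256 and 88 characters, and
# edit_book() caps the total at BOOK_MAXPAGES.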
pages = (text[i:i + constants.BOOK_CHARS_PER_PAGE]
for i in range(0, len(text), constants.BOOK_CHARS_PER_PAGE))
self.edit_book(pages)
if sign:
self.sign_book(author, title)
def edit_book(self, pages):
"""Set the pages of current book in hand"""
book = self._setup_book()
if book is None:
return False
nbtpages = nbt.TagList(nbt.TagString)
for i, page in enumerate(pages):
if i >= constants.BOOK_MAXPAGES:
break
nbtpages.insert(i, nbt.TagString(page))
book.nbt["pages"] = nbtpages
self.channels.send("MC|BEdit", self._pack_book(book))
def sign_book(self, author, title):
"""Sign current book in hand"""
book = self._setup_book()
if book is None:
return False
book.nbt["author"] = nbt.TagString(author)
book.nbt["title"] = nbt.TagString(title)
# TODO: don't use hard coded id
book.item_id = 387 # written book
self.channels.send("MC|BSign", self._pack_book(book))
def _setup_book(self):
book = self.inventory.active_slot
# TODO: Dont use hard coded ID
if book.item_id != 386: # book and quill
return None
if book.nbt is None:
book.nbt = nbt.TagCompound()
return book
def _pack_book(self, book):
return self.channels.encode(((MC_SLOT, "slot"),),
{"slot": book.get_dict()})
|
|
import copy
from os.path import join
import pytest
from jinja2 import Environment, FileSystemLoader
from invoke import UnexpectedExit
from cosmo_tester.framework.util import (generate_ca_cert,
generate_ssl_certificate)
from .cfy_cluster_manager_shared import (
CLUSTER_MANAGER_RESOURCES_PATH,
_get_config_dict,
_install_cluster,
REMOTE_LICENSE_PATH,
_update_nine_nodes_config_dict_vms,
_update_three_nodes_config_dict_vms,
)
REMOTE_CERTS_PATH = '/tmp/certs'
REMOTE_CONFIGS_PATH = '/tmp/config_files'
@pytest.fixture()
def local_certs_path(tmp_path):
dir_path = tmp_path / 'certs'
dir_path.mkdir()
return dir_path
@pytest.fixture()
def local_config_files(tmp_path):
dir_path = tmp_path / 'config_files'
dir_path.mkdir()
return dir_path
@pytest.mark.three_vms
def test_create_three_nodes_cluster(three_vms, test_config, ssh_key, logger):
"""Tests that a three nodes cluster is successfully created."""
node1, node2, node3 = three_vms
three_nodes_config_dict = _get_config_dict(3, test_config, node1.username)
_update_three_nodes_config_dict_vms(three_nodes_config_dict,
[node1, node2, node3])
_install_cluster(node1, three_vms, three_nodes_config_dict, test_config,
ssh_key, logger)
@pytest.mark.nine_vms
def test_create_nine_nodes_cluster(nine_vms, test_config, ssh_key, logger):
"""Tests that a nine nodes cluster is successfully created."""
nodes_list = [node for node in nine_vms]
nine_nodes_config_dict = _get_config_dict(9, test_config,
nodes_list[0].username)
_update_nine_nodes_config_dict_vms(nine_nodes_config_dict, nodes_list)
_install_cluster(nodes_list[6], nodes_list, nine_nodes_config_dict,
test_config, ssh_key, logger)
@pytest.mark.three_vms
def test_three_nodes_cluster_using_provided_certificates(
three_vms, test_config, ssh_key, local_certs_path, logger, tmpdir):
"""Tests that the provided certificates are being used in the cluster."""
node1, node2, node3 = three_vms
nodes_list = [node1, node2, node3]
logger.info('Creating certificates')
_create_certificates(local_certs_path, nodes_list, tmpdir)
logger.info('Copying certificates to node-1')
for cert in local_certs_path.iterdir():
node1.put_remote_file(local_path=str(cert),
remote_path=join(REMOTE_CERTS_PATH, cert.name))
logger.info('Preparing cluster install configuration file')
three_nodes_config_dict = _get_config_dict(3, test_config, node1.username)
_update_three_nodes_config_dict_vms(three_nodes_config_dict, nodes_list)
three_nodes_config_dict['ca_cert_path'] = join(REMOTE_CERTS_PATH, 'ca.pem')
three_nodes_config_dict['ca_key_path'] = join(REMOTE_CERTS_PATH, 'ca.key')
for i, node in enumerate(nodes_list, start=1):
three_nodes_config_dict['existing_vms']['node-{0}'.format(i)].update({
'cert_path': join(REMOTE_CERTS_PATH, 'node-{0}.crt'.format(i)),
'key_path': join(REMOTE_CERTS_PATH, 'node-{0}.key'.format(i))
})
_install_cluster(node1, three_vms, three_nodes_config_dict, test_config,
ssh_key, logger)
logger.info('Asserting certs were successfully copied')
local_ca_path = local_certs_path / 'ca.pem'
ca_path_in_use = '/etc/cloudify/ssl/cloudify_internal_ca_cert.pem'
for i, node in enumerate(nodes_list, start=1):
node_name = 'node-{0}'.format(i)
logger.info('Asserting certificates for %s', node_name)
local_node_cert_path = local_certs_path / '{0}.crt'.format(node_name)
local_node_key_path = local_certs_path / '{0}.key'.format(node_name)
cert_path_in_use = '/etc/cloudify/ssl/cloudify_internal_cert.pem'
key_path_in_use = '/etc/cloudify/ssl/cloudify_internal_key.pem'
assert (local_node_cert_path.read_text() ==
node.get_remote_file_content(cert_path_in_use))
assert (local_node_key_path.read_text() ==
node.get_remote_file_content(key_path_in_use))
assert (local_ca_path.read_text() ==
node.get_remote_file_content(ca_path_in_use))
@pytest.mark.three_vms
def test_three_nodes_using_provided_config_files(
three_vms, test_config, ssh_key, local_certs_path,
local_config_files, logger, tmpdir):
node1, node2, node3 = three_vms
nodes_list = [node1, node2, node3]
three_nodes_config_dict = _get_config_dict(3, test_config,
node1.username)
_install_cluster_using_provided_config_files(
nodes_list, three_nodes_config_dict, test_config,
ssh_key, local_certs_path, local_config_files, logger, tmpdir)
logger.info('Asserting config_files')
cluster_manager_config_files = '/tmp/cloudify_cluster_manager/config_files'
for i, node in enumerate(nodes_list, start=1):
logger.info('Asserting config.yaml files for %s', 'node-{0}'.format(i))
assert (node.local_manager_config_path.read_text() ==
node.get_remote_file_content(
join(cluster_manager_config_files,
node.local_manager_config_path.name))
)
assert (node.local_postgresql_config_path.read_text() ==
node.get_remote_file_content(
join(cluster_manager_config_files,
node.local_postgresql_config_path.name))
)
assert (node.local_rabbitmq_config_path.read_text() ==
node.get_remote_file_content(
join(cluster_manager_config_files,
node.local_rabbitmq_config_path.name))
)
@pytest.mark.three_vms
def test_three_nodes_cluster_override(
three_vms, test_config, ssh_key, local_certs_path,
local_config_files, logger, tmpdir):
"""Tests the override install Mechanism.
The test goes as follows:
1. Install a three node cluster using an erroneous
manager config.yaml file. This will of course, cause an error.
2. Catch the error, and install the cluster from the start using
the flag `--override`. This step doesn't use generated config.yaml
files.
"""
node1, node2, node3 = three_vms
nodes_list = [node1, node2, node3]
three_nodes_config_dict = _get_config_dict(3, test_config, node1.username)
first_config_dict = copy.deepcopy(three_nodes_config_dict)
try:
_install_cluster_using_provided_config_files(
nodes_list, first_config_dict, test_config, ssh_key,
local_certs_path, local_config_files, logger, tmpdir,
cause_error=True)
except UnexpectedExit: # This is the error Fabric raises
logger.info('Error caught. Installing the cluster using override.')
_update_three_nodes_config_dict_vms(three_nodes_config_dict,
[node1, node2, node3])
_install_cluster(node1, three_vms, three_nodes_config_dict,
test_config, ssh_key, logger, override=True)
@pytest.mark.three_vms
def test_three_nodes_cluster_offline(
three_vms, test_config, ssh_key, logger):
"""Tests the cluster install in offline environment."""
node1, node2, node3 = three_vms
three_nodes_config_dict = _get_config_dict(3, test_config, node1.username)
local_rpm_path = '/tmp/manager_install_rpm_path.rpm'
node1.run_command('curl -o {0} {1}'.format(
local_rpm_path, three_nodes_config_dict['manager_rpm_path']))
three_nodes_config_dict['manager_rpm_path'] = local_rpm_path
_update_three_nodes_config_dict_vms(three_nodes_config_dict,
[node1, node2, node3])
_install_cluster(node1, three_vms, three_nodes_config_dict, test_config,
ssh_key, logger)
def _install_cluster_using_provided_config_files(
nodes_list, three_nodes_config_dict, test_config,
ssh_key, local_certs_path, local_config_files, logger, tmpdir,
cause_error=False, override=False):
"""Install a Cloudify cluster using generated config files.
In order to do so, the function:
1. Generates certificates for each node in the `nodes_list`.
2. Passes the generated certificates to the different nodes.
3. Generates the config files for each node based on templates.
4. Installs the cluster using the generated config files.
:param cause_error: Whether to cause an error during the installation.
:param override: Whether to run the installation with override flag.
"""
node1 = nodes_list[0]
logger.info('Creating certificates and passing them to the instances')
_create_certificates(local_certs_path, nodes_list, tmpdir,
pass_certs=True)
logger.info('Preparing config files')
_prepare_three_nodes_config_files(nodes_list, local_config_files,
cause_error)
_update_three_nodes_config_dict_vms(three_nodes_config_dict, nodes_list)
for i, node in enumerate(nodes_list, start=1):
three_nodes_config_dict['existing_vms']['node-{0}'.format(i)][
'config_path'].update({
'manager_config_path': node.remote_manager_config_path,
'postgresql_config_path': node.remote_postgresql_config_path,
'rabbitmq_config_path': node.remote_rabbitmq_config_path
})
_install_cluster(node1, nodes_list, three_nodes_config_dict, test_config,
ssh_key, logger, override)
def _prepare_three_nodes_config_files(nodes_list,
local_config_files,
cause_error=False):
"""Prepare the config files for the three nodes cluster installation.
:param nodes_list: The three VMs list.
:param local_config_files: The local config files' directory.
It's created using a pytest fixture.
:param cause_error: If true, an error will be raised during the 1st
Manager installation.
"""
rabbitmq_cluster = {
node.hostname: {
'networks': {
'default': str(node.private_ip_address)
}
} for node in nodes_list
}
postgresql_cluster = {
node.hostname: {
'ip': str(node.private_ip_address)
} for node in nodes_list
}
manager_postgresql_server = {} if cause_error else postgresql_cluster
templates_env = Environment(loader=FileSystemLoader(
join(CLUSTER_MANAGER_RESOURCES_PATH, 'config_files_templates')))
_prepare_manager_config_files(
templates_env.get_template('manager_config.yaml'), nodes_list,
rabbitmq_cluster, manager_postgresql_server, local_config_files)
_prepare_postgresql_config_files(
templates_env.get_template('postgresql_config.yaml'),
nodes_list, postgresql_cluster, local_config_files)
_prepare_rabbitmq_config_files(
templates_env.get_template('rabbitmq_config.yaml'),
nodes_list, rabbitmq_cluster, local_config_files)
def _prepare_manager_config_files(template, nodes_list, rabbitmq_cluster,
postgresql_cluster, local_config_files):
for i, node in enumerate(nodes_list, start=1):
rendered_data = template.render(
node=node,
ca_path=join(REMOTE_CERTS_PATH, 'ca.pem'),
license_path=REMOTE_LICENSE_PATH,
rabbitmq_cluster=rabbitmq_cluster,
postgresql_cluster=postgresql_cluster
)
config_name = 'manager-{0}_config.yaml'.format(i)
remote_config_path = join(REMOTE_CONFIGS_PATH, config_name)
local_config_file = local_config_files / config_name
local_config_file.write_text(u'{0}'.format(rendered_data))
nodes_list[0].put_remote_file(remote_config_path,
str(local_config_file))
node.local_manager_config_path = local_config_file
node.remote_manager_config_path = remote_config_path
def _prepare_postgresql_config_files(template, nodes_list, postgresql_cluster,
local_config_files):
for i, node in enumerate(nodes_list, start=1):
rendered_data = template.render(
node=node,
ca_path=join(REMOTE_CERTS_PATH, 'ca.pem'),
postgresql_cluster=postgresql_cluster
)
config_name = 'postgresql-{0}_config.yaml'.format(i)
remote_config_path = join(REMOTE_CONFIGS_PATH, config_name)
local_config_file = local_config_files / config_name
local_config_file.write_text(u'{0}'.format(rendered_data))
nodes_list[0].put_remote_file(remote_config_path,
str(local_config_file))
node.local_postgresql_config_path = local_config_file
node.remote_postgresql_config_path = remote_config_path
def _prepare_rabbitmq_config_files(template, nodes_list, rabbitmq_cluster,
local_config_files):
for i, node in enumerate(nodes_list, start=1):
rendered_data = template.render(
node=node,
ca_path=join(REMOTE_CERTS_PATH, 'ca.pem'),
rabbitmq_cluster=rabbitmq_cluster,
join_cluster=nodes_list[0].hostname if i > 1 else None
)
config_name = 'rabbitmq-{0}_config.yaml'.format(i)
remote_config_path = join(REMOTE_CONFIGS_PATH, config_name)
local_config_file = local_config_files / config_name
local_config_file.write_text(u'{0}'.format(rendered_data))
nodes_list[0].put_remote_file(remote_config_path,
str(local_config_file))
node.local_rabbitmq_config_path = local_config_file
node.remote_rabbitmq_config_path = remote_config_path
def _create_certificates(local_certs_path, nodes_list, tmpdir,
pass_certs=False):
"""Generate a CA certificate plus a certificate/key pair for every node in
`nodes_list`; if `pass_certs` is True, also copy them to the nodes under
REMOTE_CERTS_PATH."""
ca_base = str(local_certs_path / 'ca.')
ca_cert = ca_base + 'pem'
ca_key = ca_base + 'key'
generate_ca_cert(ca_cert, ca_key)
for i, node in enumerate(nodes_list, start=1):
node_cert = str(local_certs_path / 'node-{0}.crt'.format(i))
node_key = str(local_certs_path / 'node-{0}.key'.format(i))
generate_ssl_certificate(
[node.private_ip_address, node.ip_address],
node.hostname,
tmpdir,
node_cert,
node_key,
ca_cert,
ca_key
)
if pass_certs:
remote_cert = join(REMOTE_CERTS_PATH, 'node-{0}.crt'.format(i))
remote_key = join(REMOTE_CERTS_PATH, 'node-{0}.key'.format(i))
node.cert_path = remote_cert
node.key_path = remote_key
node.put_remote_file(remote_cert, node_cert)
node.put_remote_file(remote_key, node_key)
node.put_remote_file(join(REMOTE_CERTS_PATH, 'ca.pem'), ca_cert)
node.put_remote_file(join(REMOTE_CERTS_PATH, 'ca.key'), ca_key)
|
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs an automated Cronet performance benchmark.
This script:
1. Sets up "USB reverse tethering" which allows network traffic to flow from
an Android device connected to the host machine via a USB cable.
2. Starts HTTP and QUIC servers on the host machine.
3. Installs an Android app on the attached Android device and runs it.
4. Collects the results from the app.
Prerequisites:
1. A rooted (i.e. "adb root" succeeds) Android device connected via a USB cable
to the host machine (i.e. the computer running this script).
2. quic_server has been built for the host machine, e.g. via:
gn gen out/Release --args="is_debug=false"
ninja -C out/Release quic_server
3. cronet_perf_test_apk has been built for the Android device, e.g. via:
./components/cronet/tools/cr_cronet.py gn -r
ninja -C out/Release cronet_perf_test_apk
4. If "sudo ufw status" doesn't say "Status: inactive", run "sudo ufw disable".
5. sudo apt-get install lighttpd
6. If the usb0 interface on the host keeps losing its IPv4 address
(WaitFor(HasHostAddress) will keep failing), NetworkManager may need to be
told to leave usb0 alone with these commands:
sudo bash -c "printf \"\\n[keyfile]\
\\nunmanaged-devices=interface-name:usb0\\n\" \
>> /etc/NetworkManager/NetworkManager.conf"
sudo service network-manager restart
Invocation:
./run.py
Output:
Benchmark timings are output by telemetry to stdout and written to
./results.html
"""
import json
import argparse
import os
import shutil
import sys
import tempfile
import time
import six.moves.urllib_parse # pylint: disable=import-error
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools', 'perf'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build', 'android'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'components'))
# pylint: disable=wrong-import-position
from chrome_telemetry_build import chromium_config
from devil.android import device_utils
from devil.android.sdk import intent
from core import benchmark_runner
from cronet.tools import android_rndis_forwarder
from cronet.tools import perf_test_utils
import lighttpd_server
from pylib import constants
from telemetry import android
from telemetry import benchmark
from telemetry import story as story_module
from telemetry.web_perf import timeline_based_measurement
# pylint: enable=wrong-import-position
# pylint: disable=super-with-arguments
def GetDevice():
devices = device_utils.DeviceUtils.HealthyDevices()
assert len(devices) == 1
return devices[0]
class CronetPerfTestAndroidStory(android.AndroidStory):
# Android AppStory implementation wrapping CronetPerfTest app.
# Launches Cronet perf test app and waits for execution to complete
# by waiting for presence of DONE_FILE.
def __init__(self, device):
self._device = device
config = perf_test_utils.GetConfig(device)
device.RemovePath(config['DONE_FILE'], force=True)
self.url = 'http://dummy/?' + six.moves.urllib_parse.urlencode(config)
start_intent = intent.Intent(
package=perf_test_utils.APP_PACKAGE,
activity=perf_test_utils.APP_ACTIVITY,
action=perf_test_utils.APP_ACTION,
# |config| maps from configuration value names to the configured values.
# |config| is encoded as URL parameter names and values and passed to
# the Cronet perf test app via the Intent data field.
data=self.url,
extras=None,
category=None)
super(CronetPerfTestAndroidStory, self).__init__(
start_intent, name='CronetPerfTest',
# No reason to wait for app; Run() will wait for results. By default
# StartActivity will timeout waiting for CronetPerfTest, so override
# |is_app_ready_predicate| to not wait.
is_app_ready_predicate=lambda app: True)
def Run(self, shared_state):
while not self._device.FileExists(
perf_test_utils.GetConfig(self._device)['DONE_FILE']):
time.sleep(1.0)
class CronetPerfTestStorySet(story_module.StorySet):
def __init__(self, device):
super(CronetPerfTestStorySet, self).__init__()
# Create and add Cronet perf test AndroidStory.
self.AddStory(CronetPerfTestAndroidStory(device))
class CronetPerfTestMeasurement(
timeline_based_measurement.TimelineBasedMeasurement):
# For now AndroidStory's SharedAppState works only with
# TimelineBasedMeasurements, so implement one that just forwards results from
# Cronet perf test app.
def __init__(self, device, options):
super(CronetPerfTestMeasurement, self).__init__(options)
self._device = device
def WillRunStory(self, platform, story=None):
# Skip parent implementation which doesn't apply to Cronet perf test app as
# it is not a browser with a timeline interface.
pass
def Measure(self, platform, results):
# Reads results from |RESULTS_FILE| on target and adds to |results|.
jsonResults = json.loads(self._device.ReadFile(
perf_test_utils.GetConfig(self._device)['RESULTS_FILE']))
for test in jsonResults:
results.AddMeasurement(test, 'ms', jsonResults[test])
def DidRunStory(self, platform, results):
# Skip parent implementation which calls into tracing_controller which this
# doesn't have.
pass
class CronetPerfTestBenchmark(benchmark.Benchmark):
# Benchmark implementation spawning off Cronet perf test measurement and
# StorySet.
SUPPORTED_PLATFORMS = [story_module.expectations.ALL_ANDROID]
def __init__(self, max_failures=None):
super(CronetPerfTestBenchmark, self).__init__(max_failures)
self._device = GetDevice()
def CreatePageTest(self, options):
return CronetPerfTestMeasurement(self._device, options)
def CreateStorySet(self, options):
return CronetPerfTestStorySet(self._device)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output-format', default='html',
help='The output format of the results file.')
parser.add_argument('--output-dir', default=None,
help='The directory for the output file. Default value is '
'the base directory of this script.')
args, _ = parser.parse_known_args()
constants.SetBuildType(perf_test_utils.BUILD_TYPE)
# Install APK
device = GetDevice()
device.EnableRoot()
device.Install(perf_test_utils.APP_APK)
# Start USB reverse tethering.
android_rndis_forwarder.AndroidRndisForwarder(device,
perf_test_utils.GetAndroidRndisConfig(device))
# Start HTTP server.
http_server_doc_root = perf_test_utils.GenerateHttpTestResources()
config_file = tempfile.NamedTemporaryFile()
http_server = lighttpd_server.LighttpdServer(http_server_doc_root,
port=perf_test_utils.HTTP_PORT,
base_config_path=config_file.name)
perf_test_utils.GenerateLighttpdConfig(config_file, http_server_doc_root,
http_server)
assert http_server.StartupHttpServer()
config_file.close()
# Start QUIC server.
quic_server_doc_root = perf_test_utils.GenerateQuicTestResources(device)
quic_server = perf_test_utils.QuicServer(quic_server_doc_root)
quic_server.StartupQuicServer(device)
# Launch Telemetry's benchmark_runner on CronetPerfTestBenchmark.
# By specifying this file's directory as the benchmark directory, it will
# allow benchmark_runner to in turn open this file up and find the
# CronetPerfTestBenchmark class to run the benchmark.
top_level_dir = os.path.dirname(os.path.realpath(__file__))
expectations_files = [os.path.join(top_level_dir, 'expectations.config')]
runner_config = chromium_config.ChromiumConfig(
top_level_dir=top_level_dir,
benchmark_dirs=[top_level_dir],
expectations_files=expectations_files)
sys.argv.insert(1, 'run')
sys.argv.insert(2, 'run.CronetPerfTestBenchmark')
sys.argv.insert(3, '--browser=android-system-chrome')
sys.argv.insert(4, '--output-format=' + args.output_format)
if args.output_dir:
sys.argv.insert(5, '--output-dir=' + args.output_dir)
benchmark_runner.main(runner_config)
# Shutdown.
quic_server.ShutdownQuicServer()
shutil.rmtree(quic_server_doc_root)
http_server.ShutdownHttpServer()
shutil.rmtree(http_server_doc_root)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python3
'''
Manga-Mania
------------
Author: Koushtav Chakrabarty <TheIllusionistMirage>
Email: koushtav at fleptic dot eu
This code is licensed under the MIT license. Please see
`LICENSE` file for more info.
'''
import sys
import os
from pathlib import Path
from scraper import Scraper
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class MangaMania(QMainWindow):#, QWidget):
'''
Class MangaMania provides the UI part of Manga-Mania. An
instance of the `MangaMania` class should be used to handle
all GUI-related activities.
'''
def __init__(self):
'''
Default constructor
'''
super().__init__()
self.initializeUI()
def initializeUI(self):
# Base URL for search
self.sURL = 'http://mangafox.me/search.php?'
# Search method
# (cw = contain, bw = begin, ew = end)
self.sNAME_METHOD = 'name_method=cw'
# Search key (text, max. 150 chars)
self.sNAME = '&name='
# Search type
# (0 = Any, 1 = Japanese Manga, 2 = Korean Manhwa, 3 = Chinese Manhua)
self.sTYPE = '&type=0'
# Author method
# (cw = contain, bw = begin, ew = end)
self.sAUTHOR_METHOD = '&author_method=cw'
# Author name (text, max. 50 chars)
self.sAUTHOR = '&author='
# Artist method
# (cw = contain, bw = begin, ew = end)
self.sARTIST_METHOD = '&artist_method=cw'
# Artist name (text, max. 50 chars)
self.sARTIST = '&artist='
# Genres
# (0 = default, 1 = include, 2 = exclude)
self.sGENRE_ACTION = '&genres%5BAction%5D=0'
self.sGENRE_ADULT = '&genres%5BAdult%5D=0'
self.sGENRE_ADVENTURE = '&genres%5BAdventure%5D=0'
self.sGENRE_COMEDY = '&genres%5BComedy%5D=0'
self.sGENRE_DOUJINSHI = '&genres%5BDoujinshi%5D=0'
self.sGENRE_DRAMA = '&genres%5BDrama%5D=0'
self.sGENRE_ECCHI = '&genres%5BEcchi%5D=0'
self.sGENRE_FANTASY = '&genres%5BFantasy%5D=0'
self.sGENRE_GENDER_BENDER = '&genres%5BGender+Bender%5D=0'
self.sGENRE_HAREM = '&genres%5BHarem%5D=0'
self.sGENRE_HISTORICAL = '&genres%5BHistorical%5D=0'
self.sGENRE_HORROR = '&genres%5BHorror%5D=0'
self.sGENRE_JOSEI = '&genres%5BJosei%5D=0'
self.sGENRE_MARTIAL_ARTS = '&genres%5BMartial+Arts%5D=0'
self.sGENRE_MATURE = '&genres%5BMature%5D=0'
self.sGENRE_MECHA = '&genres%5BMecha%5D=0'
self.sGENRE_MYSTERY = '&genres%5BMystery%5D=0'
self.sGENRE_ONE_SHOT = '&genres%5BOne+Shot%5D=0'
self.sGENRE_PSYCHOLOGICAL = '&genres%5BPsychological%5D=0'
self.sGENRE_ROMANCE = '&genres%5BRomance%5D=0'
self.sGENRE_SCHOOL_LIFE = '&genres%5BSchool+Life%5D=0'
self.sGENRE_SCI_FI = '&genres%5BSci-fi%5D=0'
self.sGENRE_SEINEN = '&genres%5BSeinen%5D=0'
self.sGENRE_SHOUJO = '&genres%5BShoujo%5D=0'
self.sGENRE_SHOUJO_AI = '&genres%5BShoujo+Ai%5D=0'
self.sGENRE_SHOUNEN = '&genres%5BShounen%5D=0'
self.sGENRE_SHOUNEN_AI = '&genres%5BShounen+Ai%5D=0'
self.sGENRE_SLICE_OF_LIFE = '&genres%5BSlice+of+Life%5D=0'
self.sGENRE_SMUT = '&genres%5BSmut%5D=0'
self.sGENRE_SPORTS = '&genres%5BSports%5D=0'
self.sGENRE_SUPERNATURAL = '&genres%5BSupernatural%5D=0'
self.sGENRE_TRAGEDY = '&genres%5BTragedy%5D=0'
self.sGENRE_WEBTOONS = '&genres%5BWebtoons%5D=0'
self.sGENRE_YAOI = '&genres%5BYaoi%5D=0'
self.sGENRE_YURI = '&genres%5BYuri%5D=0'
# Release method
# (eq = on, lt = before, gt = after)
self.sRELEASE_METHOD = '&released_method=eq'
# Release date (YYYY)
self.sRELEASE_DATE = '&released='
# Rating method
# (eq = Equal, lt = Less Than, gt = Greater Than)
self.sRATING_METHOD = '&rating_method=eq'
# Rating (0-5 stars)
# (0 to 5 = zero to five stars)
# Leave blank if rating is not required
self.sRATING = '&rating='
# Series running or completed
# (0 = No, 1 = Yes)
# Leave blank if running info is not required
self.sRUNNING = '&is_completed='
# Set advanced search options ON
self.sADVANCED_OPTIONS = '&advopts=1'
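# Illustrative example (assumption: the search handler concatenates these
# fragments in order): a query for a manga whose name contains "berserk"
# would produce a URL of roughly the form
#   http://mangafox.me/search.php?name_method=cw&name=berserk&type=0
#   &author_method=cw&author=...&released=&rating=&is_completed=&advopts=1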
# permanent status message label
self.statusLabel = QLabel('<b>Now reading</b>: No chapter loaded', self)
# Add status bar
self.statusBar().showMessage('Ready')
self.statusBar().addPermanentWidget(self.statusLabel)
# Set initial geometry
#self.setGeometry(0, 0, 1100, 700)
#self.resize(1100, 650)
self.move(20,50)
#self.setFixedSize(1100, 650)
self.setMinimumSize(1100, 650)
self.setWindowTitle('Manga-Mania - Read manga with more fun!')
# Search action
self.searchAction = QAction(QIcon('resources/images/search-icon.png'), '&Search', self)
self.searchAction.setShortcut('Ctrl+S')
self.searchAction.setStatusTip('Search the MangaFox database')
self.searchAction.triggered.connect(self.searchMFDB)
# Exit action
self.exitAction = QAction(QIcon('resources/images/exit-icon.png'), '&Exit', self)
self.exitAction.setShortcut('Ctrl+X')
self.exitAction.setStatusTip('Exit the application')
self.exitAction.triggered.connect(qApp.quit)
# About action
self.aboutAction = QAction(QIcon('resources/images/about-icon.png'), '&About', self)
self.aboutAction.setStatusTip('Show brief application info')
self.aboutAction.triggered.connect(self.aboutInfo)
# Previous page action
self.previousAction = QAction(self)
self.previousAction.setShortcut(Qt.Key_A)
self.previousAction.triggered.connect(self.loadPreviousPage)
# Next page action
self.nextAction = QAction(self)
self.nextAction.setShortcut(Qt.Key_D)
self.nextAction.triggered.connect(self.loadNextPage)
# Add menubar
self.menubar = self.menuBar()
# File menu
self.fileMenu = self.menubar.addMenu('&File')
self.fileMenu.addAction(self.searchAction)
self.fileMenu.addAction(self.exitAction)
# About menu
self.aboutMenu = self.menubar.addMenu('&About')
#aboutMenu.addAction(helpAction)
self.aboutMenu.addAction(self.aboutAction)
# Main display area
self.chapterLabel = QLabel('<b>Chapters:</b>', self)
self.chapterLabel.move(20, 40)
self.chapterLabel.adjustSize()
self.chapterList = QListWidget(self)
self.chapterList.move(20, 60)
self.chapterList.resize(200, 560)
self.chapterList.doubleClicked.connect(self.openSelectedChapter)
self.chapterURLs = {}
self.nextPixmap = QPixmap('resources/images/next-image-icon.png')
self.previousPixmap = QPixmap('resources/images/previous-image-icon.png')
#self.nextPixmap.scaled(50, 50)
self.previousLabel = QLabel(self)
self.previousLabel.setPixmap(self.previousPixmap)
self.previousLabel.adjustSize()
#self.nextLabel.resize(50, 50)
self.previousLabel.move(350, 35)
self.previousLabel.addAction(self.previousAction)
self.previousText = QLabel(self)
self.previousText.setText('<b>Previous Page</b> (<b>A</b>)')
self.previousText.adjustSize()
#self.nextLabel.resize(50, 50)
self.previousText.move(375, 37)
self.nextLabel = QLabel(self)
self.nextLabel.setPixmap(self.nextPixmap)
#self.nextLabel.setText('<b>Next</b> (right arrow)')
self.nextLabel.adjustSize()
#self.nextLabel.resize(50, 50)
self.nextLabel.move(1060, 35)
self.nextLabel.addAction(self.nextAction)
self.nextText = QLabel(self)
self.nextText.setText('<b>Next Page</b> (<b>D</b>)')
self.nextText.adjustSize()
#self.nextLabel.resize(50, 50)
self.nextText.move(940, 37)
self.pageLabel = QLabel(self)
self.pageLabel.setText('<b>Page</b>')
#self.pageLabel.move(750, 30)
self.totalPageText = QLabel(self)
self.totalPageText.setText('of <b>0</b>')
#self.totalPageText.move(750, 30)
self.currentPageBox = QLineEdit(self)
# a single chapter of a manga typically has <= 100 pages
self.currentPageBox.setMaxLength(2)
self.currentPageBox.setText('0')
self.currentPageBox.resize(30, 25)
#self.currentPageBox.move(718, 32)
self.currentPageBox.setReadOnly(True)
# TODO: Make the jump action depend on the `editingFinished`
# event rather than `returnPressed`
self.currentPageBox.returnPressed.connect(self.jumpToPage)
# denotes whether a manga is currently loaded
self.isMangaLoaded = False
#self.pgPixmap = QPixmap('resources/images/search-icon.png')
'''
self.pgPixmap = QPixmap('current.jpg')
self.pgLabel = QLabel(self)
self.pgLabel.setPixmap(self.pgPixmap)
self.pgLabel.adjustSize()
self.pgLabel.move(100, 0)
'''
#self.d = QDialog(self)
#self.d.setWindowTitle('foo')
#self.d.resize(700, 500)
#self.pgPixmap = QPixmap('current.jpg')
self.pgLabel = QLabel(self)
#self.pgLabel.setPixmap(self.pgPixmap)
self.pgLabel.adjustSize()
#pgLabel.move(300, 60)
self.scrollArea = QScrollArea(self)
#self.setCentralWidget(self.scrollArea)
self.scrollArea.setWidget(self.pgLabel)
self.scrollArea.move(340, 60)
self.scrollArea.resize(750, 560)
self.scrollArea.setAlignment(Qt.AlignHCenter)
# Set main screen layout
self.grid = QGridLayout()
#self.grid.setSpacing(20)
self.grid.addWidget(self.chapterLabel, 1, 1)
self.grid.addWidget(self.previousLabel, 1, 7)
self.grid.addWidget(self.previousText, 1, 8)
self.grid.addWidget(self.pageLabel, 1, 28)
self.grid.addWidget(self.currentPageBox, 1, 30, 1, 1)
self.grid.addWidget(self.totalPageText, 1, 31)
self.grid.addWidget(self.nextLabel, 1, 53)
self.grid.addWidget(self.nextText, 1, 54)
self.grid.addWidget(self.chapterList, 3, 1, 20, 5)
#self.grid.addWidget(self.pgLabel, 3, 5, 20, 20)
self.grid.addWidget(self.scrollArea, 3, 6, 20, 50)
self.widget = QWidget(self)
self.widget.setLayout(self.grid)
#self.widget.resizeEvent.connect(self.resizeHandler)
self.setCentralWidget(self.widget)
'''
self.hbox = QHBoxLayout()
self.hbox.addStretch(2)
self.hbox.addWidget(self.chapterList)
self.hbox.addWidget(self.scrollArea)
self.hbox.addWidget(self.chapterLabel)
self.hbox.addWidget(self.previousLabel)
self.hbox.addWidget(self.previousText)
self.hbox.addWidget(self.nextLabel)
self.hbox.addWidget(self.nextText)
#self.setLayout(self.hbox)
self.widget = QWidget(self)
#self.setCentralWidget(self.widget)
self.widget.setLayout(self.hbox)
self.setCentralWidget(self.widget)
'''
# display main screen
self.show()
# Search dialog
self.sd = QDialog(self)
self.sd.setWindowTitle('Search MangaFox database')
#self.sd.resize(900, 570)
self.sd.setFixedSize(900, 570)
self.sd.setMinimumSize(900, 570)
self.searchLabel = QLabel(self.sd)
self.searchLabel.setText('<b>Search:</b>')
self.searchLabel.move(20, 20)
self.searchLabel.adjustSize()
self.searchBox = QLineEdit(self.sd)
self.searchBox.setText('')
self.searchBox.setMaxLength(150)
self.searchBox.resize(200, 20)
self.searchBox.move(90, 18)
#self.searchBox.textChanged[str].connect(self.updateName)
self.scw = QRadioButton('Contain', self.sd)
self.scw.move(350, 18)
self.sbw = QRadioButton('Begin', self.sd)
self.sbw.move(440, 18)
self.sew = QRadioButton('End', self.sd)
self.sew.move(520, 18)
self.sbg = QButtonGroup(self.sd)
self.sbg.addButton(self.scw)
self.sbg.addButton(self.sbw)
self.sbg.addButton(self.sew)
self.sbg.setExclusive(True)
#self.sbg.clicked.connect(self.updateNameMethod)
self.scw.toggle()
# Select the type of manga
self.typeLabel = QLabel('<b>Type:</b>', self.sd)
self.typeLabel.move(20, 62)
self.tjp = QRadioButton('Japanese Manga', self.sd)
self.tjp.move(80, 60)
self.tkr = QRadioButton('Korean Manhwa', self.sd)
self.tkr.move(230, 60)
self.tch = QRadioButton('Chinese Manhua', self.sd)
self.tch.move(370, 60)
self.tan = QRadioButton('Any', self.sd)
self.tan.move(520, 60)
self.tbg = QButtonGroup(self.sd)
self.tbg.addButton(self.tjp)
self.tbg.addButton(self.tkr)
self.tbg.addButton(self.tch)
self.tbg.addButton(self.tan)
self.tbg.setExclusive(True)
self.tan.toggle()
# Author name
self.authorLabel = QLabel('<b>Author Name:</b>', self.sd)
self.authorLabel.move(20, 102)
self.authorBox = QLineEdit('', self.sd)
self.authorBox.setMaxLength(50)
self.authorBox.resize(200, 20)
self.authorBox.move(130, 100)
self.aucw = QRadioButton('Contain', self.sd)
self.aucw.move(350, 100)
self.aubw = QRadioButton('Begin', self.sd)
self.aubw.move(440, 100)
self.auew = QRadioButton('End', self.sd)
self.auew.move(520, 100)
self.aubg = QButtonGroup(self.sd)
self.aubg.addButton(self.aucw)
self.aubg.addButton(self.aubw)
self.aubg.addButton(self.auew)
self.aubg.setExclusive(True)
self.aucw.toggle()
# Artist name
self.artistLabel = QLabel('<b>Artist Name:</b>', self.sd)
self.artistLabel.move(20, 142)
self.artistBox = QLineEdit('', self.sd)
self.artistBox.setMaxLength(50)
self.artistBox.resize(200, 20)
self.artistBox.move(130, 140)
self.arcw = QRadioButton('Contain', self.sd)
self.arcw.move(350, 140)
self.arbw = QRadioButton('Begin', self.sd)
self.arbw.move(440, 140)
self.arew = QRadioButton('End', self.sd)
self.arew.move(520, 140)
self.arbg = QButtonGroup(self.sd)
self.arbg.addButton(self.arcw)
self.arbg.addButton(self.arbw)
self.arbg.addButton(self.arew)
self.arbg.setExclusive(True)
self.arcw.toggle()
# Genre
        self.genreLabel = QLabel('<b>Genre</b> (single click to select, double click to exclude):', self.sd)
self.genreLabel.move(20, 192)
self.gaction = QCheckBox('Action', self.sd)
self.gaction.move(20, 220)
self.gadult = QCheckBox('Adult', self.sd)
self.gadult.move(140, 220)
self.gadventure = QCheckBox('Adventure', self.sd)
self.gadventure.move(260, 220)
self.gcomedy = QCheckBox('Comedy', self.sd)
self.gcomedy.move(380, 220)
self.gdoujinshi = QCheckBox('Doujinshi', self.sd)
self.gdoujinshi.move(500, 220)
self.gdrama = QCheckBox('Drama', self.sd)
self.gdrama.move(20, 240)
self.gecchi = QCheckBox('Ecchi', self.sd)
self.gecchi.move(140, 240)
self.gfantasy = QCheckBox('Fantasy', self.sd)
self.gfantasy.move(260, 240)
self.ggenben = QCheckBox('Gender Bender', self.sd)
self.ggenben.move(380, 240)
self.gharem = QCheckBox('Harem', self.sd)
self.gharem.move(500, 240)
self.ghistorical = QCheckBox('Historical', self.sd)
self.ghistorical.move(20, 260)
self.ghorror = QCheckBox('Horror', self.sd)
self.ghorror.move(140, 260)
self.gjosei = QCheckBox('Josei', self.sd)
self.gjosei.move(260, 260)
self.gmartart = QCheckBox('Martial Arts', self.sd)
self.gmartart.move(380, 260)
self.gmature = QCheckBox('Mature', self.sd)
self.gmature.move(500, 260)
self.gmecha = QCheckBox('Mecha', self.sd)
self.gmecha.move(20, 280)
self.gmystery = QCheckBox('Mystery', self.sd)
self.gmystery.move(140, 280)
self.gones = QCheckBox('One Shot', self.sd)
self.gones.move(260, 280)
self.gpsych = QCheckBox('Psychological', self.sd)
self.gpsych.move(380, 280)
self.gromance = QCheckBox('Romance', self.sd)
self.gromance.move(500, 280)
self.gschlyf = QCheckBox('School Life', self.sd)
self.gschlyf.move(20, 300)
self.gscifi = QCheckBox('Sci-fi', self.sd)
self.gscifi.move(140, 300)
self.gseinen = QCheckBox('Seinen', self.sd)
self.gseinen.move(260, 300)
self.gshoujo = QCheckBox('Shoujo', self.sd)
self.gshoujo.move(380, 300)
self.gshoujoai = QCheckBox('Shoujo Ai', self.sd)
self.gshoujoai.move(500, 300)
self.gshou = QCheckBox('Shounen', self.sd)
self.gshou.move(20, 320)
self.gshouai = QCheckBox('Shounen Ai', self.sd)
self.gshouai.move(140, 320)
self.gslice = QCheckBox('Slice of Life', self.sd)
self.gslice.move(260, 320)
self.gsmut = QCheckBox('Smut', self.sd)
self.gsmut.move(380, 320)
self.gsports = QCheckBox('Sports', self.sd)
self.gsports.move(500, 320)
self.gsuper = QCheckBox('Supernatural', self.sd)
self.gsuper.move(20, 340)
self.gtrag = QCheckBox('Tragedy', self.sd)
self.gtrag.move(140, 340)
self.gwtoons = QCheckBox('Webtoons', self.sd)
self.gwtoons.move(260, 340)
self.gyaoi = QCheckBox('Yaoi', self.sd)
self.gyaoi.move(380, 340)
self.gyuri = QCheckBox('Yuri', self.sd)
self.gyuri.move(500, 340)
self.gbg = QButtonGroup(self.sd)
self.gbg.addButton(self.gaction)
self.gbg.addButton(self.gadult)
self.gbg.addButton(self.gadventure)
self.gbg.addButton(self.gcomedy)
self.gbg.addButton(self.gdoujinshi)
self.gbg.addButton(self.gdrama)
self.gbg.addButton(self.gecchi)
self.gbg.addButton(self.gfantasy)
self.gbg.addButton(self.ggenben)
self.gbg.addButton(self.gharem)
self.gbg.addButton(self.ghistorical)
self.gbg.addButton(self.ghorror)
self.gbg.addButton(self.gjosei)
self.gbg.addButton(self.gmartart)
self.gbg.addButton(self.gmature)
self.gbg.addButton(self.gmecha)
self.gbg.addButton(self.gmystery)
self.gbg.addButton(self.gones)
self.gbg.addButton(self.gpsych)
self.gbg.addButton(self.gromance)
self.gbg.addButton(self.gschlyf)
self.gbg.addButton(self.gscifi)
self.gbg.addButton(self.gseinen)
self.gbg.addButton(self.gshoujo)
self.gbg.addButton(self.gshoujoai)
self.gbg.addButton(self.gshou)
self.gbg.addButton(self.gshouai)
self.gbg.addButton(self.gslice)
self.gbg.addButton(self.gsmut)
self.gbg.addButton(self.gsports)
self.gbg.addButton(self.gsuper)
self.gbg.addButton(self.gtrag)
self.gbg.addButton(self.gwtoons)
self.gbg.addButton(self.gyaoi)
self.gbg.addButton(self.gyuri)
self.gbg.setExclusive(False)
# Release Year
self.yearLabel = QLabel('<b>Year of Release:</b>', self.sd)
self.yearLabel.move(20, 382)
self.yearBox = QLineEdit('', self.sd)
self.yearBox.setMaxLength(4)
self.yearBox.resize(50, 20)
self.yearBox.move(150, 380)
self.yon = QRadioButton('On', self.sd)
self.yon.move(220, 380)
self.ybf = QRadioButton('Before', self.sd)
self.ybf.move(290, 380)
self.yaf = QRadioButton('After', self.sd)
self.yaf.move(380, 380)
self.ybg = QButtonGroup(self.sd)
self.ybg.addButton(self.yon)
self.ybg.addButton(self.ybf)
self.ybg.addButton(self.yaf)
self.ybg.setExclusive(True)
self.yon.toggle()
# Rating
self.ratingLabel = QLabel('<b>Rating:</b>', self.sd)
self.ratingLabel.move(20, 412)
self.ris = QRadioButton('is', self.sd)
self.ris.move(100, 410)
self.rlt = QRadioButton('less than', self.sd)
self.rlt.move(160, 410)
self.rgt = QRadioButton('more than', self.sd)
self.rgt.move(265, 410)
self.rbg = QButtonGroup(self.sd)
self.rbg.addButton(self.ris)
self.rbg.addButton(self.rlt)
self.rbg.addButton(self.rgt)
self.rbg.setExclusive(True)
self.ris.toggle()
self.rany = QRadioButton('Any', self.sd)
self.rany.move(20, 435)
self.r0 = QRadioButton('0 stars', self.sd)
self.r0.move(80, 435)
self.r1 = QRadioButton('1 star', self.sd)
self.r1.move(160, 435)
self.r2 = QRadioButton('2 stars', self.sd)
self.r2.move(240, 435)
self.r3 = QRadioButton('3 stars', self.sd)
self.r3.move(320, 435)
self.r4 = QRadioButton('4 stars', self.sd)
self.r4.move(400, 435)
self.r5 = QRadioButton('5 stars', self.sd)
self.r5.move(480, 435)
self.rrbg = QButtonGroup(self.sd)
self.rrbg.addButton(self.rany)
self.rrbg.addButton(self.r0)
self.rrbg.addButton(self.r1)
self.rrbg.addButton(self.r2)
self.rrbg.addButton(self.r3)
self.rrbg.addButton(self.r4)
self.rrbg.addButton(self.r5)
self.rrbg.setExclusive(True)
self.rany.toggle()
# Series running
self.runLabel = QLabel('<b>Completed Series:</b>', self.sd)
self.runLabel.move(20, 477)
self.ryes = QRadioButton('Yes', self.sd)
self.ryes.move(170, 475)
self.rno = QRadioButton('No', self.sd)
self.rno.move(240, 475)
self.ryn = QRadioButton('Either', self.sd)
self.ryn.move(310, 475)
self.cbg = QButtonGroup(self.sd)
self.cbg.addButton(self.ryes)
self.cbg.addButton(self.rno)
self.cbg.addButton(self.ryn)
self.cbg.setExclusive(True)
self.ryn.toggle()
# search button
self.searchButton = QPushButton('Search', self.sd)
self.searchButton.move(290, 530)
#self.searchButton.clicked.connect(qApp.quit)
self.searchButton.clicked.connect(self.searchMangaFox)
# Results
self.resultLabel = QLabel('<b>Results:</b>', self.sd)
self.resultLabel.move(630, 20)
# result list
self.resultList = QListWidget(self.sd)
self.resultList.resize(250, 460)
self.resultList.move(630, 40)
self.resultList.doubleClicked.connect(self.populateChapterList)
# Dictionary in form of {'manga title' : 'manga URL'}
self.resultURLs = {}
def searchMFDB(self):
print('Search initiated\n')
self.searchDialog()
def aboutInfo(self):
dialog = QDialog(self)
dialog.setWindowTitle('About MangaFox Search Scraper')
dialog.resize(400, 200)
dialog.setModal(False)
        dialog.exec_()
def searchDialog(self):
# Draw the dialog
self.sd.setModal(True)
self.sd.exec_()
def generateSearchURL(self):
"""
        Construct the full MangaFox search URL from all query fields; the
        result is submitted to `http://mangafox.me/search.php`.
"""
finalURL = (self.sURL + self.sNAME_METHOD + self.sNAME + self.sTYPE +
self.sAUTHOR_METHOD + self.sAUTHOR + self.sARTIST_METHOD +
self.sARTIST + self.sGENRE_ACTION + self.sGENRE_ADULT +
self.sGENRE_ADVENTURE + self.sGENRE_COMEDY + self.sGENRE_DOUJINSHI +
self.sGENRE_DRAMA + self.sGENRE_ECCHI + self.sGENRE_FANTASY +
self.sGENRE_GENDER_BENDER + self.sGENRE_HAREM + self.sGENRE_HISTORICAL +
self.sGENRE_HORROR + self.sGENRE_JOSEI + self.sGENRE_MARTIAL_ARTS +
self.sGENRE_MATURE + self.sGENRE_MECHA + self.sGENRE_MYSTERY +
self.sGENRE_ONE_SHOT + self.sGENRE_PSYCHOLOGICAL + self.sGENRE_ROMANCE +
self.sGENRE_SCHOOL_LIFE + self.sGENRE_SCI_FI + self.sGENRE_SEINEN +
self.sGENRE_SHOUJO + self.sGENRE_SHOUJO_AI + self.sGENRE_SHOUNEN +
self.sGENRE_SHOUNEN_AI + self.sGENRE_SLICE_OF_LIFE + self.sGENRE_SMUT +
self.sGENRE_SPORTS + self.sGENRE_SUPERNATURAL + self.sGENRE_TRAGEDY +
self.sGENRE_WEBTOONS + self.sGENRE_YAOI + self.sGENRE_YURI +
self.sRELEASE_METHOD + self.sRELEASE_DATE + self.sRATING_METHOD +
self.sRATING + self.sRUNNING + self.sADVANCED_OPTIONS)
return finalURL
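        # Illustrative result only: sURL, the sGENRE_* fields and
        # sADVANCED_OPTIONS are set elsewhere in this class, but with the
        # defaults above a generated query is expected to look roughly like
        #   ...search.php?...name_method=cw&name=naruto&type=0
        #   &author_method=cw&author=&artist_method=cw&artist=
        #   ...&released_method=eq&released=&rating_method=eq&rating=&is_completed=
        # where "naruto" stands in for whatever was typed in the search box.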
def updateNameMethod(self):
if self.scw.isChecked():
self.sNAME_METHOD = 'name_method=cw'
elif self.sbw.isChecked():
self.sNAME_METHOD = 'name_method=bw'
else:
self.sNAME_METHOD = 'name_method=ew'
def updateName(self):
self.sNAME = '&name=' + str(self.searchBox.text())
def updateMangaType(self):
if self.tjp.isChecked():
self.sTYPE = '&type=1'
elif self.tkr.isChecked():
self.sTYPE = '&type=2'
elif self.tch.isChecked():
self.sTYPE = '&type=3'
else:
self.sTYPE = '&type=0'
def updateAuthorMethod(self):
if self.aucw.isChecked():
self.sAUTHOR_METHOD = '&author_method=cw'
elif self.aubw.isChecked():
self.sAUTHOR_METHOD = '&author_method=bw'
#elif self.auew.isChecked():
else:
self.sAUTHOR_METHOD = '&author_method=ew'
def updateAuthor(self):
self.sAUTHOR = '&author=' + str(self.authorBox.text())
def updateArtistMethod(self):
if self.arcw.isChecked():
self.sARTIST_METHOD = '&artist_method=cw'
elif self.arbw.isChecked():
self.sARTIST_METHOD = '&artist_method=bw'
#elif self.arew.isChecked():
else:
self.sARTIST_METHOD = '&artist_method=ew'
def updateArtist(self):
self.sARTIST = '&artist=' + str(self.artistBox.text())
def updateGenres(self):
#
foo = 1
def updateReleaseMethod(self):
if self.yon.isChecked():
self.sRELEASE_METHOD = '&released_method=eq'
elif self.ybf.isChecked():
self.sRELEASE_METHOD = '&released_method=lt'
#elif self.yaf.isChecked():
else:
self.sRELEASE_METHOD = '&released_method=gt'
def updateReleaseYear(self):
self.sRELEASE_DATE = '&released=' + str(self.yearBox.text())
def updateRatingMethod(self):
if self.ris.isChecked():
self.sRATING_METHOD = '&rating_method=eq'
elif self.rlt.isChecked():
self.sRATING_METHOD = '&rating_method=lt'
#elif self.rgt.isChecked():
else:
self.sRATING_METHOD = '&rating_method=gt'
def updateRating(self):
if self.r0.isChecked():
self.sRATING = '&rating=0'
elif self.r1.isChecked():
self.sRATING = '&rating=1'
elif self.r2.isChecked():
self.sRATING = '&rating=2'
elif self.r3.isChecked():
self.sRATING = '&rating=3'
elif self.r4.isChecked():
self.sRATING = '&rating=4'
elif self.r5.isChecked():
self.sRATING = '&rating=5'
#elif self.rany.isChecked():
else:
self.sRATING = '&rating='
def isRunning(self):
if self.ryes.isChecked():
self.sRUNNING = '&is_completed=1'
elif self.rno.isChecked():
self.sRUNNING = '&is_completed=0'
#elif self.ryn.isChecked():
else:
self.sRUNNING = '&is_completed='
def searchMangaFox(self):
self.updateNameMethod()
self.updateName()
self.updateMangaType()
self.updateAuthorMethod()
self.updateAuthor()
self.updateArtistMethod()
self.updateArtist()
self.updateGenres()
self.updateReleaseMethod()
self.updateReleaseYear()
self.updateRatingMethod()
self.updateRating()
self.isRunning()
self.mSEARCH_URL = self.generateSearchURL()
print('Generated search URL string:')
print(self.mSEARCH_URL + '\n')
#return self.generateSearchURL()
res = Scraper.search('MangaFox', self.mSEARCH_URL)
self.resultList.clear()
for i in res:
#print(i[1], end=' ')
#print(i[0] + '\n')
#litem = QListWidgetItem(i[1])
#self.resultList.addItem(litem)
#self.resultURLs.append(i)
#self.resultURLs[i[1]] = i[0]
litem = QListWidgetItem(i[0])
self.resultList.addItem(litem)
self.resultURLs[i[0]] = i[1]
#self.resultList.adjustSize()
#print(self.resultURLs)
def populateChapterList(self):
self.isMangaLoaded = False
print('Populating chapter list...\n')
title = str(self.resultList.currentItem().text())
print('Selected option: ' + title)
print('URL: ' + self.resultURLs[title] + '\n')
#print(str(item.text()))
ch = Scraper.fetchAllChapters(self.resultURLs[title])
print('Successfully fetched chapter info. %s chapters in total' % str(len(ch)) + '\n')
self.chapterList.clear()
for i in ch:
#print(i[0] + ' ' + i[1])
litem = QListWidgetItem(i[0])
self.chapterList.addItem(litem)
self.chapterURLs[i[0]] = i[1]
self.sd.close()
#for i in range(self.chapterList.count()):
#print(i)
#qitem = self.chapterList.item(i)
#print(str(qitem.text()))
self.currentChapterIndex = 0
self.currentPageBox.setReadOnly(True)
self.currentPageBox.setText('0')
self.totalPageText.setText('of <b>0</b>')
newPixmap = QPixmap('')
self.pgLabel.setPixmap(newPixmap)
self.pgLabel.adjustSize()
self.statusLabel.setText('<b>Now reading</b>: No chapter loaded')
def getChapterBaseURL(self, url):
#print(url.split('/'))
p = url.split('/')
p.pop()
chapterURL = '/'.join(p) + '/'
#print('Base URL: ' + chapterURL)
return chapterURL
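        # Illustrative example (URL layout is an assumption): for a chapter URL
        # such as http://mangafox.me/manga/some_title/v01/c001/1.html the base
        # becomes http://mangafox.me/manga/some_title/v01/c001/ and individual
        # pages are then addressed as base + '<page>.html'.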
def openSelectedChapter(self):
ch = str(self.chapterList.currentItem().text())
#print('current reow: ' + str(self.chapterList.currentRow()))
self.chapterBaseURL = self.getChapterBaseURL(self.chapterURLs[ch])
print('Chapter base URL: ' + self.chapterBaseURL)
self.currentPageNumber = 1
print('Opening chapter \"' + ch + '\"...')
print('Chapter URL: ' + self.chapterURLs[ch])
self.currentChapterIndex = self.chapterList.currentRow()
self.currentChapterTotalPages = Scraper.fetchChapterInfo(self.chapterURLs[ch])
self.currentPageBox.setText(str(self.currentPageNumber))
self.totalPageText.setText('of <b>' + str(self.currentChapterTotalPages) + '</b>')
self.isMangaLoaded = True
if Scraper.fetchPageImage(self.chapterURLs[ch]):
newPixmap = QPixmap('current.jpg')
self.pgLabel.setPixmap(newPixmap)
self.pgLabel.adjustSize()
#pageNo = Scraper.fetchNextPageURL(self.chapterURLs[ch])
self.currentPageURL = self.chapterURLs[ch]
#print(self.chapterURLs[ch].split('/'))
self.currentPageBox.setReadOnly(False)
#self.statusLabel.setText('<b>Now reading</b>: ' + ch)
self.updateStatusMessage()
self.resetScrollArea()
else:
print('Unable to fetch page image from image URL!')
def loadPreviousPage(self):
        if not self.isMangaLoaded:
return
print('Previous page action initiated\n')
if self.currentPageNumber > 1:
            self.currentPageNumber -= 1
nextURL = self.chapterBaseURL + str(self.currentPageNumber) + '.html'
print('Page URL: ' + nextURL)
if Scraper.fetchPageImage(nextURL):
newPixmap = QPixmap('current.jpg')
self.pgLabel.setPixmap(newPixmap)
self.pgLabel.adjustSize()
self.currentPageURL = nextURL
self.currentPageBox.setText(str(self.currentPageNumber))
self.resetScrollArea()
else:
print('Unable to fetch page image from image URL!')
else:
print('Loading previous chapter...')
self.currentChapterIndex -= 1
if self.currentChapterIndex < 0:
print('Cannot go to preceding chapter, this is the first chapter!')
self.currentChapterIndex = 0
else:
self.openChapter()
def loadNextPage(self):
        if not self.isMangaLoaded:
return
print('Next page action initiated\n')
if self.currentPageNumber < self.currentChapterTotalPages:
self.currentPageNumber += 1
nextURL = self.chapterBaseURL + str(self.currentPageNumber) + '.html'
print('Page URL: ' + nextURL)
if Scraper.fetchPageImage(nextURL):
newPixmap = QPixmap('current.jpg')
self.pgLabel.setPixmap(newPixmap)
self.pgLabel.adjustSize()
self.currentPageURL = nextURL
self.currentPageBox.setText(str(self.currentPageNumber))
self.resetScrollArea()
else:
print('Unable to fetch page image from image URL!')
else:
print('Loading next chapter...')
self.currentChapterIndex += 1
if self.currentChapterIndex > self.chapterList.count() - 1:
print('Cannot go to next chapter, this is the last chapter!')
self.currentChapterIndex -= 1
else:
self.openChapter()
def openChapter(self):
ch = str(self.chapterList.item(self.currentChapterIndex).text())
self.chapterBaseURL = self.getChapterBaseURL(self.chapterURLs[ch])
print('Chapter base URL: ' + self.chapterBaseURL)
self.currentPageNumber = 1
print('Opening chapter \"' + ch + '\"...')
print('Chapter URL: ' + self.chapterURLs[ch])
self.chapterList.setCurrentItem(self.chapterList.item(self.currentChapterIndex))
self.currentChapterIndex = self.chapterList.currentRow()
self.currentChapterTotalPages = Scraper.fetchChapterInfo(self.chapterURLs[ch])
self.currentPageBox.setText(str(self.currentPageNumber))
self.totalPageText.setText('of <b>' + str(self.currentChapterTotalPages) + '</b>')
self.isMangaLoaded = True
if Scraper.fetchPageImage(self.chapterURLs[ch]):
newPixmap = QPixmap('current.jpg')
self.pgLabel.setPixmap(newPixmap)
self.pgLabel.adjustSize()
self.currentPageURL = self.chapterURLs[ch]
self.currentPageBox.setText(str(self.currentPageNumber))
self.updateStatusMessage()
self.resetScrollArea()
else:
print('Unable to fetch page image from image URL!')
def jumpToPage(self):
if self.currentPageBox.isReadOnly():
return
self.currentPageBox.clearFocus()
self.scrollArea.setFocus()
        try:
            pageNo = int(self.currentPageBox.text())
        except ValueError:  # ignore empty or non-numeric input
            return
        print('Jumping to page: ' + str(pageNo))
        self.currentPageNumber = pageNo
nextURL = self.chapterBaseURL + str(self.currentPageNumber) + '.html'
if Scraper.fetchPageImage(nextURL):
newPixmap = QPixmap('current.jpg')
self.pgLabel.setPixmap(newPixmap)
self.pgLabel.adjustSize()
self.currentPageURL = nextURL
else:
print('Unable to fetch page image from image URL!')
def updateStatusMessage(self):
s = self.chapterBaseURL.split('/')
volStr = s[-3]
chStr = s[-2]
vol = volStr.split('v')[1]
chap = chStr.split('c')[1]
ch = str(self.chapterList.item(self.currentChapterIndex).text())
self.statusLabel.setText('<b>Now reading</b>: ' + ch + ', <b>Volume</b>: ' + vol + ', <b>Chapter</b>: ' + chap)
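        # With a base URL of the assumed form .../some_title/v03/c012/ the
        # split yields volStr == 'v03' and chStr == 'c012', so the status bar
        # shows Volume '03' and Chapter '012'.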
def resetScrollArea(self):
self.scrollArea.verticalScrollBar().setValue(0)
self.scrollArea.horizontalScrollBar().setValue(0)
'''
def resizeEvent(self, event):
#QMainWindow.resizeEvent(event)
#print('foobar')
self.currentPageBox.move(self.pageLabel.pos() + QPoint(self.pageLabel.width(), self.pageLabel.height()-3))
'''
#def resizeHandle(self):
#
# print("Foobar")
#end of class MangaMania
if __name__ == '__main__':
qapp = QApplication(sys.argv)
g = MangaMania()
g.show()
r = qapp.exec_()
currentPage = Path('current.jpg')
if currentPage.is_file():
os.remove('current.jpg')
sys.exit(r)
|
|
# =============================================================================
# fabfile.py
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# # Author: Klaudiusz Staniek
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from fabric.api import *
from fabric.utils import warn
from fabric.api import run
from fabric.contrib.files import exists
remote_csm_dir = '/opt/csm'
remote_app_dir = '/opt/csm/csmserver'
github_repo = 'https://github.com/csm-aut/csm.git'
sql_pass = 'root'
def detect_os():
if 'OS' in env:
return env.OS
output = run('python -c "import platform; print platform.dist()"')
distname, version, osid = eval(output)
puts("{} {} {} detected".format(distname, version, osid))
env.OS = distname
env.OS_VER = str(version)
return
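# For reference: on a CentOS 7 host ``platform.dist()`` evaluates to something
# like ('centos', '7.2.1511', 'Core'), so env.OS becomes 'centos' and
# env.OS_VER '7.2.1511' (exact values vary per host).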
def _install_python27():
output = run("python -V")
if "2.7" in output:
warn("Python 2.7 already installed")
return
with cd("/usr/src"):
sudo("wget https://www.python.org/ftp/python/2.7.10/Python-2.7.10.tgz")
sudo("tar xzf Python-2.7.10.tgz")
with cd("/usr/src/Python-2.7.10"):
sudo("./configure")
sudo("make install")
def install_requirements():
""" Install required Linux packages."""
if env.OS == "centos":
if env.OS_VER.startswith("6"):
_install_python27()
packages = "git python-setuptools python-devel python-crypto gcc python-virtualenv gunicorn"
yum(packages)
sudo("easy_install pip")
elif env.OS in ["debian", "Ubuntu"]:
packages = "git python-pip python-dev python-virtualenv gunicorn"
apt_get(packages)
def apt_get(*packages):
return sudo('apt-get -y --no-upgrade install %s' % ' '.join(packages), shell=False)
def yum(*packages):
return sudo('yum -y install {}'.format(' '.join(packages)), shell=False)
def install_mysql():
""" Install mysql server """
with settings(hide('warnings', 'stderr'), warn_only=True):
if env.OS == "centos":
if env.OS_VER.startswith("7"):
result = sudo("rpm -q mysql-community-release-el7-5.noarch")
elif env.OS_VER.startswith("6"):
result = sudo("rpm -q mysql-server-5.1.73-5.el6_6.x86_64")
#result = yum(["mysql-server"])
elif env.OS in ["debian", "Ubuntu"]:
with settings(hide('warnings', 'stderr'), warn_only=True):
result = sudo('mysql --version')
if result.failed is False:
warn('MySQL is already installed')
return
mysql_password = sql_pass if sql_pass else prompt('Please enter MySQL root password:')
if env.OS == "centos":
if env.OS_VER.startswith("7"):
sudo("rpm -Uvh http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm")
sudo("yum -y update")
result = yum("mysql-server")
sudo("/sbin/service mysqld start")
queries = [
"DELETE FROM mysql.user WHERE User='';",
"DELETE FROM mysql.user WHERE User='root' "
"AND Host NOT IN ('localhost', '127.0.0.1', '::1');",
"FLUSH PRIVILEGES;",
"ALTER USER 'root'@'localhost' IDENTIFIED BY '{}';".format(mysql_password),
"SET PASSWORD FOR 'root'@'localhost' = PASSWORD('{}');".format(mysql_password),
]
with warn_only():
for query in queries:
run('mysql -u root -e "%s"' % query)
sudo('chkconfig mysqld on')
elif env.OS in ["debian", "Ubuntu"]:
sudo('echo "mysql-server-5.0 mysql-server/root_password password ' \
'%s" | debconf-set-selections' % mysql_password)
sudo('echo "mysql-server-5.0 mysql-server/root_password_again password ' \
'%s" | debconf-set-selections' % mysql_password)
apt_get('mysql-server')
def install_pip_requirements():
""" Install required python modules """
with cd(remote_app_dir):
if not exists("env"):
if env.OS == 'centos' and env.OS_VER.startswith("6"):
sudo('pip install virtualenv')
sudo('virtualenv --python=/usr/local/bin/python2.7 env')
else:
sudo('virtualenv env')
with virtualenv():
if hasattr(env, "proxy"):
cmd = "pip install --proxy {} -r requirements.txt".format(env.proxy)
else:
cmd = "pip install -r requirements.txt"
sudo(cmd)
def install_csm():
""" Install CSM Server from github. Clone or pull if already exists. """
if hasattr(env, "proxy"):
print("Proxy")
sudo('git config --global --add http.proxy {}'.format(env.proxy))
if exists(remote_app_dir) is False:
sudo('git clone {} {}'.format(github_repo, remote_csm_dir))
else:
with cd(remote_csm_dir):
try:
sudo('git pull')
except:
pass
def virtualenv():
"""
    Context manager. Use it to perform actions with the virtualenv activated::
with virtualenv():
# virtualenv is active here
"""
return prefix('source {}/env/bin/activate'.format(remote_app_dir))
def vagrant():
""" Setup the environment for vagrant. """
# change from the default user to 'vagrant'
env.user = 'vagrant'
# connect to the port-forwarded ssh
env.hosts = ['127.0.0.1:2222']
# use vagrant ssh key
result = local('vagrant ssh-config | grep IdentityFile', capture=True)
env.key_filename = result.split()[1]
# env.proxy = "https://proxy-ams-1.cisco.com:8080"
def install():
""" Install CSM Server """
detect_os()
install_requirements()
install_mysql()
install_csm()
install_pip_requirements()
start()
#local("open http://localhost:5000")
def deploy():
""" Deploy changes and restart CSM Server """
commit_message = prompt("Commit message?")
local('git commit -am "{0}"'.format(commit_message))
local('git push origin master')
stop()
install_csm()
start()
def restart():
""" Start CSM Server """
with cd(remote_app_dir):
with virtualenv():
sudo('./csmserver restart')
def start():
""" Start CSM Server """
with cd(remote_app_dir):
with virtualenv():
            sudo('./csmserver start', pty=False)
#sudo('supervisorctl restart all')
def stop():
""" Stop CSM Server """
with cd(remote_app_dir):
with virtualenv():
sudo('./csmserver stop', pty=False)
def uname():
""" Run uname on server """
run('uname -a')
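# Typical invocations with the Fabric 1.x CLI (host names are placeholders):
#   fab vagrant install           # provision the local Vagrant VM and start CSM
#   fab -H admin@csm-host deploy  # push local commits and redeploy on a server
#   fab -H admin@csm-host uname   # quick connectivity check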
|
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import gym
import numpy as np
import os
import pickle
import time
import tensorflow as tf
import ray
from ray.rllib.common import Agent, TrainingResult
from ray.rllib.models import ModelCatalog
from ray.rllib.es import optimizers
from ray.rllib.es import policies
from ray.rllib.es import tabular_logger as tlogger
from ray.rllib.es import tf_util
from ray.rllib.es import utils
Result = namedtuple("Result", [
"noise_inds_n", "returns_n2", "sign_returns_n2", "lengths_n2",
"eval_return", "eval_length", "ob_sum", "ob_sumsq", "ob_count"
])
DEFAULT_CONFIG = dict(
l2coeff=0.005,
noise_stdev=0.02,
episodes_per_batch=1000,
timesteps_per_batch=10000,
calc_obstat_prob=0.01,
eval_prob=0,
snapshot_freq=0,
return_proc_mode="centered_rank",
episode_cutoff_mode="env_default",
num_workers=10,
stepsize=.01)
@ray.remote
def create_shared_noise():
"""Create a large array of noise to be shared by all workers."""
seed = 123
count = 250000000
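    # 250 million float32 samples take roughly 1 GB; the array is placed in
    # Ray's object store so all workers can share a single copy.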
noise = np.random.RandomState(seed).randn(count).astype(np.float32)
return noise
class SharedNoiseTable(object):
def __init__(self, noise):
self.noise = noise
assert self.noise.dtype == np.float32
def get(self, i, dim):
return self.noise[i:i + dim]
def sample_index(self, stream, dim):
return stream.randint(0, len(self.noise) - dim + 1)
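# Example use (illustrative): a worker samples an index into the shared table,
#   idx = noise_table.sample_index(np.random.RandomState(0), dim)
# and any process can later reconstruct the identical perturbation from `idx`
# alone via
#   eps = noise_table.get(idx, dim)
# because every worker holds the same noise array.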
@ray.remote
class Worker(object):
def __init__(self, config, policy_params, env_name, noise,
min_task_runtime=0.2):
self.min_task_runtime = min_task_runtime
self.config = config
self.policy_params = policy_params
self.noise = SharedNoiseTable(noise)
self.env = gym.make(env_name)
self.preprocessor = ModelCatalog.get_preprocessor(
env_name, self.env.observation_space.shape)
self.preprocessor_shape = self.preprocessor.transform_shape(
self.env.observation_space.shape)
self.sess = utils.make_session(single_threaded=True)
self.policy = policies.GenericPolicy(
self.env.observation_space, self.env.action_space,
self.preprocessor, **policy_params)
tf_util.initialize()
self.rs = np.random.RandomState()
assert (
self.policy.needs_ob_stat ==
(self.config["calc_obstat_prob"] != 0))
def rollout_and_update_ob_stat(self, timestep_limit, task_ob_stat):
if (self.policy.needs_ob_stat and
self.config["calc_obstat_prob"] != 0 and
self.rs.rand() < self.config["calc_obstat_prob"]):
rollout_rews, rollout_len, obs = self.policy.rollout(
self.env, self.preprocessor, timestep_limit=timestep_limit,
save_obs=True, random_stream=self.rs)
task_ob_stat.increment(obs.sum(axis=0), np.square(obs).sum(axis=0),
len(obs))
else:
rollout_rews, rollout_len = self.policy.rollout(
self.env, self.preprocessor, timestep_limit=timestep_limit,
random_stream=self.rs)
return rollout_rews, rollout_len
def do_rollouts(self, params, ob_mean, ob_std, timestep_limit=None):
# Set the network weights.
self.policy.set_trainable_flat(params)
if self.policy.needs_ob_stat:
self.policy.set_ob_stat(ob_mean, ob_std)
if self.config["eval_prob"] != 0:
raise NotImplementedError("Eval rollouts are not implemented.")
noise_inds, returns, sign_returns, lengths = [], [], [], []
# We set eps=0 because we're incrementing only.
task_ob_stat = utils.RunningStat(self.preprocessor_shape, eps=0)
# Perform some rollouts with noise.
task_tstart = time.time()
while (len(noise_inds) == 0 or
time.time() - task_tstart < self.min_task_runtime):
noise_idx = self.noise.sample_index(
self.rs, self.policy.num_params)
perturbation = self.config["noise_stdev"] * self.noise.get(
noise_idx, self.policy.num_params)
            # These two sampling steps could be done in parallel on different
            # actors, letting us update twice as frequently.
self.policy.set_trainable_flat(params + perturbation)
rews_pos, len_pos = self.rollout_and_update_ob_stat(timestep_limit,
task_ob_stat)
self.policy.set_trainable_flat(params - perturbation)
rews_neg, len_neg = self.rollout_and_update_ob_stat(timestep_limit,
task_ob_stat)
noise_inds.append(noise_idx)
returns.append([rews_pos.sum(), rews_neg.sum()])
sign_returns.append(
[np.sign(rews_pos).sum(), np.sign(rews_neg).sum()])
lengths.append([len_pos, len_neg])
return Result(
noise_inds_n=np.array(noise_inds),
returns_n2=np.array(returns, dtype=np.float32),
sign_returns_n2=np.array(sign_returns, dtype=np.float32),
lengths_n2=np.array(lengths, dtype=np.int32),
eval_return=None,
eval_length=None,
ob_sum=(None if task_ob_stat.count == 0 else task_ob_stat.sum),
ob_sumsq=(None if task_ob_stat.count == 0
else task_ob_stat.sumsq),
ob_count=task_ob_stat.count)
class ESAgent(Agent):
def __init__(self, env_name, config, upload_dir=None):
config.update({"alg": "EvolutionStrategies"})
Agent.__init__(self, env_name, config, upload_dir=upload_dir)
with tf.Graph().as_default():
self._init()
def _init(self):
policy_params = {
"ac_noise_std": 0.01
}
env = gym.make(self.env_name)
preprocessor = ModelCatalog.get_preprocessor(
self.env_name, env.observation_space.shape)
preprocessor_shape = preprocessor.transform_shape(
env.observation_space.shape)
self.sess = utils.make_session(single_threaded=False)
self.policy = policies.GenericPolicy(
env.observation_space, env.action_space, preprocessor,
**policy_params)
tf_util.initialize()
self.optimizer = optimizers.Adam(self.policy, self.config["stepsize"])
self.ob_stat = utils.RunningStat(preprocessor_shape, eps=1e-2)
# Create the shared noise table.
print("Creating shared noise table.")
noise_id = create_shared_noise.remote()
self.noise = SharedNoiseTable(ray.get(noise_id))
# Create the actors.
print("Creating actors.")
self.workers = [
Worker.remote(self.config, policy_params, self.env_name, noise_id)
for _ in range(self.config["num_workers"])]
self.episodes_so_far = 0
self.timesteps_so_far = 0
self.tstart = time.time()
self.iteration = 0
def _collect_results(self, theta_id, min_eps, min_timesteps):
num_eps, num_timesteps = 0, 0
results = []
while num_eps < min_eps or num_timesteps < min_timesteps:
print(
"Collected {} episodes {} timesteps so far this iter".format(
num_eps, num_timesteps))
rollout_ids = [worker.do_rollouts.remote(
theta_id,
self.ob_stat.mean if self.policy.needs_ob_stat else None,
self.ob_stat.std if self.policy.needs_ob_stat else None)
for worker in self.workers]
# Get the results of the rollouts.
for result in ray.get(rollout_ids):
results.append(result)
num_eps += result.lengths_n2.size
num_timesteps += result.lengths_n2.sum()
return results
def train(self):
config = self.config
step_tstart = time.time()
theta = self.policy.get_trainable_flat()
assert theta.dtype == np.float32
# Put the current policy weights in the object store.
theta_id = ray.put(theta)
        # Use the actors to do rollouts; note that we pass in the ID of the
        # policy weights.
results = self._collect_results(
theta_id,
config["episodes_per_batch"],
config["timesteps_per_batch"])
curr_task_results = []
ob_count_this_batch = 0
# Loop over the results
for result in results:
assert result.eval_length is None, "We aren't doing eval rollouts."
assert result.noise_inds_n.ndim == 1
assert result.returns_n2.shape == (len(result.noise_inds_n), 2)
assert result.lengths_n2.shape == (len(result.noise_inds_n), 2)
assert result.returns_n2.dtype == np.float32
result_num_eps = result.lengths_n2.size
result_num_timesteps = result.lengths_n2.sum()
self.episodes_so_far += result_num_eps
self.timesteps_so_far += result_num_timesteps
curr_task_results.append(result)
# Update ob stats.
if self.policy.needs_ob_stat and result.ob_count > 0:
self.ob_stat.increment(
result.ob_sum, result.ob_sumsq, result.ob_count)
ob_count_this_batch += result.ob_count
# Assemble the results.
noise_inds_n = np.concatenate(
[r.noise_inds_n for r in curr_task_results])
returns_n2 = np.concatenate([r.returns_n2 for r in curr_task_results])
lengths_n2 = np.concatenate([r.lengths_n2 for r in curr_task_results])
assert (noise_inds_n.shape[0] == returns_n2.shape[0] ==
lengths_n2.shape[0])
# Process the returns.
if config["return_proc_mode"] == "centered_rank":
proc_returns_n2 = utils.compute_centered_ranks(returns_n2)
else:
raise NotImplementedError(config["return_proc_mode"])
# Compute and take a step.
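        # Antithetic estimate: each sampled noise vector eps_i contributes
        # (rank(R_i+) - rank(R_i-)) * eps_i; the weighted sum is averaged over
        # the 2N collected returns below.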
g, count = utils.batched_weighted_sum(
proc_returns_n2[:, 0] - proc_returns_n2[:, 1],
(self.noise.get(idx, self.policy.num_params)
for idx in noise_inds_n),
batch_size=500)
g /= returns_n2.size
assert (
g.shape == (self.policy.num_params,) and
g.dtype == np.float32 and
count == len(noise_inds_n))
update_ratio = self.optimizer.update(-g + config["l2coeff"] * theta)
# Update ob stat (we're never running the policy in the master, but we
# might be snapshotting the policy).
if self.policy.needs_ob_stat:
self.policy.set_ob_stat(self.ob_stat.mean, self.ob_stat.std)
step_tend = time.time()
tlogger.record_tabular("EpRewMean", returns_n2.mean())
tlogger.record_tabular("EpRewStd", returns_n2.std())
tlogger.record_tabular("EpLenMean", lengths_n2.mean())
tlogger.record_tabular(
"Norm", float(np.square(self.policy.get_trainable_flat()).sum()))
tlogger.record_tabular("GradNorm", float(np.square(g).sum()))
tlogger.record_tabular("UpdateRatio", float(update_ratio))
tlogger.record_tabular("EpisodesThisIter", lengths_n2.size)
tlogger.record_tabular("EpisodesSoFar", self.episodes_so_far)
tlogger.record_tabular("TimestepsThisIter", lengths_n2.sum())
tlogger.record_tabular("TimestepsSoFar", self.timesteps_so_far)
tlogger.record_tabular("ObCount", ob_count_this_batch)
tlogger.record_tabular("TimeElapsedThisIter", step_tend - step_tstart)
tlogger.record_tabular("TimeElapsed", step_tend - self.tstart)
tlogger.dump_tabular()
if (config["snapshot_freq"] != 0 and
self.iteration % config["snapshot_freq"] == 0):
filename = os.path.join(
self.logdir, "snapshot_iter{:05d}.h5".format(self.iteration))
assert not os.path.exists(filename)
self.policy.save(filename)
tlogger.log("Saved snapshot {}".format(filename))
info = {
"weights_norm": np.square(self.policy.get_trainable_flat()).sum(),
"grad_norm": np.square(g).sum(),
"update_ratio": update_ratio,
"episodes_this_iter": lengths_n2.size,
"episodes_so_far": self.episodes_so_far,
"timesteps_this_iter": lengths_n2.sum(),
"timesteps_so_far": self.timesteps_so_far,
"ob_count": ob_count_this_batch,
"time_elapsed_this_iter": step_tend - step_tstart,
"time_elapsed": step_tend - self.tstart
}
res = TrainingResult(self.experiment_id.hex, self.iteration,
returns_n2.mean(), lengths_n2.mean(), info)
self.iteration += 1
return res
def save(self):
checkpoint_path = os.path.join(
self.logdir, "checkpoint-{}".format(self.iteration))
weights = self.policy.get_trainable_flat()
objects = [
weights,
self.ob_stat,
self.episodes_so_far,
self.timesteps_so_far,
self.iteration]
pickle.dump(objects, open(checkpoint_path, "wb"))
return checkpoint_path
def restore(self, checkpoint_path):
objects = pickle.load(open(checkpoint_path, "rb"))
self.policy.set_trainable_flat(objects[0])
self.ob_stat = objects[1]
self.episodes_so_far = objects[2]
self.timesteps_so_far = objects[3]
self.iteration = objects[4]
def compute_action(self, observation):
return self.policy.act([observation])[0]
|
|
from ct.models import UnitStatus, UnitLesson, NEED_HELP_STATUS, NEED_REVIEW_STATUS, DONE_STATUS, Lesson
from core.common.mongo import c_chat_context
from ..models import Message
def help_egde_decision(self, edge, fsmStack, request, useCurrent=False, **kwargs):
    """
    Go to END when no unsent additional messages remain for this chat;
    otherwise follow the default edge.
    """
    fsm = edge.fromNode.fsm
additionals = Message.objects.filter(is_additional=True,
chat=fsmStack,
timestamp__isnull=True)
if not additionals:
return fsm.get_node('END')
return edge.toNode
def next_additional_lesson(self, edge, fsmStack, request, useCurrent=False, **kwargs):
"""
    Edge method that moves us to the right state for the next lesson (or to END).
"""
fsm = edge.fromNode.fsm
_status = fsmStack.next_point.student_error.status
if _status == NEED_HELP_STATUS:
additionals = Message.objects.filter(is_additional=True,
chat=fsmStack,
timestamp__isnull=True)
elif _status in [NEED_REVIEW_STATUS, DONE_STATUS]:
if _status == DONE_STATUS:
c_chat_context().update_one(
{"chat_id": fsmStack.id},
{"$set": {"need_faqs": False}}
)
Message.objects.filter(student_error=fsmStack.next_point.student_error,
is_additional=True,
chat=fsmStack,
timestamp__isnull=True).delete()
additionals = Message.objects.filter(is_additional=True,
chat=fsmStack,
timestamp__isnull=True)
if additionals:
next_message = additionals.order_by('student_error').first()
fsmStack.state.unitLesson = next_message.content
if next_message.student_error != fsmStack.next_point.student_error:
return fsm.get_node('NEED_HELP_MESSAGE') if _status == NEED_HELP_STATUS else fsm.get_node('STUDENTERROR')
if fsmStack.state.unitLesson.lesson.kind in ('orct', 'choices'):
return fsm.get_node('ORCT_LETS_START_MESSAGE')
else:
return fsm.get_node('NEED_HELP_MESSAGE') if _status == NEED_HELP_STATUS else fsm.get_node('END')
return edge.toNode
def check_selfassess_and_next_lesson(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
    if (fsmStack.state.unitLesson.lesson.enable_auto_grading and
            fsmStack.state.fsmNode.name != 'ADDITIONAL_GRADING'):
return fsm.get_node('ADDITIONAL_GRADING')
    if fsmStack.next_point.content.selfeval != 'correct':
return fsm.get_node('HOPENESS_MESSAGE')
else:
return fsm.get_node('GREAT_MESSAGE')
def get_lesson_url(self, node, state, request, **kwargs):
"""
Get URL for any lesson.
"""
course = state.get_data_attr('course')
unitStatus = state.get_data_attr('unitStatus')
ul = unitStatus.get_lesson()
return ul.get_study_url(course.pk)
class ADDITIONAL_GRADING(object):
get_path = get_lesson_url
next_edge = check_selfassess_and_next_lesson
# node specification data goes here
title = 'Grading for student answer'
edges = (
dict(name='next', toNode='HOPENESS_MESSAGE', title='View Next Lesson'),
)
class ADDITIONAL_CORRECT_ANSWER(object):
title = 'Show correct answer for Multiple Choices'
edges = (
dict(name='next', toNode='GREAT_MESSAGE', title='Assess yourself'),
)
class ADDITIONAL_INCORRECT_ANSWER(object):
title = 'Show correct answer for Multiple Choices'
edges = (
dict(name='next', toNode='ADDITIONAL_INCORRECT_CHOICE', title='Assess yourself'),
)
class ADDITIONAL_INCORRECT_CHOICE(object):
title = 'Show incorrect choice for Multiple Choices'
edges = (
dict(name='next', toNode='HOPENESS_MESSAGE', title='Assess yourself'),
)
class IF_RESOURCES(object):
help = '''Congratulations! You have completed the core lessons for this
courselet.'''
title = 'Courselet core lessons completed'
edges = (
dict(name='next', toNode='END', title='View Next Lesson'),
)
class ORCT_LETS_START_MESSAGE(object):
title = 'Just show message to user'
edges = (
dict(name="next", toNode="ADDITIONAL_ASK", title="title"),
)
help = "Let's try another question that could help you with this."
class GREAT_MESSAGE(object):
    title = 'Great Message'
help = 'Great! It looks like you understand this now.'
edges = (
dict(name='next', toNode='HOPENESS_MESSAGE', title='something'),
)
class HOPENESS_MESSAGE(object):
    title = 'We all hope you are OK'
    help = 'Hope you\'ve overcome this misconception'
edges = (
        dict(name='next', toNode='MESSAGE_NODE', title='does not matter'),
)
# next_edge = next_additional_lesson
class ADDITIONAL_ASK(object):
get_path = get_lesson_url
# node specification data goes here
title = 'View an explanation'
edges = (
dict(name='next', toNode='ADDITIONAL_GET_ANSWER', title='Answer a question'),
)
class ADDITIONAL_GET_ANSWER(object):
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
unitStatus = fsmStack.state.get_data_attr('unitStatus')
unit_lesson = unitStatus.get_lesson()
if unit_lesson.sub_kind == Lesson.MULTIPLE_CHOICES and unit_lesson.lesson.mc_simplified:
nextUL = unitStatus.start_next_lesson()
if not nextUL: # pragma: no cover
unit = fsmStack.state.get_data_attr('unit')
if unit.unitlesson_set.filter(
kind=UnitLesson.COMPONENT, order__isnull=True
).exists():
return fsm.get_node('IF_RESOURCES')
else:
return fsm.get_node('END')
else: # just a lesson to read
fsmStack.state.unitLesson = nextUL
return fsm.get_node('TITLE')
else:
return edge.toNode
title = 'It is time to answer'
edges = (
dict(name='next', toNode='ADDITIONAL_CONFIDENCE', title='Go to confidence'),
)
class ADDITIONAL_CONFIDENCE(object):
title = 'How confident are you?'
edges = (
dict(name='next', toNode='ADDITIONAL_GET_CONFIDENCE', title='Go to choosing your confidence'),
)
class ADDITIONAL_GET_CONFIDENCE(object):
    def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
        fsm = edge.fromNode.fsm
        if not fsmStack.next_point.content.selfeval:
            return edge.toNode
        if fsmStack.next_point.content.selfeval != 'correct':
            return fsm.get_node('ADDITIONAL_INCORRECT_ANSWER')
        return fsm.get_node('ADDITIONAL_CORRECT_ANSWER')
title = 'Choose confidence'
edges = (
dict(name='next', toNode='ADDITIONAL_ASSESS', title='Go to self-assessment'),
)
class ADDITIONAL_ASSESS(object):
get_path = get_lesson_url
# node specification data goes here
title = 'Assess your answer'
edges = (
dict(name='next', toNode='ADDITIONAL_ASSESS_QUESTION_MESSAGE', title='Assess yourself'),
)
class ADDITIONAL_ASSESS_QUESTION_MESSAGE(object):
get_path = get_lesson_url
# node specification data goes here
title = 'Assess your answer'
edges = (
dict(name='next', toNode='ADDITIONAL_GET_ASSESS', title='Assess yourself'),
)
help = 'How close was your answer to the one shown here?'
class ADDITIONAL_GET_ASSESS(object):
get_path = get_lesson_url
next_edge = check_selfassess_and_next_lesson
# node specification data goes here
title = 'Assess your answer'
edges = (
dict(name='next', toNode='START', title='View Next Lesson'),
)
class NEED_HELP_MESSAGE(object):
get_path = get_lesson_url
next_edge = help_egde_decision
title = 'Additional message'
edges = (
dict(name='next', toNode='STUDENTERROR', title='Go to self-assessment'),
)
help = 'We will try to provide more explanation for this.'
class START(object):
"""
Initialize data for viewing a courselet, and go immediately
to first lesson (not yet completed).
"""
def start_event(self, node, fsmStack, request, **kwargs):
"""
Event handler for START node.
"""
unit = fsmStack.state.get_data_attr('unit')
fsmStack.state.title = 'Study: %s' % unit.title
try: # use unitStatus if provided
unitStatus = fsmStack.state.get_data_attr('unitStatus')
except AttributeError: # create new, empty unitStatus
unitStatus = UnitStatus(unit=unit, user=request.user)
unitStatus.save()
fsmStack.state.set_data_attr('unitStatus', unitStatus)
fsmStack.state.unitLesson = kwargs['unitlesson']
return fsmStack.state.transition(
fsmStack, request, 'next', useCurrent=True, **kwargs
)
# node specification data goes here
title = 'Start This Courselet'
edges = (
dict(name='next', toNode='STUDENTERROR', title='View Next Lesson'),
)
class START_MESSAGE(object):
get_path = get_lesson_url
# node specification data goes here
title = 'Let\'s address each blindspot'
edges = (
dict(name='next', toNode='STUDENTERROR', title='View Next Lesson'),
)
class STUDENTERROR(object):
get_path = get_lesson_url
# node specification data goes here
title = 'Additional lessons begin'
edges = (
dict(name='next', toNode='RESOLVE', title='View Next Lesson'),
)
class RESOLVE(object):
get_path = get_lesson_url
# node specification data goes here
title = 'It is time to answer'
edges = (
dict(name='next', toNode='MESSAGE_NODE', title='Go to self-assessment'),
)
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
unitStatus = fsmStack.state.get_data_attr('unitStatus')
unit_lesson = unitStatus.get_lesson()
if fsmStack.state.unitLesson.lesson.kind == 'orct':
return fsm.get_node('ADDITIONAL_GET_ANSWER')
else:
return edge.toNode
class MESSAGE_NODE(object):
get_path = get_lesson_url
# node specification data goes here
title = 'How well do you feel you understand this blindspot now? If you need more clarifications, tell us.'
edges = (
dict(name='next', toNode='GET_RESOLVE', title='Go to self-assessment'),
)
class GET_RESOLVE(object):
get_path = get_lesson_url
next_edge = next_additional_lesson
# node specification data goes here
title = 'It is time to answer'
edges = (
dict(name='next', toNode='RESOLVE', title='Go to self-assessment'),
)
class END(object):
def get_path(self, node, state, request, **kwargs):
"""
Get URL for next steps in this unit.
"""
unitStatus = state.get_data_attr('unitStatus')
return unitStatus.unit.get_study_url(request.path)
# node specification data goes here
title = 'Additional lessons completed'
help = '''OK, let's continue.'''
def get_specs():
"""
Get FSM specifications stored in this file.
"""
from fsm.fsmspec import FSMSpecification
spec = FSMSpecification(
name='additional',
hideTabs=True,
title='Take the courselet core lessons',
pluginNodes=[
START,
START_MESSAGE,
STUDENTERROR,
RESOLVE,
MESSAGE_NODE,
NEED_HELP_MESSAGE,
ADDITIONAL_ASK,
GET_RESOLVE,
ADDITIONAL_GET_ANSWER,
ADDITIONAL_CONFIDENCE,
ADDITIONAL_GET_CONFIDENCE,
ADDITIONAL_ASSESS,
ADDITIONAL_ASSESS_QUESTION_MESSAGE,
ADDITIONAL_GET_ASSESS,
GREAT_MESSAGE,
HOPENESS_MESSAGE,
ADDITIONAL_CORRECT_ANSWER,
ADDITIONAL_INCORRECT_ANSWER,
ADDITIONAL_INCORRECT_CHOICE,
ADDITIONAL_GRADING,
ORCT_LETS_START_MESSAGE,
END],
)
return (spec,)
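# A new chat step would follow the same pattern as the node classes above
# (sketch only; node and edge names are placeholders):
#
#     class EXTRA_MESSAGE(object):
#         title = 'One more hint'
#         edges = (
#             dict(name='next', toNode='END', title='Continue'),
#         )
#
# and would then be appended to `pluginNodes` in get_specs().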
|
|
"""
Stability evaluation
Original source:
https://github.com/hooshmandshr/yass_visualization/blob/master/src/stability/stability_evaluation.py
"""
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import pdist, squareform, cdist
from tqdm import tqdm
from yass.geometry import find_channel_neighbors, parse
from yass.evaluate.stability_filters import butterworth, whitening
def clean_spike_train(spt):
units = np.unique(spt[:, 1])
spt[:, 1] += len(units)
units += len(units)
for i, u in enumerate(units):
u_idx = spt[:, 1] == u
spt[u_idx, 1] = i
return spt
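# Example: if spt[:, 1] holds unit labels {2, 5, 9}, clean_spike_train relabels
# them in place to {0, 1, 2} (relative order preserved); the spike times in
# spt[:, 0] are left untouched.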
# TODO: remove this and use new yass Reader
class RecordingBatchIterator(object):
def __init__(self, rec_file, geom_file, sample_rate,
n_batches, batch_time_samples, n_chan,
radius, scale=1e2, filter_std=True, whiten=True):
"""Sets up the object for reading from a binary file.
Parameters
----------
rec_file: str
Path to binary file that contains the raw recording file.
geom_file: str
Path to text file containing the geometry file. The file should
contain n_chan lines and each line should contain two numbers that
are separated by ' '.
sample_rate: int
Recording sample rate in Hz
n_batches: int
            Processes the recording in n_batches consecutive
segments that start from the beginning.
batch_time_samples: int
Number of time samples per each batch to be used.
filter_std: bool
            The iterator both filters and standardizes the recording
            (dividing by the standard deviation).
whiten: bool
Spatially whiten the recording.
scale: float
            Scale factor to divide the raw data by when filtering and
            whitening are not needed and the binary data is stored scaled up.
"""
self.s_rate = sample_rate
self.batch_time_samples = batch_time_samples
self.n_batches = n_batches
self.n_chan = n_chan
self.radius = radius
self.geometry = parse(geom_file, n_chan)
self.neighbs = find_channel_neighbors(
self.geometry, self.radius)
self.filter_std = filter_std
self.whiten = whiten
self.scale = scale
        self.file = open(rec_file, 'rb')
def next_batch(self):
"""Gets the next temporal batch of recording."""
ts = np.fromfile(
self.file,
count=self.n_chan * self.batch_time_samples,
dtype=np.int16)
ts = np.reshape(ts, [self.batch_time_samples, self.n_chan])
if not self.filter_std:
return ts / self.scale
ts = butterworth(ts, 300, 0.1, 3, self.s_rate)
ts = ts / np.std(ts)
if not self.whiten:
return ts
ts = whitening(ts, self.neighbs, 40)
return ts
def reset_cursor(self):
"""Resets the cursor of the open file to the beginning."""
self.file.seek(0)
def close_iterator(self):
self.file.close()
class MeanWaveCalculator(object):
def __init__(self, batch_reader, spike_train, window=range(-10, 30)):
"""Sets up the object for mean wave computation.
Parameters
----------
spt: numpy.ndarray
Shape [N, 2] where N is the total number of events. First column
indicates the spike times in time sample and second is cluster
identity of the spike times.
window: list
List of consecuitive integers. Indicating the window around spike
times that indicate an event.
Returns
-------
int
The number of boundary violations in batch processing part of the
mean wave calculation.
"""
self.batch_reader = batch_reader
self.spike_train = spike_train
self.window = window
self.spike_train = clean_spike_train(
self.spike_train)
self.n_units = max(self.spike_train[:, 1] + 1)
self.templates = np.zeros(
[len(self.window), batch_reader.n_chan, self.n_units])
def compute_templates(self, n_batches):
"""Computes the templates from a given number of batches."""
self.batch_reader.reset_cursor()
counts = np.zeros(self.n_units)
boundary_violation = 0
n_samples = self.batch_reader.batch_time_samples
for i in tqdm(range(n_batches)):
batch_idx = np.logical_and(
self.spike_train[:, 0] > i * n_samples,
self.spike_train[:, 0] < (i + 1) * n_samples)
spt = self.spike_train[batch_idx, :]
spt[:, 0] -= n_samples * i
ts = self.batch_reader.next_batch()
for j in range(spt.shape[0]):
try:
self.templates[:, :, spt[j, 1]] += (ts[spt[j, 0] +
self.window, :])
counts[spt[j, 1]] += 1
except Exception:
boundary_violation += 1
for u in range(self.n_units):
if counts[u]:
self.templates[:, :, u] /= counts[u]
return boundary_violation
def close_reader(self):
        self.batch_reader.close_iterator()
class RecordingAugmentation(object):
def __init__(self, mean_wave_calculator, move_rate,
augment_rate, dist_factor=0.5, refractory_period=2.0):
"""Sets up the object for stability metric computations.
Parameters
----------
        mean_wave_calculator: MeanWaveCalculator
            Provides the templates, spike train and batch reader.
        move_rate: float [0, 1]. The rate at which original clusters
            will be moved spatially around.
        augment_rate: float [0, 1]. Fraction of each unit's spikes to add
            as augmented spikes.
        dist_factor: float [0, 1]. How far the template should move
            spatially. 0 represents no movement and 1 the furthest.
        refractory_period: float
            The minimum time between spikes of the same unit/cluster in
            milliseconds.
"""
self.template_comp = mean_wave_calculator
self.geometry = mean_wave_calculator.batch_reader.geometry
self.n_chan = self.geometry.shape[0]
self.template_calculator = mean_wave_calculator
# Number of samples per batches.
n_samp = self.template_calculator.batch_reader.batch_time_samples
self.batch_num_samples = n_samp
self.construct_channel_map()
self.compute_stat_summary()
self.move_rate = move_rate
self.augment_rate = augment_rate
self.dist_factor = dist_factor
# Convert refractory period to time samples.
sampling_rate = mean_wave_calculator.batch_reader.s_rate
self.refrac_period = refractory_period * 1e-3 * sampling_rate
def construct_channel_map(self):
"""Constucts a map of coordinate to channel index."""
self.geom_map = {}
for i in range(self.n_chan):
self.geom_map[(self.geometry[i, 0], self.geometry[i, 1])] = i
pair_dist = squareform(pdist(self.geometry))
self.closest_channels = np.argsort(pair_dist, axis=1)
def correct_spike_time(self, spike_times, aug_spike_times):
"""Corrects any violation of refractory period for spike times.
Parameters
----------
spike_times: numpy.array
Sorted numpy.array of base spike times.
aug_spike_times: numpy.array
Sorted numpy.array of spike times to be added to the base. These
should not violate refractory period among themselves.
Returns
-------
numpy.array
            New augmented spike times with no refractory-period violation
            with respect to the combined spike train.
"""
if len(spike_times) == 0 or len(aug_spike_times) == 0:
return aug_spike_times
# Number of spikes that violate refractory period.
num_violation = 0
# Silent periods that more spikes can be added.
silent_period = []
# Last spike time that was added, combined between the two.
last_spike_time = 0
# The spike that was added the current iteration, combined between
# the two spike trains.
current_spike_time = 0
valid_spike_times = []
remove_idx = []
for i in range(1, len(aug_spike_times)):
diff = aug_spike_times[i] - aug_spike_times[i - 1]
if diff < self.refrac_period:
remove_idx.append(i - 1)
num_violation += 1
aug_spike_times = np.delete(aug_spike_times, remove_idx)
# Cursor on the base spike times.
i = 0
# Cursor on the augmented spike_times.
j = 0
while i < len(spike_times) or j < len(aug_spike_times):
diff = 0
if i >= len(spike_times):
# We saw all base spike times.
diff = self.refrac_period + 1
elif j >= len(aug_spike_times):
# We saw all augmented spikes.
diff = - self.refrac_period - 1
else:
diff = spike_times[i] - aug_spike_times[j]
if diff > self.refrac_period:
current_spike_time = aug_spike_times[j]
valid_spike_times.append(current_spike_time)
j += 1
elif diff > - self.refrac_period and diff < self.refrac_period:
# Violating refrac period with respect to base spike_times
j += 1
current_spike_time = last_spike_time
num_violation += 1
else:
current_spike_time = spike_times[i]
i += 1
# Check whether there is a silent period.
silence = current_spike_time - last_spike_time
if silence > 2 * self.refrac_period:
silent_period.append((last_spike_time, current_spike_time))
last_spike_time = current_spike_time.astype('int')
# Add back as many of the invalidated augmented spike times as possible.
i = 0
while num_violation > 0 and i < len(silent_period):
valid_spike_times.append(
silent_period[i][0] + self.refrac_period)
i += 1
num_violation -= 1
return np.sort(np.array(valid_spike_times)).astype('int')
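# Illustrative usage sketch (not from the original source; ``stability`` is
# a hypothetical instance of this class and the refractory period is assumed
# to work out to 60 samples). An augmented spike that falls within the
# refractory period of a base spike counts as a violation and, where
# possible, is re-inserted at the start of a detected silent gap:
#   >>> base = np.array([0, 200, 400])
#   >>> aug = np.array([210, 1000])   # 210 collides with the base spike at 200
#   >>> stability.correct_spike_time(base, aug)
#   array([  60, 1000])               # 210 dropped, one spike re-added at 0 + 60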
def move_spatial_trace(self, template, spatial_size=10, mode='amp'):
"""Moves the waveform spatially around the probe.
Parameters
----------
template: numpy.ndarray
Shape [T, C]
spatial_size: int
How many channels comprise the spatial trace of the given template.
mode: str
Main channels are detected using amplitude if 'amp' and energy
otherwise.
"""
new_temp = np.zeros(template.shape)
if mode == 'amp':
location = np.argsort(
np.max(np.abs(template), axis=0))[-spatial_size:]
main_channel = location[-1]
# Move the main channel to another channel, sampled according to a
# binomial distribution with n_chan trials over the list of channels
# sorted by distance from the original channel.
rand_int = np.random.binomial(n=self.n_chan, p=self.dist_factor)
new_main_channel = self.closest_channels[main_channel, rand_int]
prior_coordinates = self.geometry[main_channel, :]
new_coordinates = self.geometry[new_main_channel, :]
translation = new_coordinates - prior_coordinates
x_move = translation[0]
y_move = translation[1]
# the vector of translation from original location to new one.
trans = np.zeros([len(location), 2]).astype('int') - 1
trans[:, 0] = location
for i, l in enumerate(location):
new_x_coord = self.geometry[l, 0] + x_move
new_y_coord = self.geometry[l, 1] + y_move
candidate = (new_x_coord, new_y_coord)
if candidate in self.geom_map:
trans[i, 1] = self.geom_map[candidate]
else:
continue
idx_origin = trans[trans[:, 1] >= 0, 0]
idx_moved = trans[trans[:, 1] >= 0, 1]
new_temp[:, idx_moved] = template[:, idx_origin]
return new_temp
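# Note on the move above (illustrative, not from the original source): the
# main channel is re-drawn from the distance-sorted channel list via a
# binomial sample, and every channel in the template's spatial trace is
# shifted by the same (x, y) translation, keeping only destinations that
# exist in the probe geometry. With dist_factor=0 the binomial draw is
# always 0, so the template stays on its original main channel and the
# result equals the input template on the selected channels (others zeroed).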
def compute_stat_summary(self):
"""Sets up statistic summary of given spike train.
This function models the difference in time sample
between consecutive firings of a particular unit
as a log-normal distribution.
Returns
-------
np.ndarray
Shape [U, 3] where U is the number of units in the spike train.
The columns of the summary respectively correspond to mean,
standard deviation of the log-normal and the total spike count
for each unit.
"""
self.stat_summary = np.zeros(
[self.template_comp.n_units, 3])
spt = self.template_comp.spike_train
for u in range(self.template_comp.n_units):
# spike train of unit u
spt_u = np.sort(spt[spt[:, 1] == u, 0])
if len(spt_u) > 2:
# We estimate the difference between
# consecutive firing times of the same unit
u_firing_diff = spt_u[1:] - spt_u[:-1]
# Getting rid of duplicates.
# TODO: do this more sensibly.
u_firing_diff[u_firing_diff == 0] = 1
u_firing_diff = np.log(u_firing_diff)
u_mean = np.mean(u_firing_diff)
u_std = np.std(u_firing_diff)
self.stat_summary[u, :] = u_mean, u_std, len(spt_u)
return self.stat_summary
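# Illustrative sketch (not from the original source; ``stability`` is a
# hypothetical instance): each summary row stores the log-ISI mean/std plus
# the spike count, so fresh inter-spike intervals can later be drawn from
# the fitted log-normal, e.g.
#   >>> mu, sigma, count = stability.stat_summary[u]
#   >>> np.exp(np.random.normal(mu, sigma, 10)).astype('int')
# which mirrors what make_fake_spike_train() does below.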
def make_fake_spike_train(self, augment_rate):
"""Creates an augmented (synthetic) spike train for each unit.
Parameters
----------
augment_rate: float
Between 0 and 1. Augmented spikes per unit (percentage of total
spikes per unit).
"""
spt = self.template_comp.spike_train
# We sample a new set of spike times per cluster.
times = []
cid = []
for u in range(self.template_comp.n_units):
if (np.isnan(self.stat_summary[u, 0]) or
np.isnan(self.stat_summary[u, 1])):
continue
spt_u = np.sort(spt[spt[:, 1] == u, 0])
new_spike_count = int(
self.stat_summary[u, 2] * augment_rate)
diffs = np.exp(np.random.normal(
self.stat_summary[u, 0],
self.stat_summary[u, 1],
new_spike_count)).astype('int')
# Offsets for adding new spikes based on the
# sampled differential times.
offsets = np.sort(
np.random.choice(spt_u, new_spike_count, replace=False))
# Enforce refractory period.
diffs[diffs < self.refrac_period] = self.refrac_period
new_spikes = offsets + diffs
new_spikes = self.correct_spike_time(spt_u, new_spikes)
times += list(new_spikes)
cid += [u] * len(new_spikes)
return np.array([times, cid]).T
def save_augment_recording(self, out_file_name, length, scale=1e2):
"""Augments recording and saves it to file.
Parameters
----------
out_file_name: str
Name of the output file to which the augmented recording is written.
length: int
Length of the augmented recording in batches, using the batch size
of the original batch iterator inside the mean wave calculator
object.
scale: float
Scale factor applied to the recording before it is cast to int16
and written to disk.
Returns
-------
tuple
Tuple with two members. First is a numpy.ndarray which
is the new ground truth spike train. Second is the status,
a list of strings, each describing a boundary violation
error encountered during batch processing.
"""
status = []
reader = self.template_comp.batch_reader
reader.reset_cursor()
# Determine which clusters are spatially moved.
orig_templates = self.template_comp.templates
n_units = self.template_comp.n_units
# list of unit numbers which we move spatially.
moved_units = np.sort(
np.random.choice(range(n_units),
int(self.move_rate * n_units),
replace=False))
temp_shape = self.template_comp.templates.shape
moved_templates = np.zeros(
[temp_shape[0], temp_shape[1], len(moved_units)])
# An array of size n_units where 0 indicates no movement; otherwise
# it holds the index of the moved template in the moved_templates
# np.ndarray.
moved = np.zeros(n_units)
for i, u in enumerate(moved_units):
moved[u] = i
# Spatial displacement is drawn from a binomial distribution (see move_spatial_trace).
moved_templates[:, :, i] = self.move_spatial_trace(
orig_templates[:, :, u])
# Create augmented spike train.
aug_spt = self.make_fake_spike_train(self.augment_rate)
reader = self.template_comp.batch_reader
boundary_violation = 0
n_samples = reader.batch_time_samples
f = open(out_file_name, 'w')
# TODO: for debugging, remove later.
moved = moved.astype('int')
for i in tqdm(range(length)):
batch_idx = np.logical_and(
aug_spt[:, 0] > i * n_samples,
aug_spt[:, 0] < (i + 1) * n_samples)
spt = aug_spt[batch_idx, :]
spt[:, 0] -= n_samples * i
ts = reader.next_batch()
for j in range(spt.shape[0]):
cid = spt[j, 1]
try:
# Time window around spike
spike_win = spt[j, 0] + self.template_comp.window
if moved[cid]:
sup_signal = moved_templates[:, :, moved[cid]]
ts[spike_win, :] += sup_signal
else:
sup_signal = orig_templates[:, :, cid]
ts[spike_win, :] += sup_signal
except Exception as e:
status.append('warning:{}'.format(str(e)))
boundary_violation += 1
ts *= scale
ts = ts.astype('int16')
ts.tofile(f)
# Reassign spikes from moved clusters to new units.
new_unit_id = self.template_comp.n_units
for u in range(self.template_comp.n_units):
if moved[u]:
aug_spt[aug_spt[:, 1] == u, 1] = new_unit_id
new_unit_id += 1
f.close()
orig_count = self.template_comp.spike_train.shape[0]
aug_count = aug_spt.shape[0]
# Appends the new synthetic spike train to the base spike train.
new_aug_spike_train = np.append(
np.append(self.template_comp.spike_train,
np.zeros([orig_count, 1], dtype='int'),
axis=1),
np.append(aug_spt, np.ones([aug_count, 1], dtype='int'), axis=1),
axis=0)
# Gets rid of any spike times beyond the length of the augmented
# data set.
aug_rec_len = length * self.batch_num_samples
valid_idx = new_aug_spike_train[:, 0] < aug_rec_len
new_aug_spike_train = new_aug_spike_train[valid_idx, :]
return new_aug_spike_train, status
class SpikeSortingEvaluation(object):
def __init__(self, spt_base, spt, tmp_base=None, tmp=None,
method='hungarian'):
"""Sets up the evaluation object with two spike trains.
Parameters
----------
spt_base: numpy.ndarray
Shape [N, 2]. Baseline spike train. First column is spike times
and second the cluster identities.
spt: numpy.ndarray
Shape [M, 2]. Spike train to be evaluated, in the same format.
tmp_base: numpy.ndarray or None
Shape [T1, C, N]. Ground truth unit mean waveforms. If None,
the Hungarian algorithm is used for matching.
tmp: numpy.ndarray or None
Shape [T2, C, M]. Clustering units mean waveforms. If None,
the Hungarian algorithm is used for matching.
method: str, 'greedy' or 'hungarian'
Method for matching clusters/units.
"""
if tmp_base is None or tmp is None:
method = 'hungarian'
# clean the spike train before calling this function.
self.tmp_base = tmp_base
self.tmp = tmp
spt_base = clean_spike_train(spt_base)
spt = clean_spike_train(spt)
self.n_units = np.max(spt_base[:, 1]) + 1
self.n_clusters = np.max(spt[:, 1]) + 1
self.spt_base = spt_base
self.spt = spt
# Spike counts per unit and cluster
self.spike_count_base = self.count_spikes(spt_base)
self.spike_count_cluster = self.count_spikes(spt)
# Compute matching and accuracies.
self.confusion_matrix = None
self.compute_confusion_matrix()
self.true_positive = np.zeros(self.n_units)
self.false_positive = np.zeros(self.n_units)
self.unit_cluster_map = np.zeros(self.n_units, dtype='int')
self.compute_accuracies(method)
def count_spikes(self, spt):
"""Counts spike events per cluster/unit.
Parameters
----------
spt: numpy.ndarray
Shape [N, 2]. Clean spike train where cluster ids are 0, ..., N-1.
"""
n_cluster = np.max(spt[:, 1]) + 1
counts = np.zeros(n_cluster)
for u in range(n_cluster):
counts[u] = np.sum(spt[:, 1] == u)
return counts
def compute_confusion_matrix(self):
"""Calculates the confusion matrix of two spike trains.
The first spike train is the instance's original spike train.
The second one is given as an argument.
"""
confusion_matrix = np.zeros(
[self.n_units, self.n_clusters])
for unit in tqdm(range(self.n_units)):
idx = self.spt_base[:, 1] == unit
spike_times_base = np.sort(self.spt_base[idx, 0])
for cluster in range(self.n_clusters):
idx = self.spt[:, 1] == cluster
spike_times_cluster = np.sort(self.spt[idx, 0])
confusion_matrix[unit, cluster] = self.count_matches(
spike_times_base, spike_times_cluster)
self.confusion_matrix = confusion_matrix
def count_matches(self, array1, array2):
"""Finds the matches between two counting processes.
Returns
-------
int
Number of temporal collisions of spikes in array1 vs spikes in
array2.
"""
# In time samples
self.admissible_proximity = 60
m, n = len(array1), len(array2)
i, j = 0, 0
count = 0
while i < m and j < n:
if abs(array1[i] - array2[j]) < self.admissible_proximity:
i += 1
j += 1
count += 1
elif array1[i] < array2[j]:
i += 1
else:
j += 1
return count
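# Worked example (illustrative, not from the original source; ``ev`` is a
# hypothetical instance) of the two-pointer match count above, with
# admissible_proximity = 60 samples:
#   >>> ev.count_matches(np.array([100, 300, 900]), np.array([120, 500, 910]))
#   2
# 100~120 and 900~910 collide within 60 samples; 300 vs 500 does not.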
def compute_accuracies(self, method):
"""Computes the TP/FP accuracies for the given spike trains.
Parameters
----------
method: str, 'hungarian' or 'greedy'
Method of matching base units to clusters.
"""
# Maps ground truth unit to matched cluster unit.
# -1 indicates no matching if n_units > n_clusters.
unmatched_clusters = list(range(self.n_clusters))
self.unit_cluster_map = np.zeros(self.n_units, dtype='int') - 1
if method == 'hungarian':
# Compute the accuracy confusion matrix.
percent_matrix = self.confusion_matrix / np.reshape(
self.spike_count_base, [self.n_units, 1])
units, clusters = linear_sum_assignment(
-percent_matrix)
self.unit_cluster_map[units] = clusters
elif method == 'greedy':
# Calculate and match energy of templates.
# The energy is based on amplitude (l-inf norm).
energy_base = np.max(self.tmp_base, axis=0)
energy = np.max(self.tmp, axis=0)
energy_dist = cdist(energy_base.T, energy.T)
ordered_units = reversed(
np.argsort(np.linalg.norm(energy_base, axis=0)))
# First match the largest energy ground truth templates.
for unit in ordered_units:
if len(unmatched_clusters) < 1:
break
# TODO(hooshmand): Find a fix for template comparison.
# If the closest template is not very similar skip it.
# if (np.min(energy_dist[unit, unmatched_clusters]) >
# 1/4 * np.linalg.norm(energy_base[:, unit])):
# continue
# Also, something like selecting the template with
# the closest shape, e.g. the following zombie code line.
matched_cluster_id = unmatched_clusters[np.argmin(
energy_dist[unit, unmatched_clusters])]
matched_cluster_id = unmatched_clusters[np.argmax(
self.confusion_matrix[unit, unmatched_clusters])]
unmatched_clusters.remove(matched_cluster_id)
self.unit_cluster_map[unit] = matched_cluster_id
# Units which have a match in the clusters.
rec_units = np.where(self.unit_cluster_map > -1)[0]
recovered = np.zeros(self.n_units)
for unit in rec_units:
recovered[unit] = (self.confusion_matrix[unit,
self.unit_cluster_map[unit]])
self.true_positive = recovered / self.spike_count_base
match_count = self.spike_count_cluster[self.unit_cluster_map]
self.false_positive = (match_count - recovered) / match_count
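# Worked example (illustrative, not from the original source): if ground
# truth unit u fires 100 times, its matched cluster contains 120 spikes,
# and 90 of those collide in time with unit u's spikes, then
#   true_positive[u]  = 90 / 100          = 0.90
#   false_positive[u] = (120 - 90) / 120  = 0.25
# i.e. TP is normalised by the base spike count and FP by the matched
# cluster's spike count, as computed above.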
|
|
"""Summarize one hourly ASOS variable by another."""
import datetime
import pandas as pd
from pyiem.util import get_autoplot_context, get_sqlalchemy_conn
from pyiem.plot import figure_axes
from pyiem.exceptions import NoDataFound
from sqlalchemy import text
VDICT = {
"dwpf": "Air Dew Point Temp [F]",
"tmpf": "Air Temperature [F]",
"feel": "Feels Like Temp [F]",
"p01i": "Hourly Precipitation [inch]",
"alti": "Pressure Altimeter [in Hg]",
"mslp": "Pressure Mean Sea Level [mb]",
"relh": "Relative Humidity [%]",
"vsby": "Visibility [mile]",
"gust": "Wind Gust [kts]",
"sknt": "Wind Speed [kts]",
}
ADICT = {"min": "Minimum", "avg": "Average", "max": "Maximum"}
MDICT = dict(
[
("all", "No Month/Time Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
"""Return a dict describing how to call this plotter"""
desc = {}
desc["data"] = True
desc["cache"] = 86400
desc["highcharts"] = True
desc[
"description"
] = """This plot generates a comparison between two hourly ASOS
observation values. The interactive chart version and raw data download
also present the most recent UTC timestamp for that given combination.
Apps like these are very good at quickly spotting bad-data outliers, so
please review any results you find here to see if the values match
what the reality may have been. Additionally, for the case of mean
values, the presented timestamp is not of much use."""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="DSM",
label="Select Station:",
network="IA_ASOS",
),
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
dict(
type="select",
name="x",
default="tmpf",
label="X-Axis Categorical Variable",
options=VDICT,
),
dict(
type="select",
name="y",
default="p01i",
label="Y-Axis Variable to Summarize",
options=VDICT,
),
dict(
type="select",
name="agg",
default="max",
label="How to Summarize Y-axis Variable",
options=ADICT,
),
dict(
type="year",
name="syear",
default=1920,
minval=1920,
label="Limit to Potential Start Year for the plot:",
),
]
return desc
def highcharts(fdict):
"""Fancy plot."""
ctx = get_data(fdict)
df = ctx["df"]
ISO = "%Y-%m-%d %H:%M Z"
return (
"""
var x = """
+ str(df["x"].values.tolist())
+ """;
var dates = """
+ str(df["utc_valid"].dt.strftime(ISO).tolist())
+ """;
$("#ap_container").highcharts({
title: {text: '"""
+ ctx["title"]
+ """'},
subtitle: {text: '"""
+ ctx["subtitle"]
+ """'},
chart: {zoomType: 'x'},
tooltip: {
formatter: function() {
var idx = x.indexOf(this.x);
return "X: " + this.x + " Y: " + this.y + " @ " + dates[idx];
}
},
xAxis: {
categories: """
+ str(df["x"].values.tolist())
+ """,
title: {text: '"""
+ ctx["xlabel"]
+ """'}},
yAxis: {title: {text: '"""
+ ctx["ylabel"]
+ """'}},
series: [{
type: 'column',
width: 0.8,
tooltip: {
valueDecimals: 2
},
data: """
+ str(df["y"].values.tolist())
+ """
}]
});
"""
)
def get_data(fdict):
"""Build out the context."""
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
month = ctx["month"]
agg = ctx["agg"]
x = ctx["x"]
y = ctx["y"]
# belt and suspenders
assert x in VDICT
assert y in VDICT
assert agg in ADICT
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
months = [ts.month, 999]
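# Illustrative note (not from the original source): a single month such as
# "nov" becomes months = [11, 999]; the 999 sentinel never matches a real
# month but keeps tuple(months) two elements long, so the "in :months"
# bind parameter in the SQL below still expands to a valid IN (...) list.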
cast = "int" if x in ["tmpf", "dwpf", "feel"] else "real"
basets = datetime.date(ctx["syear"], 1, 1)
direction = "DESC" if agg == "max" else "ASC"
with get_sqlalchemy_conn("asos") as conn:
ctx["df"] = pd.read_sql(
text(
f"""
WITH data as (
SELECT {x}::{cast} as x, ({y}) as yv,
first_value(valid at time zone 'UTC') OVER (
PARTITION by {x}::{cast}
ORDER by {y} {direction}, valid DESC) as timestamp
from alldata where station = :station
and extract(month from valid) in :months
and report_type = 2 and valid >= :basets
and {x} is not null and {y} is not null
ORDER by x ASC)
SELECT x, {agg}(yv) as y, max(timestamp) as utc_valid from data
GROUP by x ORDER by x ASC
"""
),
conn,
params={
"station": station,
"months": tuple(months),
"basets": basets,
},
index_col=None,
)
if ctx["df"].empty:
raise NoDataFound("No Data Found.")
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata.")
minyear = ctx["df"]["utc_valid"].dt.year.min()
ctx["xlabel"] = VDICT[ctx["x"]]
ctx["ylabel"] = ADICT[ctx["agg"]] + " " + VDICT[ctx["y"]]
ctx["title"] = ("%s [%s]") % (ctx["_nt"].sts[station]["name"], station)
ctx["subtitle"] = ("%s %s by %s (month=%s) (%s-%s)") % (
ADICT[agg],
VDICT[y],
VDICT[x],
month.upper(),
minyear,
datetime.datetime.now().year,
)
return ctx
def plotter(fdict):
"""Go"""
ctx = get_data(fdict)
df = ctx["df"]
(fig, ax) = figure_axes(apctx=ctx)
ax.bar(df["x"].values, df["y"].values, color="blue")
ax.grid(True)
ax.set_title(ctx["title"] + "\n" + ctx["subtitle"])
ax.set_xlabel(ctx["xlabel"])
ax.set_ylabel(ctx["ylabel"])
df = df.rename({"x": VDICT[ctx["x"]], "y": VDICT[ctx["y"]]}, axis=1)
return fig, df
if __name__ == "__main__":
plotter(dict(month="nov"))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn.estimators import composable_model
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
def _changing_default_center_bias():
logging.warn(
"Change warning: default value of `enable_centered_bias` will change"
" after 2016-10-09. It will be disabled by default."
"Instructions for keeping existing behaviour:\n"
"Explicitly set `enable_centered_bias` to 'True' if you want to keep "
"existing behaviour.")
# TODO(ispir): Increase test coverage
class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
"""An estimator for TensorFlow Linear and DNN joined training models.
Input of `fit`, `train`, and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
head,
model_dir=None,
linear_feature_columns=None,
linear_optimizer=None,
_joint_linear_weights=False,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
config=None,
feature_engineering_fn=None,
default_prediction_key=None):
"""Initializes a _DNNLinearCombinedBaseEstimator instance.
Args:
head: A _Head object.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set should be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
_joint_linear_weights: If True will use a single (possibly partitioned)
variable to store all weights for the linear model. More efficient if
there are many columns, however requires all columns are sparse and
have the 'sum' combiner.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set should be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
targets which are the output of `input_fn` and
returns features and targets which will be fed
into the model.
default_prediction_key: Default prediction key to use with metrics.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
super(_DNNLinearCombinedBaseEstimator, self).__init__(
model_dir=model_dir, config=config)
num_ps_replicas = config.num_ps_replicas if config else 0
self._linear_model = composable_model.LinearComposableModel(
num_label_columns=head.logits_dimension,
optimizer=linear_optimizer,
_joint_weights=_joint_linear_weights,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas)
self._dnn_model = composable_model.DNNComposableModel(
num_label_columns=head.logits_dimension,
hidden_units=dnn_hidden_units,
optimizer=dnn_optimizer,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None
self._linear_feature_columns = linear_feature_columns
self._linear_optimizer = linear_optimizer
self._dnn_feature_columns = dnn_feature_columns
self._dnn_hidden_units = dnn_hidden_units
self._head = head
self._default_prediction_key = default_prediction_key
self._feature_engineering_fn = (
feature_engineering_fn or
(lambda features, targets: (features, targets)))
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def linear_weights_(self):
"""Returns weights per feature of the linear part."""
return self._linear_model.get_weights(model_dir=self._model_dir)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def linear_bias_(self):
"""Returns bias of the linear part."""
return (self._linear_model.get_bias(model_dir=self._model_dir) +
self.get_variable_value("centered_bias_weight"))
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def dnn_weights_(self):
"""Returns weights of deep neural network part."""
return self._dnn_model.get_weights(model_dir=self._model_dir)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def dnn_bias_(self):
"""Returns bias of deep neural network part."""
return (self._dnn_model.get_bias(model_dir=self._model_dir) +
[self.get_variable_value("centered_bias_weight")])
# TODO(zakaria): Remove this function once export.export_estimator is
# obsolete.
def _create_signature_fn(self):
"""Returns a function to create export signature of this Estimator."""
# pylint: disable=protected-access
return self._head._create_signature_fn()
def _get_feature_dict(self, features):
if isinstance(features, dict):
return features
return {"": features}
def _get_train_ops(self, features, targets):
"""See base class."""
features = self._get_feature_dict(features)
features, targets = self._feature_engineering_fn(features, targets)
logits = self._logits(features, is_training=True)
def _make_training_op(training_loss):
global_step = contrib_variables.get_global_step()
assert global_step
linear_train_step = self._linear_model.get_train_step(training_loss)
dnn_train_step = (self._dnn_model.get_train_step(training_loss) if
self._dnn_model else [])
with ops.control_dependencies(linear_train_step + dnn_train_step):
with ops.get_default_graph().colocate_with(global_step):
return state_ops.assign_add(global_step, 1).op
model_fn_ops = self._head.head_ops(features, targets,
estimator.ModeKeys.TRAIN,
_make_training_op,
logits=logits)
return model_fn_ops.training_op, model_fn_ops.loss
def _get_eval_ops(self, features, targets, metrics=None):
"""See base class."""
features = self._get_feature_dict(features)
features, targets = self._feature_engineering_fn(features, targets)
logits = self._logits(features)
model_fn_ops = self._head.head_ops(features, targets,
estimator.ModeKeys.EVAL, None,
logits=logits)
all_metrics = model_fn_ops.default_metrics
if metrics:
for name, metric in six.iteritems(metrics):
if not isinstance(name, tuple):
# TODO(zakaria): remove once deprecation is finished (b/31229024)
all_metrics[(name, self._default_prediction_key)] = metric
else:
all_metrics[name] = metric
# TODO(zakaria): Remove this once we refactor this class to delegate
# to estimator.
# pylint: disable=protected-access
result = estimator._make_metrics_ops(all_metrics, features, targets,
model_fn_ops.predictions)
return result
def _get_predict_ops(self, features):
"""See base class."""
features = self._get_feature_dict(features)
features, _ = self._feature_engineering_fn(features, None)
logits = self._logits(features)
model_fn_ops = self._head.head_ops(features, None, estimator.ModeKeys.INFER,
None, logits=logits)
return model_fn_ops.predictions
@deprecated(
"2016-09-23",
"The signature of the input_fn accepted by export is changing to be "
"consistent with what's used by tf.Learn Estimator's train/evaluate, "
"which makes this function useless. This will be removed after the "
"deprecation date.")
def _get_feature_ops_from_example(self, examples_batch):
column_types = layers.create_feature_spec_for_parsing((
self._get_linear_feature_columns() or []) + (
self._get_dnn_feature_columns() or []))
features = parsing_ops.parse_example(examples_batch, column_types)
return features
def _get_linear_feature_columns(self):
if not self._linear_feature_columns:
return None
feature_column_ops.check_feature_columns(self._linear_feature_columns)
return sorted(set(self._linear_feature_columns), key=lambda x: x.key)
def _get_dnn_feature_columns(self):
if not self._dnn_feature_columns:
return None
feature_column_ops.check_feature_columns(self._dnn_feature_columns)
return sorted(set(self._dnn_feature_columns), key=lambda x: x.key)
def _dnn_logits(self, features, is_training):
return self._dnn_model.build_model(
features, self._dnn_feature_columns, is_training)
def _linear_logits(self, features, is_training):
return self._linear_model.build_model(
features, self._linear_feature_columns, is_training)
def _logits(self, features, is_training=False):
linear_feature_columns = self._get_linear_feature_columns()
dnn_feature_columns = self._get_dnn_feature_columns()
if not (linear_feature_columns or dnn_feature_columns):
raise ValueError("Either linear_feature_columns or dnn_feature_columns "
"should be defined.")
if linear_feature_columns and dnn_feature_columns:
logits = (self._linear_logits(features, is_training) +
self._dnn_logits(features, is_training))
elif dnn_feature_columns:
logits = self._dnn_logits(features, is_training)
else:
logits = self._linear_logits(features, is_training)
return logits
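# Illustrative note (not from the original source): in the combined (wide &
# deep) case both parts emit logits of shape
# [batch_size, head.logits_dimension] and the prediction is simply their
# elementwise sum,
#   logits = linear_logits + dnn_logits
# falling back to whichever single part is configured otherwise.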
class DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator):
"""A classifier for TensorFlow Linear and DNN joined training models.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
education_emb = embedding_column(sparse_id_column=education, dimension=16,
combiner="sum")
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
estimator = DNNLinearCombinedClassifier(
# common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[education_x_occupation],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[education_emb, occupation_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
# Input builders
def input_fn_train(): # returns x, y
...
def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
model_dir=None,
n_classes=2,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
_joint_linear_weights=False,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=None,
config=None,
feature_engineering_fn=None):
"""Constructs a DNNLinearCombinedClassifier instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
n_classes: number of target classes. Default is binary classification.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training.
It will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
_joint_linear_weights: If True a single (possibly partitioned) variable
will be used to store the linear model weights. It's faster, but
requires all columns are sparse and have the 'sum' combiner.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
targets which are the output of `input_fn` and
returns features and targets which will be fed
into the model.
Raises:
ValueError: If `n_classes` < 2.
ValueError: If both `linear_feature_columns` and `dnn_features_columns`
are empty at the same time.
"""
if n_classes < 2:
raise ValueError("n_classes should be greater than 1. Given: {}".format(
n_classes))
if enable_centered_bias is None:
enable_centered_bias = True
_changing_default_center_bias()
# pylint: disable=protected-access
head = head_lib._multi_class_head(
n_classes=n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias)
super(DNNLinearCombinedClassifier, self).__init__(
model_dir=model_dir,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
_joint_linear_weights=_joint_linear_weights,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
head=head,
config=config,
feature_engineering_fn=feature_engineering_fn,
default_prediction_key=head_lib.PedictionKey.CLASSES)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes (or an iterable of predicted classes if
as_iterable is True).
"""
predictions = self.predict_proba(
x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)
if as_iterable:
return (np.argmax(p, axis=0) for p in predictions)
else:
return np.argmax(predictions, axis=1)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(
self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Returns prediction probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities (or an iterable of predicted
probabilities if as_iterable is True).
"""
return super(DNNLinearCombinedClassifier, self).predict(
x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)
def _get_predict_ops(self, features):
"""See base class."""
return super(DNNLinearCombinedClassifier, self)._get_predict_ops(features)[
head_lib.PedictionKey.PROBABILITIES]
class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
"""A regressor for TensorFlow Linear and DNN joined training models.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
education_emb = embedding_column(sparse_id_column=education, dimension=16,
combiner="sum")
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
estimator = DNNLinearCombinedRegressor(
# common settings
weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[education_x_occupation],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[education_emb, occupation_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# It is the same for FtrlOptimizer.
# Input builders
def input_fn_train(): # returns x, y
...
def input_fn_eval(): # returns x, y
...
estimator.train(input_fn_train)
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
model_dir=None,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
_joint_linear_weights=False,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=None,
target_dimension=1,
config=None,
feature_engineering_fn=None):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
_joint_linear_weights: If True a single (possibly partitioned) variable
will be used to store the linear model weights. It's faster, but
requires that all columns are sparse and have the 'sum' combiner.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
target_dimension: TODO(zakaria): dimension of the target for multilabels.
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
targets which are the output of `input_fn` and
returns features and targets which will be fed
into the model.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
if enable_centered_bias is None:
enable_centered_bias = True
_changing_default_center_bias()
# pylint: disable=protected-access
head = head_lib._regression_head(
weight_column_name=weight_column_name,
target_dimension=target_dimension,
enable_centered_bias=enable_centered_bias)
super(DNNLinearCombinedRegressor, self).__init__(
model_dir=model_dir,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
_joint_linear_weights=_joint_linear_weights,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
head=head,
config=config,
feature_engineering_fn=feature_engineering_fn,
default_prediction_key=head_lib.PedictionKey.SCORES)
def _get_predict_ops(self, features):
"""See base class."""
return super(DNNLinearCombinedRegressor, self)._get_predict_ops(features)[
head_lib.PedictionKey.SCORES]
|
|
"""
Sublime Text Scheme template.
Converts a color scheme to CSS and provides templating so that
additional resources can access the scheme colors.
Licensed under MIT
Copyright (c) 2015 - 2016 Isaac Muse <isaacmuse@gmail.com>
----------------------
TextMate theme to CSS.
https://manual.macromates.com/en/language_grammars#naming_conventions
"""
import sublime
import re
from . import version as ver
from .rgba import RGBA
from . import x11colors
from .st_color_scheme_matcher import ColorSchemeMatcher
import jinja2
from pygments.formatters import HtmlFormatter
from collections import OrderedDict
from .st_clean_css import clean_css
import copy
import decimal
INVALID = -1
POPUP = 0
PHANTOM = 1
LUM_MIDPOINT = 127
re_float_trim = re.compile(r'^(?P<keep>\d+)(?P<trash>\.0+|(?P<keep2>\.\d*[1-9])0+)$')
re_valid_custom_scopes = re.compile(r'[a-zA-Z\d]+[a-zA-Z\d._\-]*')
re_missing_semi_colon = re.compile(r'(?<!;) \}')
# Just track the deepest level. We'll unravel it.
# https://manual.macromates.com/en/language_grammars#naming_conventions
textmate_scopes = {
'comment.line.double-slash',
'comment.line.double-dash',
'comment.line.number-sign',
'comment.line.percentage',
'comment.line.character',
'comment.block.documentation',
'constant.numeric',
'constant.character',
'constant.language',
'constant.other',
'entity.name.function',
'entity.name.type',
'entity.name.tag',
'entity.name.section',
'entity.other.inherited-class',
'entity.other.attribute-name',
'invalid.illegal',
'invalid.deprecated',
'keyword.control',
'keyword.operator',
'keyword.other',
'markup.underline.link',
'markup.bold',
'markup.heading',
'markup.italic',
'markup.list.numbered',
'markup.list.unnumbered',
'markup.quote',
'markup.raw',
'markup.other',
'meta',
'storage.type',
'storage.modifier',
'string.quoted.single',
'string.quoted.double',
'string.quoted.triple',
'string.quoted.other',
'string.unquoted',
'string.interpolated',
'string.regexp',
'string.other',
'support.function',
'support.class',
'support.type',
'support.constant',
'support.variable',
'support.other',
'variable.parameter',
'variable.language',
'variable.other'
}
# http://www.sublimetext.com/docs/3/scope_naming.html
sublime_scopes = {
"comment.block.documentation",
"punctuation.definition.comment",
"constant.numeric.integer",
"constant.numeric.float",
"constant.numeric.hex",
"constant.numeric.octal",
"constant.language",
"constant.character.escape",
"constant.other.placeholder",
"entity.name.struct",
"entity.name.enum",
"entity.name.union",
"entity.name.trait",
"entity.name.interface",
"entity.name.type",
"entity.name.class.forward-decl",
"entity.other.inherited-class",
"entity.name.function.constructor",
"entity.name.function.destructor",
"entity.name.namespace",
"entity.name.constant",
"entity.name.label",
"entity.name.section",
"entity.name.tag",
"entity.other.attribute-name",
"invalid.illegal",
"invalid.deprecated",
"keyword.control.conditional",
"keyword.control.import",
"punctuation.definition.keyword",
"keyword.operator.assignment",
"keyword.operator.arithmetic",
"keyword.operator.bitwise",
"keyword.operator.logical",
"keyword.operator.word",
"markup.heading",
"markup.list.unnumbered",
"markup.list.numbered",
"markup.bold",
"markup.italic",
"markup.underline",
"markup.inserted",
"markup.deleted",
"markup.underline.link",
"markup.quote",
"markup.raw.inline",
"markup.raw.block",
"markup.other",
"punctuation.terminator",
"punctuation.separator.continuation",
"punctuation.accessor",
"source",
"storage.type",
"storage.modifier",
"string.quoted.single",
"string.quoted.double",
"string.quoted.triple",
"string.quoted.other",
"punctuation.definition.string.begin",
"punctuation.definition.string.end",
"string.unquoted",
"string.regexp",
"support.constant",
"support.function",
"support.module",
"support.type",
"support.class",
"text.html",
"text.xml",
"variable.other.readwrite",
"variable.other.constant",
"variable.language",
"variable.parameter",
"variable.other.member",
"variable.function"
}
# Merge the sets together
all_scopes = set()
for ss in (sublime_scopes | textmate_scopes):
parts = ss.split('.')
for index in range(1, len(parts) + 1):
all_scopes.add('.'.join(parts[:index]))
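# Illustrative note (not from the original source): the loop above expands
# each dotted scope into every prefix, e.g. 'comment.line.double-slash'
# contributes 'comment', 'comment.line' and 'comment.line.double-slash' to
# all_scopes, so CSS classes get generated for partial scopes as well.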
re_base_colors = re.compile(r'^\s*\.(?:dummy)\s*\{([^}]+)\}', re.MULTILINE)
re_color = re.compile(r'(?<!-)(color\s*:\s*#[A-Fa-z\d]{6})')
re_bgcolor = re.compile(r'(?<!-)(background(?:-color)?\s*:\s*#[A-Fa-z\d]{6})')
re_pygments_selectors = re.compile(r'\.dummy (\.[a-zA-Z\d]+) ')
CODE_BLOCKS = '.mdpopups .highlight, .mdpopups .inline-highlight { %s; %s; }'
CODE_BLOCKS_LEGACY = '.highlight, .inline-highlight { %s; %s; }'
def fmt_float(f, p=0):
"""Set float precision and trim precision zeros."""
string = str(
decimal.Decimal(f).quantize(decimal.Decimal('0.' + ('0' * p) if p > 0 else '0'), decimal.ROUND_HALF_UP)
)
m = re_float_trim.match(string)
if m:
string = m.group('keep')
if m.group('keep2'):
string += m.group('keep2')
return string
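# Illustrative examples (not from the original source) of fmt_float():
#   fmt_float(2.25, 3)  -> '2.25'  (trailing precision zeros trimmed)
#   fmt_float(2.0, 3)   -> '2'     ('.000' dropped entirely)
#   fmt_float(2.5)      -> '3'     (default p=0 with ROUND_HALF_UP)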
class Scheme2CSS(object):
"""Determine color scheme colors and style for text in a Sublime view buffer."""
def __init__(self, scheme_file):
"""Initialize."""
self.csm = ColorSchemeMatcher(scheme_file)
self.text = ''
self.colors = OrderedDict()
self.scheme_file = scheme_file
self.css_type = INVALID
self.gen_css()
def guess_style(self, scope, selected=False, explicit_background=False):
"""Guess color."""
return self.csm.guess_color(scope, selected, explicit_background)
def parse_global(self):
"""Parse global settings."""
color_settings = {}
for item in self.csm.plist_file["settings"]:
if item.get('scope', None) is None and item.get('name', None) is None:
color_settings = item["settings"]
break
# Get general theme colors from color scheme file
self.bground = self.process_color(color_settings.get("background", '#FFFFFF'), simple_strip=True)
rgba = RGBA(self.bground)
self.lums = rgba.get_luminance()
is_dark = self.lums <= LUM_MIDPOINT
settings = sublime.load_settings("Preferences.sublime-settings")
self.variables = {
"is_dark": is_dark,
"is_light": not is_dark,
"sublime_version": int(sublime.version()),
"mdpopups_version": ver.version(),
"color_scheme": self.scheme_file,
"use_pygments": not settings.get('mdpopups.use_sublime_highlighter', False),
"default_formatting": settings.get('mdpopups.default_formatting', True),
"default_style": settings.get('mdpopups.default_style', True)
}
self.html_border = rgba.get_rgb()
self.fground = self.process_color(color_settings.get("foreground", '#000000'))
# Initialize colors with the global foreground, background, and fake html_border
self.colors = OrderedDict()
self.colors['.foreground'] = OrderedDict([('color', 'color: %s; ' % self.fground)])
self.colors['.background'] = OrderedDict([('background-color', 'background-color: %s; ' % self.bground)])
def parse_settings(self):
"""Parse the color scheme."""
for tscope in sorted(all_scopes):
scope = self.guess_style(tscope, explicit_background=True)
key_scope = '.' + tscope
color = scope.fg_simulated
bgcolor = scope.bg_simulated
if color or bgcolor:
self.colors[key_scope] = OrderedDict()
if color:
self.colors[key_scope]['color'] = 'color: %s; ' % color
if bgcolor:
self.colors[key_scope]['background-color'] = 'background-color: %s; ' % bgcolor
for s in scope.style.split(' '):
if "bold" in s:
self.colors[key_scope]['font-weight'] = 'font-weight: %s; ' % 'bold'
if "italic" in s:
self.colors[key_scope]['font-style'] = 'font-style: %s; ' % 'italic'
if "underline" in s and False: # disabled
self.colors[key_scope]['text-decoration'] = 'text-decoration: %s; ' % 'underline'
def process_color(self, color, simple_strip=False):
"""
Strip transparency from the color value.
Transparency can be stripped in one of two ways:
- Simply mask off the alpha channel.
- Apply the alpha channel to the color, essentially getting the color seen by the eye.
"""
if color is None or color.strip() == "":
return None
if not color.startswith('#'):
color = x11colors.name2hex(color)
if color is None:
return None
rgba = RGBA(color.replace(" ", ""))
if not simple_strip:
rgba.apply_alpha(self.bground if self.bground != "" else "#FFFFFF")
return rgba.get_rgb()
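# Illustrative sketch (not from the original source), assuming an 8-digit
# '#RRGGBBAA' input and a white scheme background:
#   process_color('#FF000080')        -> roughly '#FF7F7F' (alpha composited
#                                        onto the background colour)
#   process_color('#FF000080', True)  -> '#FF0000' (alpha simply masked off)
# Named X11 colours (e.g. 'red') are first resolved via x11colors.name2hex.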
def gen_css(self):
"""Generate the CSS and the associated template environment."""
self.colors = OrderedDict()
self.parse_global()
self.parse_settings()
# Assemble the CSS text
text = []
css_entry = '%s { %s}' if int(sublime.version()) < 3119 else '.mdpopups %s { %s}'
for k, v in self.colors.items():
text.append(css_entry % (k, ''.join(v.values())))
self.text = '\n'.join(text)
# Create Jinja template
self.env = jinja2.Environment()
self.env.filters['css'] = self.retrieve_selector
self.env.filters['pygments'] = self.pygments
self.env.filters['foreground'] = self.to_fg
self.env.filters['background'] = self.to_bg
self.env.filters['brightness'] = self.brightness
self.env.filters['colorize'] = self.colorize
self.env.filters['hue'] = self.hue
self.env.filters['invert'] = self.invert
self.env.filters['saturation'] = self.saturation
self.env.filters['grayscale'] = self.grayscale
self.env.filters['sepia'] = self.sepia
self.env.filters['fade'] = self.fade
self.env.filters['getcss'] = self.read_css
self.env.filters['relativesize'] = self.relativesize
def read_css(self, css):
"""Read the CSS file."""
try:
var = copy.copy(self.variables)
var.update(
{
'is_phantom': self.css_type == PHANTOM,
'is_popup': self.css_type == POPUP
}
)
return self.env.from_string(
clean_css(sublime.load_resource(css))
).render(var=var, colors=self.colors, plugin=self.plugin_vars)
except Exception:
return ''
def relativesize(self, css, *args):
"""Create a relative font from the current font."""
# Handle things the new way '+1.25em'
try:
if css.endswith(('em', 'px', 'pt')):
offset = css[:-2]
unit = css[-2:]
integer = bool(len(args) and args[0])
else:
offset = css
unit = args[0]
integer = False
assert isinstance(unit, str) and unit in ('em', 'px', 'pt'), 'Bad Arguments!'
except Exception:
return css
if unit == 'em':
size = self.font_size / 16.0
elif unit == 'px':
size = self.font_size
elif unit == 'pt':
size = (self.font_size / 16.0) * 12.0
precision = 0 if integer else 3
op = offset[0]
if op in ('+', '-', '*'):
value = size * float(offset[1:]) if op == '*' else size + float(offset)
else:
value = 0.0
if value < 0.0:
value = 0.0
return '%s%s' % (fmt_float(value, precision), unit)
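# Illustrative examples (not from the original source), assuming
# self.font_size has been set to 16 by apply_template():
#   relativesize('+1.25em')    -> '2.25em'  (1em base + 1.25)
#   relativesize('*1.5', 'px') -> '24px'    (16px base * 1.5)
# A malformed unit raises inside the try block and the original string is
# returned unchanged.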
def fade(self, css, factor):
"""
Apply a fake transparency to color.
Fake transparency is performed on top of the background color.
"""
try:
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
bgcolor = self.colors.get('.background').get('background-color')
bgparts = [c.strip('; ') for c in bgcolor.split(':')]
rgba = RGBA(parts[1] + "%02x" % int(255.0 * max(min(float(factor), 1.0), 0.0)))
rgba.apply_alpha(bgparts[1])
return '%s: %s; ' % (parts[0], rgba.get_rgb())
except Exception:
pass
return css
def colorize(self, css, degree):
"""Colorize to the given hue."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.colorize(degree)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def hue(self, css, degree):
"""Shift hue."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.hue(degree)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def invert(self, css):
"""Invert color."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.invert()
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def saturation(self, css, factor):
"""Apply saturation filter."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.saturation(factor)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def grayscale(self, css):
"""Apply grayscale filter."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.grayscale()
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def sepia(self, css):
"""Apply sepia filter."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.sepia()
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def brightness(self, css, factor):
"""Adjust brightness."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.brightness(factor)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def to_fg(self, css):
"""Rename a CSS key value pair."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] == 'background-color':
parts[0] = 'color'
return '%s: %s ' % (parts[0], parts[1])
return css
def to_bg(self, css):
"""Rename a CSS key value pair."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] == 'color':
parts[0] = 'background-color'
return '%s: %s; ' % (parts[0], parts[1])
return css
def pygments(self, style):
"""Get pygments style."""
return get_pygments(style)
def retrieve_selector(self, selector, key=None):
"""Get the CSS key, value pairs for a rule."""
wanted = [s.strip() for s in selector.split(',')]
sel = {}
for w in wanted:
if w in self.colors:
sel = self.colors[w]
break
return ''.join(sel.values()) if key is None else sel.get(key, '')
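# Illustrative Jinja usage (not from the original source; the filter names
# are the ones registered in gen_css() above):
#   {{ '.comment'|css }}                         -> full key/value rule body
#   {{ '.background'|css('background-color') }}  -> just that one property
#   {{ '.foreground'|css('color')|background }}  -> same colour re-keyed as a
#                                                   background-color property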
def get_font_scale(self):
"""Get font scale."""
scale = 1.0
try:
pref_scale = float(sublime.load_settings('Preferences.sublime-settings').get('mdpopups.font_scale', 0.0))
except Exception:
pref_scale = 0.0
if sublime.platform() == 'windows' and pref_scale <= 0.0:
try:
import ctypes
logpixelsy = 90
dc = ctypes.windll.user32.GetDC(0)
height = ctypes.windll.gdi32.GetDeviceCaps(dc, logpixelsy)
scale = float(height) / 96.0
ctypes.windll.user32.ReleaseDC(0, dc)
except Exception:
pass
elif pref_scale > 0.0:
scale = pref_scale
return scale
def apply_template(self, css, css_type, font_size, template_vars=None):
"""Apply template to css."""
if css_type not in (POPUP, PHANTOM):
return ''
self.font_size = float(font_size) * self.get_font_scale()
self.css_type = css_type
var = copy.copy(self.variables)
if template_vars and isinstance(template_vars, (dict, OrderedDict)):
self.plugin_vars = copy.deepcopy(template_vars)
else:
self.plugin_vars = {}
var.update(
{
'is_phantom': self.css_type == PHANTOM,
'is_popup': self.css_type == POPUP
}
)
return self.env.from_string(css).render(var=var, colors=self.colors, plugin=self.plugin_vars)
def get_css(self):
"""Get css."""
return self.text
def get_pygments(style):
"""
Get pygments style.
    Sublime CSS support is limited. It cannot handle descendant selectors
    like `.class1 .class2`, but it can handle compound selectors like
    `.class1.class2`, so we do not prefix rules with `.highlight`.
    We first find the {...} rule that has no syntax class. This will contain
    our background and possibly foreground. If for whatever reason we
    have no background or foreground, we fall back to `#ffffff` and
    `#000000` respectively.
"""
try:
        # Let's see if we can find the pygments theme
text = HtmlFormatter(style=style).get_style_defs('.dummy')
text = re_missing_semi_colon.sub('; }', text)
except Exception:
return ''
bg = None
fg = None
# Find {...} which has no syntax classes
m = re_base_colors.search(text)
if m:
# Find background
m1 = re_bgcolor.search(m.group(1))
if m1:
# Use `background-color` as it works better
# with Sublime CSS
bg = m1.group(1).replace('background', 'background-color')
# Find foreground
m1 = re_color.search(m.group(1))
if m1:
fg = m1.group(1)
# Use defaults if None found
if bg is None:
bg = 'background-color: #ffffff'
if fg is None:
fg = 'color: #000000'
# Reassemble replacing .highlight {...} with .codehilite, .inlinehilite {...}
# All other classes will be left bare with only their syntax class.
code_blocks = CODE_BLOCKS_LEGACY if int(sublime.version()) < 3119 else CODE_BLOCKS
if m:
css = clean_css(
(
text[:m.start(0)] +
(code_blocks % (bg, fg)) +
text[m.end(0):] +
'\n'
)
)
else:
css = clean_css(
(
(code_blocks % (bg, fg)) + '\n' + text + '\n'
)
)
if int(sublime.version()) < 3119:
return css.replace('.dummy ', '')
else:
return re_pygments_selectors.sub(r'.mdpopups .highlight \1, .mdpopups .inline-highlight \1', css)
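# Illustrative sketch, not part of this module: the fake-transparency filter above
# delegates the actual blending to `RGBA.apply_alpha`, which is defined elsewhere.
# Assuming it performs standard "over" compositing of the color onto the opaque
# background (which is what the docstring describes), the per-channel math that is
# presumably applied looks like this:
def _fake_transparency_sketch(fg_hex, bg_hex, factor):
    """Blend `fg_hex` over `bg_hex` with opacity `factor` clamped to 0.0-1.0."""
    factor = max(min(float(factor), 1.0), 0.0)
    fg = [int(fg_hex[i:i + 2], 16) for i in (1, 3, 5)]
    bg = [int(bg_hex[i:i + 2], 16) for i in (1, 3, 5)]
    blended = [int(round(f * factor + b * (1.0 - factor))) for f, b in zip(fg, bg)]
    return '#%02x%02x%02x' % tuple(blended)
# Example: _fake_transparency_sketch('#ff0000', '#ffffff', 0.5) returns '#ff8080'.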
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import atexit
import copy
import functools
import os
import re
import shutil
import socket
import sys
import time
import warnings
import fixtures
import logging
from paste import deploy
import six
import testtools
from testtools import testcase
import webob
from keystone.openstack.common.fixture import mockpatch
from keystone.openstack.common import gettextutils
# NOTE(ayoung)
# environment.use_eventlet must run before any of the code that will
# call the eventlet monkeypatching.
from keystone.common import environment
environment.use_eventlet()
from keystone import auth
from keystone.common import dependency
from keystone.common import kvs
from keystone.common.kvs import core as kvs_core
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone.common import utils as common_utils
from keystone import config
from keystone import exception
from keystone import notifications
from keystone.openstack.common.db import options as db_options
from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.fixture import config as config_fixture
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
from keystone import service
from keystone.tests import ksfixtures
# NOTE(dstanek): Tests inheriting from TestCase depend on having the
# policy_file command-line option declared before setUp runs. Importing the
# oslo policy module automatically declares the option.
from keystone.openstack.common import policy as common_policy # noqa
config.configure()
LOG = log.getLogger(__name__)
PID = six.text_type(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
def _calc_tmpdir():
env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
if not env_val:
return os.path.join(TESTSDIR, 'tmp', PID)
return os.path.join(env_val, PID)
TMPDIR = _calc_tmpdir()
CONF = config.CONF
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
class dirs:
@staticmethod
def root(*p):
return os.path.join(ROOTDIR, *p)
@staticmethod
def etc(*p):
return os.path.join(ETCDIR, *p)
@staticmethod
def tests(*p):
return os.path.join(TESTSDIR, *p)
@staticmethod
def tmp(*p):
return os.path.join(TMPDIR, *p)
@staticmethod
def tests_conf(*p):
return os.path.join(TESTCONF, *p)
# keystone.common.sql.initialize() for testing.
DEFAULT_TEST_DB_FILE = dirs.tmp('test.db')
def _initialize_sql_session():
# Make sure the DB is located in the correct location, in this case set
# the default value, as this should be able to be overridden in some
# test cases.
db_file = DEFAULT_TEST_DB_FILE
db_options.set_defaults(
sql_connection='sqlite:///%s' % db_file,
sqlite_db=db_file)
_initialize_sql_session()
def checkout_vendor(repo, rev):
# TODO(termie): this function is a good target for some optimizations :PERF
name = repo.split('/')[-1]
if name.endswith('.git'):
name = name[:-4]
working_dir = os.getcwd()
revdir = os.path.join(VENDOR, '%s-%s' % (name, rev.replace('/', '_')))
modcheck = os.path.join(VENDOR, '.%s-%s' % (name, rev.replace('/', '_')))
try:
if os.path.exists(modcheck):
mtime = os.stat(modcheck).st_mtime
if int(time.time()) - mtime < 10000:
return revdir
if not os.path.exists(revdir):
common_utils.git('clone', repo, revdir)
os.chdir(revdir)
common_utils.git('checkout', '-q', 'master')
common_utils.git('pull', '-q')
common_utils.git('checkout', '-q', rev)
# write out a modified time
with open(modcheck, 'w') as fd:
fd.write('1')
except environment.subprocess.CalledProcessError:
LOG.warning(_('Failed to checkout %s'), repo)
os.chdir(working_dir)
return revdir
def setup_database():
db = dirs.tmp('test.db')
pristine = dirs.tmp('test.db.pristine')
if os.path.exists(db):
os.unlink(db)
if not os.path.exists(pristine):
migration.db_sync(sql.get_engine(),
migration_helpers.find_migrate_repo())
migration_helpers.sync_database_to_version(extension='revoke')
shutil.copyfile(db, pristine)
else:
shutil.copyfile(pristine, db)
def teardown_database():
sql.cleanup()
@atexit.register
def remove_test_databases():
db = dirs.tmp('test.db')
if os.path.exists(db):
os.unlink(db)
pristine = dirs.tmp('test.db.pristine')
if os.path.exists(pristine):
os.unlink(pristine)
def generate_paste_config(extension_name):
# Generate a file, based on keystone-paste.ini, that is named:
# extension_name.ini, and includes extension_name in the pipeline
with open(dirs.etc('keystone-paste.ini'), 'r') as f:
contents = f.read()
new_contents = contents.replace(' service_v3',
' %s service_v3' % (extension_name))
new_paste_file = dirs.tmp(extension_name + '.ini')
with open(new_paste_file, 'w') as f:
f.write(new_contents)
return new_paste_file
def remove_generated_paste_config(extension_name):
# Remove the generated paste config file, named extension_name.ini
paste_file_to_remove = dirs.tmp(extension_name + '.ini')
os.remove(paste_file_to_remove)
def skip_if_cache_disabled(*sections):
"""This decorator is used to skip a test if caching is disabled either
globally or for the specific section.
In the code fragment::
@skip_if_cache_is_disabled('assignment', 'token')
def test_method(*args):
...
The method test_method would be skipped if caching is disabled globally via
the `enabled` option in the `cache` section of the configuration or if
the `caching` option is set to false in either `assignment` or `token`
sections of the configuration. This decorator can be used with no
arguments to only check global caching.
If a specified configuration section does not define the `caching` option,
this decorator makes the same assumption as the `should_cache_fn` in
keystone.common.cache that caching should be enabled.
"""
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.cache.enabled:
raise testcase.TestSkipped('Cache globally disabled.')
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if not getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching disabled.' % s)
return f(*args, **kwargs)
return inner
return wrapper
class UnexpectedExit(Exception):
pass
class TestClient(object):
def __init__(self, app=None, token=None):
self.app = app
self.token = token
def request(self, method, path, headers=None, body=None):
if headers is None:
headers = {}
if self.token:
headers.setdefault('X-Auth-Token', self.token)
req = webob.Request.blank(path)
req.method = method
for k, v in six.iteritems(headers):
req.headers[k] = v
if body:
req.body = body
return req.get_response(self.app)
def get(self, path, headers=None):
return self.request('GET', path=path, headers=headers)
def post(self, path, headers=None, body=None):
return self.request('POST', path=path, headers=headers, body=body)
def put(self, path, headers=None, body=None):
return self.request('PUT', path=path, headers=headers, body=body)
class BaseTestCase(testtools.TestCase):
"""Light weight base test class.
This is a placeholder that will eventually go away once thc
setup/teardown in TestCase is properly trimmed down to the bare
essentials. This is really just a play to speed up the tests by
eliminating unnecessary work.
"""
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
:returns: a callable that uses a closure to delete instance attributes
"""
def cleanup():
for name in names:
# TODO(dstanek): remove this 'if' statement once
# load_backend in test_backend_ldap is only called once
# per test
if hasattr(self, name):
delattr(self, name)
return cleanup
@dependency.optional('revoke_api')
class TestCase(BaseTestCase):
_config_file_list = []
def config_files(self):
return copy.copy(self._config_file_list)
def config_overrides(self):
self.config_fixture.config(policy_file=dirs.etc('policy.json'))
self.config_fixture.config(
group='auth',
methods=['keystone.auth.plugins.external.DefaultDomain',
'keystone.auth.plugins.password.Password',
'keystone.auth.plugins.token.Token',
'keystone.auth.plugins.oauth1.OAuth',
'keystone.auth.plugins.saml2.Saml2'])
self.config_fixture.config(
# TODO(morganfainberg): Make Cache Testing a separate test case
# in tempest, and move it out of the base unit tests.
group='cache',
backend='dogpile.cache.memory',
enabled=True,
proxies=['keystone.tests.test_cache.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
driver='keystone.catalog.backends.templated.Catalog',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.kvs.Identity')
self.config_fixture.config(
group='kvs',
backends=[
'keystone.tests.test_kvs.KVSBackendForcedKeyMangleFixture',
'keystone.tests.test_kvs.KVSBackendFixture'])
self.config_fixture.config(
group='revoke',
driver='keystone.contrib.revoke.backends.kvs.Revoke')
self.config_fixture.config(
group='signing',
certfile='examples/pki/certs/signing_cert.pem',
keyfile='examples/pki/private/signing_key.pem',
ca_certs='examples/pki/certs/cacert.pem')
self.config_fixture.config(
group='token',
driver='keystone.token.backends.kvs.Token')
self.config_fixture.config(
group='trust',
driver='keystone.trust.backends.kvs.Trust')
def setUp(self):
super(TestCase, self).setUp()
self.addCleanup(self.cleanup_instance(
'_paths', '_memo', '_overrides', '_group_overrides', 'maxDiff',
'exit_patch', 'config_fixture', 'logger'))
self._paths = []
def _cleanup_paths():
for path in self._paths:
if path in sys.path:
sys.path.remove(path)
self.addCleanup(_cleanup_paths)
self._memo = {}
self._overrides = []
self._group_overrides = {}
# show complete diffs on failure
self.maxDiff = None
self.addCleanup(CONF.reset)
self.exit_patch = self.useFixture(mockpatch.PatchObject(sys, 'exit'))
self.exit_patch.mock.side_effect = UnexpectedExit
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
self.config(self.config_files())
self.config_overrides()
self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
warnings.filterwarnings('ignore', category=DeprecationWarning)
self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
# tests aren't used.
self.addCleanup(dependency.reset)
self.addCleanup(kvs.INMEMDB.clear)
        # Ensure notification subscriptions and resource types are empty
self.addCleanup(notifications.SUBSCRIBERS.clear)
self.addCleanup(notifications._reset_notifier)
# Reset the auth-plugin registry
self.addCleanup(self.clear_auth_plugin_registry)
def config(self, config_files):
CONF(args=[], project='keystone', default_config_files=config_files)
def load_backends(self):
"""Initializes each manager and assigns them to an attribute."""
# TODO(blk-u): Shouldn't need to clear the registry here, but some
# tests call load_backends multiple times. These should be fixed to
# only call load_backends once.
dependency.reset()
# TODO(morganfainberg): Shouldn't need to clear the registry here, but
# some tests call load_backends multiple times. Since it is not
# possible to re-configure a backend, we need to clear the list. This
# should eventually be removed once testing has been cleaned up.
kvs_core.KEY_VALUE_STORE_REGISTRY.clear()
self.clear_auth_plugin_registry()
drivers = service.load_backends()
drivers.update(dependency.resolve_future_dependencies())
for manager_name, manager in six.iteritems(drivers):
setattr(self, manager_name, manager)
self.addCleanup(self.cleanup_instance(*drivers.keys()))
# The credential backend only supports SQL, so we always have to load
# the tables.
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
self.addCleanup(self.cleanup_instance('engine'))
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
def load_fixtures(self, fixtures):
"""Hacky basic and naive fixture loading based on a python module.
Expects that the various APIs into the various services are already
defined on `self`.
"""
# NOTE(dstanek): create a list of attribute names to be removed
# from this instance during cleanup
fixtures_to_cleanup = []
# TODO(termie): doing something from json, probably based on Django's
# loaddata will be much preferred.
if hasattr(self, 'identity_api') and hasattr(self, 'assignment_api'):
for domain in fixtures.DOMAINS:
try:
rv = self.assignment_api.create_domain(domain['id'],
domain)
except exception.Conflict:
rv = self.assignment_api.get_domain(domain['id'])
except exception.NotImplemented:
rv = domain
attrname = 'domain_%s' % domain['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for tenant in fixtures.TENANTS:
try:
rv = self.assignment_api.create_project(
tenant['id'], tenant)
except exception.Conflict:
rv = self.assignment_api.get_project(tenant['id'])
attrname = 'tenant_%s' % tenant['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for role in fixtures.ROLES:
try:
rv = self.assignment_api.create_role(role['id'], role)
except exception.Conflict:
rv = self.assignment_api.get_role(role['id'])
attrname = 'role_%s' % role['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for user in fixtures.USERS:
user_copy = user.copy()
tenants = user_copy.pop('tenants')
try:
self.identity_api.create_user(user['id'], user_copy)
except exception.Conflict:
pass
for tenant_id in tenants:
try:
self.assignment_api.add_user_to_project(tenant_id,
user['id'])
except exception.Conflict:
pass
attrname = 'user_%s' % user['id']
setattr(self, attrname, user_copy)
fixtures_to_cleanup.append(attrname)
self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
def _paste_config(self, config):
if not config.startswith('config:'):
test_path = os.path.join(TESTSDIR, config)
etc_path = os.path.join(ROOTDIR, 'etc', config)
for path in [test_path, etc_path]:
if os.path.exists('%s-paste.ini' % path):
return 'config:%s-paste.ini' % path
return config
def loadapp(self, config, name='main'):
return deploy.loadapp(self._paste_config(config), name=name)
def client(self, app, *args, **kw):
return TestClient(app, *args, **kw)
def add_path(self, path):
sys.path.insert(0, path)
self._paths.append(path)
def clear_auth_plugin_registry(self):
auth.controllers.AUTH_METHODS.clear()
auth.controllers.AUTH_PLUGINS_LOADED = False
def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
"""Asserts that two datetimes are nearly equal within a small delta.
:param delta: Maximum allowable time delta, defined in seconds.
"""
msg = '%s != %s within %s delta' % (a, b, delta)
self.assertTrue(abs(a - b).seconds <= delta, msg)
def assertNotEmpty(self, l):
self.assertTrue(len(l))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict)
self.assertIsInstance(d2, dict)
self.assertEqual(d1, d2, msg)
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
"""
try:
callable_obj(*args, **kwargs)
except expected_exception as exc_value:
if isinstance(expected_regexp, six.string_types):
expected_regexp = re.compile(expected_regexp)
if isinstance(exc_value.args[0], gettextutils.Message):
if not expected_regexp.search(six.text_type(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, six.text_type(exc_value)))
else:
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException("%s not raised" % excName)
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
def safe_repr(obj, short=False):
_MAX_LENGTH = 80
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
missing = []
mismatched = []
for key, value in six.iteritems(expected):
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
@property
def ipv6_enabled(self):
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6)
# NOTE(Mouad): Try to bind to IPv6 loopback ip address.
sock.bind(("::1", 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
def skip_if_no_ipv6(self):
if not self.ipv6_enabled:
raise self.skipTest("IPv6 is not enabled in the system")
def skip_if_env_not_set(self, env_var):
if not os.environ.get(env_var):
self.skipTest('Env variable %s is not set.' % env_var)
def assertSetEqual(self, set1, set2, msg=None):
# TODO(morganfainberg): Remove this and self._assertSetEqual once
# support for python 2.6 is no longer needed.
if (sys.version_info < (2, 7)):
return self._assertSetEqual(set1, set2, msg=None)
else:
# use the native assertSetEqual
return super(TestCase, self).assertSetEqual(set1, set2, msg=msg)
def _assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
        assertSetEqual uses duck typing to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' %
e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
class SQLDriverOverrides(object):
"""A mixin for consolidating sql-specific test overrides."""
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
self.config_fixture.config(
group='catalog',
driver='keystone.catalog.backends.sql.Catalog')
self.config_fixture.config(
group='ec2',
driver='keystone.contrib.ec2.backends.sql.Ec2')
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.sql.Identity')
self.config_fixture.config(
group='policy',
driver='keystone.policy.backends.sql.Policy')
self.config_fixture.config(
group='revoke',
driver='keystone.contrib.revoke.backends.sql.Revoke')
self.config_fixture.config(
group='token',
driver='keystone.token.backends.sql.Token')
self.config_fixture.config(
group='trust',
driver='keystone.trust.backends.sql.Trust')
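# Illustrative sketch, not part of keystone: a minimal concrete test case wired up the
# way the helpers above expect. The extra config file name below is an assumption;
# real tests append whatever backend-specific files they actually need.
class ExampleSQLTestCase(SQLDriverOverrides, TestCase):
    """Minimal example of composing config_files, config_overrides and load_backends."""
    def config_files(self):
        config_files = super(ExampleSQLTestCase, self).config_files()
        # 'backend_sql.conf' is assumed to live under tests/config_files.
        config_files.append(dirs.tests_conf('backend_sql.conf'))
        return config_files
    def setUp(self):
        super(ExampleSQLTestCase, self).setUp()
        self.load_backends()  # managers become attributes, e.g. self.identity_api
    def test_engine_defaults_to_sqlite(self):
        # _initialize_sql_session() above points the engine at a sqlite file.
        self.assertIn('sqlite', str(self.engine.url))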
|
|
### mostly refactored stuff from gcmdr ###
import urllib2, re, os
from shutil import rmtree
from subprocess import Popen, call
def get_study_opentreeapi(studyid, studyloc):
call = "http://api.opentreeoflife.org/v2/study/" + studyid
req = urllib2.Request(call)
res = urllib2.urlopen(req)
fl = open(studyloc + "/" + studyid, "w")
fl.write(res.read())
fl.close()
def get_all_studies_opentreeapi(studytreelist, studyloc):
if not os.path.exists(studyloc):
print "Creating directory " + studyloc
os.makedirs(studyloc)
for i in studytreelist:
a = i.split("_")
studyid = "_".join(a[:-1])
print "Downloading studyid " + studyid + " to " + studyloc
get_study_opentreeapi(studyid, studyloc)
# prune unmapped and duplicated taxa
def process_nexsons(studytreelist, studyloc, javapre, treemloc, graphdb, outd):
if not os.path.exists(outd):
print "Creating directory " + outd
os.makedirs(outd)
else:
print "Overwriting directory " + outd
from shutil import rmtree
rmtree(outd)
os.makedirs(outd)
for i in studytreelist:
a = i.split("_")
studyid = "_".join(a[:-1])
        treeid = a[-1]  # the tree id is the final underscore-delimited token
cmd = javapre.split(" ")
cmd.append(treemloc)
cmd.append("processtree")
study = studyloc + studyid
cmd.append(study)
cmd.append(treeid)
cmd.append(graphdb)
cmd.append(outd)
print " ".join(cmd)
pr = Popen(cmd).wait()
print "\nProcessing studyid " + studyid + " and treeid: " + treeid
# needed for processing nexsons (defines valid taxa), and synthesis
def init_taxonomy_db(treemloc, javapre, db, taxfile, otloc, basedir):
print "\nInitializing taxonomy DB"
taxversion = get_taxonomy_version(otloc)
synfile = otloc + "synonyms.tsv"
cmd = javapre.split(" ")
cmd.append(treemloc)
cmd.append("inittax")
cmd.append(taxfile)
cmd.append(synfile)
cmd.append(taxversion)
cmd.append(db)
pr = Popen(cmd).wait()
def get_taxonomy_version(otloc):
tv = open(otloc + "version.txt", "r").read().split("\n")
return tv[0]
# filter taxonomy 1) restrict to target, 2) exclude 'dubious' taxa
def subset_taxonomy(target, otloc, outtax):
tflags = ["major_rank_conflict", "major_rank_conflict_inherited", "environmental",
"unclassified_inherited", "unclassified", "viral", "barren", "not_otu", "incertae_sedis",
"incertae_sedis_inherited", "extinct_inherited", "extinct", "hidden", "unplaced", "unplaced_inherited",
"was_container", "merged", "inconsistent", "hybrid"]
intax = otloc + "taxonomy.tsv"
print "\nSubsetting taxonomy to target taxon:", target
infile = open(intax, "r")
outfile = open(outtax, "w")
count = 0
pid = {} #key is the child id and the value is the parent
cid = {} #key is the parent and value is the list of children
nid = {}
nrank = {}
sid = {}
unid = {}
flagsp = {}
targetid = ""
prune = False
for i in infile:
spls = i.strip().split("\t|")
tid = spls[0].strip()
parentid = spls[1].strip()
name = spls[2].strip()
rank = spls[3].strip()
nrank[tid] = rank
nid[tid] = name
sid[tid] = spls[4].strip()
unid[tid] = spls[5].strip()
flags = spls[6].strip()
badflag = False
if len(flags) > 0:
for j in tflags:
if j in flags:
badflag = True
break
        if badflag:
continue
flagsp[tid] = flags
pid[tid] = parentid
if tid == target or name == target:
print "name set: " + name + "; tid: " + tid
targetid = tid
pid[tid] = ""
if parentid not in cid:
cid[parentid] = []
cid[parentid].append(tid)
count += 1
if count % 100000 == 0:
print count
infile.close()
stack = [targetid]
while len(stack) > 0:
tempid = stack.pop()
outfile.write(tempid+"\t|\t"+pid[tempid]+"\t|\t"+nid[tempid]+"\t|\t"+nrank[tempid]+"\t|\t"+sid[tempid]+"\t|\t"+unid[tempid]+"\t|\t"+flagsp[tempid]+"\t|\t\n")
if tempid in cid:
for i in cid[tempid]:
                if prune:
if i in cid: # is the taxon a parent?
stack.append(i)
else:
stack.append(i)
outfile.close()
# generate taxonomy newick for use with otcetera. labels are ottids.
def get_taxonomy_newick(treemloc, javapre, subsettax, subsettaxtree):
print "\nGenerating taxonomy newick"
cmd = javapre.split(" ")
cmd.append(treemloc)
cmd.append("converttaxonomy")
cmd.append(subsettax)
cmd.append(subsettaxtree)
cmd.append("T") # labels are ottids
pr = Popen(cmd).wait()
# from studytreelist, make sure newick exists, write tree rank list for otcetera
def generate_tree_ranking(studytreelist, trloc, outranklist):
# loop over processed newicks and studytreelist.
# some in the latter may not have survived processing (e.g. all tips 'dubious')
dirListing = os.listdir(trloc)
outfile = open(outranklist, "w")
for i in studytreelist:
for j in dirListing:
if j.startswith(i):
outfile.write(j + "\n")
outfile.close()
# symlinks for otcetera:
# step_1: tree-ranking.txt, taxonomy.tre
# step_4: newicks
# using -sf here as make -f Makefile.synth-v3 clean does not currently clean everything
# tbd: don't need basedir any more
def set_symlinks(otceteraloc, ranklist, trloc, subsettaxtree, basedir):
print "\nAttempting to clean any existing files"
wd = os.getcwd()
os.chdir(otceteraloc)
# This yields lots of "No such file or directory" messages,
# but they're innocuous
cmd = ["make", "-f", "Makefile.synth-v3", "clean"]
pr = Popen(cmd).wait()
os.chdir(wd)
print "\nSetting up symlinks for otcetera"
call(["ln", "-sf", ranklist, otceteraloc + "step_1/tree-ranking.txt"])
call(["ln", "-sf", subsettaxtree, otceteraloc + "step_1/taxonomy.tre"])
# remove any existing symlinks (again, make clean does not purge these)
call(["rm", "-f", otceteraloc + "step_4/ot*", otceteraloc + "step_4/pg*"])
print "\nSetting up symlinks for individual newicks"
trees = [line.rstrip('\n') for line in open(ranklist)]
for i in trees:
call(["ln", "-sf", trloc + i, otceteraloc + "step_4/" + i])
# run decomposition and copy results to working directory
# tbd: don't need basedir any more
def run_decomposition(basedir, otceteraloc, subprobs):
print "\nMoving to otcetera dir: " + otceteraloc
wd = os.getcwd()
os.chdir(otceteraloc)
cmd = ["make", "-f", "Makefile.synth-v3"]
pr = Popen(cmd).wait()
if os.path.exists(subprobs):
print "Removing existing directory " + subprobs
rmtree(subprobs)
print "Copying subprobs to base dir: " + basedir
call(["cp", "-r", otceteraloc + "step_7_scratch/export-sub-temp", subprobs])
print "Moving back to base dir: " + basedir
os.chdir(wd)
# throw out trivial subprobs (taxonomy only), format others for treemachine loading
def format_subprobs(treemloc, javapre, subprobs, processedsubprobs):
print "\nFormatting subproblems"
if not os.path.exists(processedsubprobs):
print "Creating directory " + processedsubprobs
os.makedirs(processedsubprobs)
else:
print "Overwriting directory " + processedsubprobs
rmtree(processedsubprobs)
os.makedirs(processedsubprobs)
cmd = javapre.split(" ")
cmd.append(treemloc)
cmd.append("processsubprobs")
cmd.append(subprobs)
cmd.append(processedsubprobs)
print cmd
pr = Popen(cmd).wait()
def load_subprobs(treemloc, javapre, db, processedsubprobs, basedir):
dirListing = os.listdir(processedsubprobs)
count = 0
iter = 0
print "\nLoading " + str(len(dirListing)) + " subproblems into: " + db
for t in dirListing:
count = count + 1
iter = iter + 1
if iter == 100:
iter = 0
print count
cmd = javapre.split(" ")
cmd.append(treemloc)
cmd.append("loadtrees")
cmd.append(processedsubprobs + "/" + t)
cmd.append(db)
cmd.append("T")
cmd.append("subset")
pr = Popen(cmd).wait()
def run_synth(treemloc, javapre, db, processedsubprobs, synthottid, basedir):
print "\nSynthesizing"
cmd = javapre.split(" ")
cmd.append(treemloc)
cmd.append("synthesizedrafttreelist_ottid")
cmd.append(synthottid)
cmd.append("taxonomy")
cmd.append(db)
pr = Popen(cmd).wait()
# extract newick
def extract_tree(treemloc, javapre, db, synthottid, basedir, synthtree):
print "\nExtracting tree"
cmd = javapre.split(" ")
cmd.append(treemloc)
cmd.append("extractdrafttree_ottid")
cmd.append(synthottid)
cmd.append(synthtree)
cmd.append(db)
pr = Popen(cmd).wait()
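# Illustrative sketch, not part of the original script: one way the functions above
# chain together into a synthesis run, in the order the comments describe (taxonomy
# DB first, since processing nexsons needs it). Every path, the java/otcetera
# settings, the target taxon, its OTT id and the study/tree ids below are
# hypothetical placeholders; a real run supplies its own configuration.
if __name__ == "__main__":
    javapre = "java -Xmx8g -jar"                    # hypothetical java invocation prefix
    treemloc = "/opt/treemachine/treemachine.jar"   # hypothetical treemachine jar
    otceteraloc = "/opt/otcetera/supertree/"        # hypothetical otcetera checkout
    otloc = "/data/ott/"                            # hypothetical OTT taxonomy dir
    basedir = "/data/synth_run/"
    graphdb = basedir + "db"
    studyloc = basedir + "studies/"
    trloc = basedir + "processed/"
    subsettax = basedir + "subset_taxonomy.tsv"
    subsettaxtree = basedir + "taxonomy.tre"
    ranklist = basedir + "tree-ranking.txt"
    subprobs = basedir + "subprobs/"
    processedsubprobs = basedir + "subprobs_processed/"
    synthtree = basedir + "draft_synth.tre"
    target = "Aves"                                 # hypothetical target taxon
    synthottid = "81461"                            # hypothetical OTT id for the target
    studytreelist = ["pg_420_tree522"]              # hypothetical study_tree ids
    get_all_studies_opentreeapi(studytreelist, studyloc)
    init_taxonomy_db(treemloc, javapre, graphdb, otloc + "taxonomy.tsv", otloc, basedir)
    process_nexsons(studytreelist, studyloc, javapre, treemloc, graphdb, trloc)
    subset_taxonomy(target, otloc, subsettax)
    get_taxonomy_newick(treemloc, javapre, subsettax, subsettaxtree)
    generate_tree_ranking(studytreelist, trloc, ranklist)
    set_symlinks(otceteraloc, ranklist, trloc, subsettaxtree, basedir)
    run_decomposition(basedir, otceteraloc, subprobs)
    format_subprobs(treemloc, javapre, subprobs, processedsubprobs)
    load_subprobs(treemloc, javapre, graphdb, processedsubprobs, basedir)
    run_synth(treemloc, javapre, graphdb, processedsubprobs, synthottid, basedir)
    extract_tree(treemloc, javapre, graphdb, synthottid, basedir, synthtree)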
|
|
"""
Base file upload handler classes, and the built-in concrete subclasses
"""
from io import BytesIO
from django.conf import settings
from django.core.files.uploadedfile import (
InMemoryUploadedFile, TemporaryUploadedFile,
)
from django.utils.module_loading import import_string
__all__ = [
'UploadFileException', 'StopUpload', 'SkipFile', 'FileUploadHandler',
'TemporaryFileUploadHandler', 'MemoryFileUploadHandler', 'load_handler',
'StopFutureHandlers'
]
class UploadFileException(Exception):
"""
Any error having to do with uploading files.
"""
pass
class StopUpload(UploadFileException):
"""
This exception is raised when an upload must abort.
"""
def __init__(self, connection_reset=False):
"""
        If ``connection_reset`` is ``True``, Django will halt the upload
        without consuming the rest of the request body. This will cause the
        browser to show a "connection reset" error.
"""
self.connection_reset = connection_reset
def __str__(self):
if self.connection_reset:
return 'StopUpload: Halt current upload.'
else:
return 'StopUpload: Consume request data, then halt.'
class SkipFile(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given file.
"""
pass
class StopFutureHandlers(UploadFileException):
"""
    Upload handlers that have handled a file and do not want future handlers to
    run should raise this exception instead of returning None.
"""
pass
class FileUploadHandler:
"""
Base class for streaming upload handlers.
"""
chunk_size = 64 * 2 ** 10 # : The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
self.charset = None
self.content_type_extra = None
self.request = request
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two '--'.
"""
pass
def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None):
"""
Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won't even get it).
"""
self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
self.content_type_extra = content_type_extra
def receive_data_chunk(self, raw_data, start):
"""
Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.
"""
raise NotImplementedError('subclasses of FileUploadHandler must provide a receive_data_chunk() method')
def file_complete(self, file_size):
"""
Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.
"""
raise NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method')
def upload_complete(self):
"""
Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.
"""
pass
class TemporaryFileUploadHandler(FileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def new_file(self, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super().new_file(*args, **kwargs)
self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)
def receive_data_chunk(self, raw_data, start):
self.file.write(raw_data)
def file_complete(self, file_size):
self.file.seek(0)
self.file.size = file_size
return self.file
class MemoryFileUploadHandler(FileUploadHandler):
"""
File upload handler to stream uploads into memory (used for small files).
"""
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Use the content_length to signal whether or not this handler should be
used.
"""
        # Check the content-length header to see if this handler should be used.
        # If the post is too large, we cannot use the memory handler.
if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
self.activated = False
else:
self.activated = True
def new_file(self, *args, **kwargs):
super().new_file(*args, **kwargs)
if self.activated:
self.file = BytesIO()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""Add the data to the BytesIO file."""
if self.activated:
self.file.write(raw_data)
else:
return raw_data
def file_complete(self, file_size):
"""Return a file object if this handler is activated."""
if not self.activated:
return
self.file.seek(0)
return InMemoryUploadedFile(
file=self.file,
field_name=self.field_name,
name=self.file_name,
content_type=self.content_type,
size=file_size,
charset=self.charset,
content_type_extra=self.content_type_extra
)
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> from django.http import HttpRequest
>>> request = HttpRequest()
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
return import_string(path)(*args, **kwargs)
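# Illustrative sketch, not part of Django: a minimal custom handler built on the
# FileUploadHandler API documented above. The class name is hypothetical; it only
# counts bytes and defers storage to later handlers by returning each chunk from
# receive_data_chunk() and returning None from file_complete().
class ByteCountingUploadHandler(FileUploadHandler):
    """Count uploaded bytes while letting subsequent handlers store the file."""
    def new_file(self, *args, **kwargs):
        super().new_file(*args, **kwargs)
        self.bytes_seen = 0
    def receive_data_chunk(self, raw_data, start):
        self.bytes_seen += len(raw_data)
        # Returning the chunk (rather than None) passes it on to the next handler.
        return raw_data
    def file_complete(self, file_size):
        # Returning None means this handler does not produce the UploadedFile itself.
        return None
# A project would typically enable such a handler per request, e.g. with
# request.upload_handlers.insert(0, ByteCountingUploadHandler(request)).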
|
|
# flake8: noqa
from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .abcnews import (
AbcNewsIE,
AbcNewsVideoIE,
)
from .academicearth import AcademicEarthCourseIE
from .acast import (
ACastIE,
ACastChannelIE,
)
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
AdobeTVShowIE,
AdobeTVChannelIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aenetworks import (
AENetworksIE,
HistoryTopicIE,
)
from .afreecatv import AfreecaTVIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .animeondemand import AnimeOnDemandIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import (
AolIE,
AolFeaturesIE,
)
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import (
AppleTrailersIE,
AppleTrailersSectionIE,
)
from .archiveorg import ArchiveOrgIE
from .arkena import ArkenaIE
from .ard import (
ARDIE,
ARDMediathekIE,
)
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVInfoIE,
ArteTVFutureIE,
ArteTVCinemaIE,
ArteTVDDCIE,
ArteTVMagazineIE,
ArteTVEmbedIE,
ArteTVPlaylistIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audimedia import AudiMediaIE
from .audioboom import AudioBoomIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE, AzubuLiveIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbc import (
BBCCoUkIE,
BBCCoUkArticleIE,
BBCCoUkIPlayerPlaylistIE,
BBCCoUkPlaylistIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bigflix import BigflixIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .biobiochiletv import BioBioChileTVIE
from .biqle import BIQLEIE
from .bleacherreport import (
BleacherReportIE,
BleacherReportCMSIE,
)
from .blinkx import BlinkxIE
from .bloomberg import BloombergIE
from .bokecc import BokeCCIE
from .bpb import BpbIE
from .br import BRIE
from .bravotv import BravoTVIE
from .breakcom import BreakIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .camwithher import CamWithHerIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .canvas import CanvasIE
from .carambatv import (
CarambaTVIE,
CarambaTVPageIE,
)
from .cbc import (
CBCIE,
CBCPlayerIE,
)
from .cbs import CBSIE
from .cbslocal import CBSLocalIE
from .cbsinteractive import CBSInteractiveIE
from .cbsnews import (
CBSNewsIE,
CBSNewsLiveVideoIE,
)
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .cda import CDAIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .cliprs import ClipRsIE
from .clipsyndicate import ClipsyndicateIE
from .closertotruth import CloserToTruthIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .clyp import ClypIE
from .cmt import CMTIE
from .cnbc import CNBCIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .coub import CoubIE
from .collegerama import CollegeRamaIE
from .comedycentral import (
ComedyCentralIE,
ComedyCentralShowsIE,
ComedyCentralTVIE,
)
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .commonprotocols import RtmpIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .crackle import CrackleIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .ctv import CTVIE
from .ctvnews import CTVNewsIE
from .cultureunplugged import CultureUnpluggedIE
from .cwtv import CWTVIE
from .dailymail import DailyMailIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import (
DaumIE,
DaumClipIE,
DaumPlaylistIE,
DaumUserIE,
)
from .dbtv import DBTVIE
from .dcn import (
DCNIE,
DCNVideoIE,
DCNLiveIE,
DCNSeasonIE,
)
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .democracynow import DemocracynowIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dplay import DPlayIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .dispeak import DigitallySpeakingIE
from .dropbox import DropboxIE
from .dw import (
DWIE,
DWArticleIE,
)
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
from .europa import EuropaIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .eyedotv import EyedoTVIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .fczenit import FczenitIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import FKTVIE
from .flickr import FlickrIE
from .flipagram import FlipagramIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .formula1 import Formula1IE
from .fourtube import FourTubeIE
from .fox import FOXIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import (
FranceCultureIE,
FranceCultureEmissionIE,
)
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funimation import FunimationIE
from .funnyordie import FunnyOrDieIE
from .fusion import FusionIE
from .gameinformer import GameInformerIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import (
GloboIE,
GloboArticleIE,
)
from .godtube import GodTubeIE
from .godtv import GodTVIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googledrive import GoogleDriveIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .goshgay import GoshgayIE
from .gputechconf import GPUTechConfIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hbo import HBOIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
from .hotstar import HotStarIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .hrti import (
HRTiIE,
HRTiPlaylistIE,
)
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import (
IGNIE,
OneUPIE,
PCMagIE,
)
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import (
ImgurIE,
ImgurAlbumIE,
)
from .ina import InaIE
from .indavideo import (
IndavideoIE,
IndavideoEmbedIE,
)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .ivideon import IvideonIE
from .izlesene import IzleseneIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jwplatform import JWPlatformIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kamcord import KamcordIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .konserthusetplay import KonserthusetPlayIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kusi import KUSIIE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lcp import (
LcpPlayIE,
LcpIE,
)
from .learnr import LearnrIE
from .lecture2go import Lecture2GoIE
from .lemonde import LemondeIE
from .leeco import (
LeIE,
LePlaylistIE,
LetvCloudIE,
)
from .libraryofcongress import LibraryOfCongressIE
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .limelight import (
LimelightMediaIE,
LimelightChannelIE,
LimelightChannelListIE,
)
from .litv import LiTVIE
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .localnews8 import LocalNews8IE
from .lovehomeporn import LoveHomePornIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .makerschannel import MakersChannelIE
from .makertv import MakerTVIE
from .matchtv import MatchTVIE
from .mdr import MDRIE
from .meta import METAIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .mgtv import MGTVIE
from .microsoftvirtualacademy import (
MicrosoftVirtualAcademyIE,
MicrosoftVirtualAcademyCourseIE,
)
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .minoto import MinotoIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import (
MixcloudIE,
MixcloudUserIE,
MixcloudPlaylistIE,
MixcloudStreamIE,
)
from .mlb import MLBIE
from .mnet import MnetIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .msn import MSNIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
MTVDEIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .mwave import MwaveIE, MwaveMeetGreetIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import (
NationalGeographicIE,
NationalGeographicChannelIE,
)
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
CSNNEIE,
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nextmovie import NextMovieIE
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLVideocenterIE,
NHLNewsIE,
NHLVideocenterCategoryIE,
NHLIE,
)
from .nick import (
NickIE,
NickDeIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninecninemedia import NineCNineMediaIE
from .ninegag import NineGagIE
from .ninenow import NineNowIE
from .nintendo import NintendoIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import (
AuroraVidIE,
CloudTimeIE,
NowVideoIE,
VideoWeedIE,
WholeCloudIE,
)
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .nowtv import (
NowTVIE,
NowTVListIE,
)
from .noz import NozIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
SchoolTVIE,
VPROIE,
WNLIE
)
from .npr import NprIE
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKSkoleIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odatv import OdaTVIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .onet import (
OnetIE,
OnetChannelIE,
)
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .openload import OpenloadIE
from .ora import OraTVIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .pandoratv import PandoraTVIE
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .people import PeopleIE
from .periscope import (
PeriscopeIE,
PeriscopeUserIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .plays import PlaysTVIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .polskieradio import PolskieRadioIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
PornHubUserVideosIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .presstv import PressTVIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .r7 import (
R7IE,
R7ArticleIE,
)
from .radiocanada import (
RadioCanadaIE,
RadioCanadaAudioVideoIE,
)
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import (
RaiTVIE,
RaiIE,
)
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redtube import RedTubeIE
from .regiotv import RegioTVIE
from .restudy import RestudyIE
from .reuters import ReutersIE
from .reverbnation import ReverbNationIE
from .revision3 import (
Revision3EmbedIE,
Revision3IE,
)
from .rice import RICEIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rockstargames import RockstarGamesIE
from .roosterteeth import RoosterTeethIE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE, RteRadioIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVETelevisionIE
from .rtvnh import RTVNHIE
from .rudo import RudoIE
from .ruhd import RUHDIE
from .ruleporn import RulePornIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariApiIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenjunkies import ScreenJunkiesIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .seeker import SeekerIE
from .senateisvp import SenateISVPIE
from .sendtonews import SendtoNewsIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .sixplay import SixPlayIE
from .skynewsarabia import (
SkyNewsArabiaIE,
SkyNewsArabiaArticleIE,
)
from .skysports import SkySportsIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE,
SoundcloudSearchIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .stitcher import StitcherIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .sportschau import SportschauIE
from .srgssr import (
SRGSSRIE,
SRGSSRPlayIE,
)
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamable import StreamableIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import (
TagesschauPlayerIE,
TagesschauIE,
)
from .tapely import TapelyIE
from .tass import TassIE
from .tdslifeway import TDSLifewayIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .tele13 import Tele13IE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .telewebion import TelewebionIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theintercept import TheInterceptIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thescene import TheSceneIE
from .thesixtyone import TheSixtyOneIE
from .thestar import TheStarIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .threeqsdn import ThreeQSDNIE
from .tinypic import TinyPicIE
from .tlc import TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixNetworkEmbedIE,
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .toggle import ToggleIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trollvids import TrollvidsIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import (
TudouIE,
TudouPlaylistIE,
TudouAlbumIE,
)
from .tumblr import TumblrIE
from .tunein import (
TuneInClipIE,
TuneInStationIE,
TuneInProgramIE,
TuneInTopicIE,
TuneInShortenerIE,
)
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv3 import TV3IE
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvland import TVLandIE
from .tvp import (
TVPIE,
TVPSeriesIE,
)
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentymin import TwentyMinutenIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchStreamIE,
TwitchClipsIE,
)
from .twitter import (
TwitterCardIE,
TwitterIE,
TwitterAmplifyIE,
)
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .digiteka import DigitekaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .urplay import URPlayIE
from .usatoday import USATodayIE
from .ustream import UstreamIE, UstreamChannelIE
from .ustudio import (
UstudioIE,
UstudioEmbedIE,
)
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import (
VevoIE,
VevoPlaylistIE,
)
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import (
ViceIE,
ViceShowIE,
)
from .vidbit import VidbitIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videomore import (
VideomoreIE,
VideomoreVideoIE,
VideomoreSeasonIE,
)
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .vidio import VidioIE
from .vidme import (
VidmeIE,
VidmeUserIE,
VidmeUserLikesIE,
)
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewlift import (
ViewLiftIE,
ViewLiftEmbedIE,
)
from .viewster import ViewsterIE
from .viidea import ViideaIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoOndemandIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
VKWallPostIE,
)
from .vlive import VLiveIE
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .voxmedia import VoxMediaIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .walla import WallaIE
from .washingtonpost import (
WashingtonPostIE,
WashingtonPostArticleIE,
)
from .wat import WatIE
from .watchindianporn import WatchIndianPornIE
from .wdr import (
WDRIE,
WDRMobileIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weiqitv import WeiqiTVIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import (
WrzutaIE,
WrzutaPlaylistIE,
)
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xfileshare import XFileShareIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xiami import (
XiamiSongIE,
XiamiAlbumIE,
XiamiArtistIE,
XiamiCollectionIE
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import (
YoukuIE,
YoukuShowIE,
)
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubeLiveIE,
YoutubePlaylistIE,
YoutubePlaylistsIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeSharedVideoIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
from .zippcast import ZippCastIE
|
|
"""Support for Google Play Music Desktop Player."""
import json
import logging
import socket
import time
import voluptuous as vol
from websocket import _exceptions, create_connection
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "GPM Desktop Player"
DEFAULT_PORT = 5672
GPMDP_CONFIG_FILE = "gpmpd.conf"
SUPPORT_GPMDP = (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_SEEK
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
)
# GPMDP playback states: 0 = stopped (treated as paused), 1 = paused, 2 = playing
PLAYBACK_DICT = {"0": STATE_PAUSED, "1": STATE_PAUSED, "2": STATE_PLAYING}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
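# A minimal sketch of how the schema above fills in defaults: only the keys the
# user overrides need to appear in the platform entry. The platform name
# "gpmdp" and the example port below are assumptions for illustration.
#
#     validated = PLATFORM_SCHEMA({"platform": "gpmdp", "port": 5673})
#     # -> host defaults to "localhost", name to "GPM Desktop Player"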
def request_configuration(hass, config, url, add_entities_callback):
"""Request configuration steps from the user."""
configurator = hass.components.configurator
if "gpmdp" in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING["gpmdp"], "Failed to register, please try again."
)
return
websocket = create_connection((url), timeout=1)
websocket.send(
json.dumps(
{
"namespace": "connect",
"method": "connect",
"arguments": ["Home Assistant"],
}
)
)
def gpmdp_configuration_callback(callback_data):
"""Handle configuration changes."""
while True:
try:
msg = json.loads(websocket.recv())
except _exceptions.WebSocketConnectionClosedException:
continue
if msg["channel"] != "connect":
continue
if msg["payload"] != "CODE_REQUIRED":
continue
pin = callback_data.get("pin")
websocket.send(
json.dumps(
{
"namespace": "connect",
"method": "connect",
"arguments": ["Home Assistant", pin],
}
)
)
tmpmsg = json.loads(websocket.recv())
if tmpmsg["channel"] == "time":
_LOGGER.error(
"Error setting up GPMDP. Please pause "
"the desktop player and try again"
)
break
code = tmpmsg["payload"]
if code == "CODE_REQUIRED":
continue
setup_gpmdp(hass, config, code, add_entities_callback)
save_json(hass.config.path(GPMDP_CONFIG_FILE), {"CODE": code})
websocket.send(
json.dumps(
{
"namespace": "connect",
"method": "connect",
"arguments": ["Home Assistant", code],
}
)
)
websocket.close()
break
_CONFIGURING["gpmdp"] = configurator.request_config(
DEFAULT_NAME,
gpmdp_configuration_callback,
description=(
"Enter the pin that is displayed in the "
"Google Play Music Desktop Player."
),
submit_caption="Submit",
fields=[{"id": "pin", "name": "Pin Code", "type": "number"}],
)
def setup_gpmdp(hass, config, code, add_entities):
"""Set up gpmdp."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
url = f"ws://{host}:{port}"
if not code:
request_configuration(hass, config, url, add_entities)
return
if "gpmdp" in _CONFIGURING:
configurator = hass.components.configurator
configurator.request_done(_CONFIGURING.pop("gpmdp"))
add_entities([GPMDP(name, url, code)], True)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GPMDP platform."""
codeconfig = load_json(hass.config.path(GPMDP_CONFIG_FILE))
if codeconfig:
code = codeconfig.get("CODE")
elif discovery_info is not None:
if "gpmdp" in _CONFIGURING:
return
code = None
else:
code = None
setup_gpmdp(hass, config, code, add_entities)
class GPMDP(MediaPlayerDevice):
"""Representation of a GPMDP."""
def __init__(self, name, url, code):
"""Initialize the media player."""
self._connection = create_connection
self._url = url
self._authorization_code = code
self._name = name
self._status = STATE_OFF
self._ws = None
self._title = None
self._artist = None
self._albumart = None
self._seek_position = None
self._duration = None
self._volume = None
self._request_id = 0
self._available = True
def get_ws(self):
"""Check if the websocket is setup and connected."""
if self._ws is None:
try:
self._ws = self._connection((self._url), timeout=1)
msg = json.dumps(
{
"namespace": "connect",
"method": "connect",
"arguments": ["Home Assistant", self._authorization_code],
}
)
self._ws.send(msg)
except (socket.timeout, ConnectionRefusedError, ConnectionResetError):
self._ws = None
return self._ws
def send_gpmdp_msg(self, namespace, method, with_id=True):
"""Send ws messages to GPMDP and verify request id in response."""
try:
websocket = self.get_ws()
if websocket is None:
self._status = STATE_OFF
return
self._request_id += 1
websocket.send(
json.dumps(
{
"namespace": namespace,
"method": method,
"requestID": self._request_id,
}
)
)
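            # For reference, the frame sent above is shaped like
            #     {"namespace": "playback", "method": "getPlaybackState",
            #      "requestID": 1}
            # and GPMDP echoes "requestID" back, which the loop below matches on.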
if not with_id:
return
while True:
msg = json.loads(websocket.recv())
if "requestID" in msg:
if msg["requestID"] == self._request_id:
return msg
except (
ConnectionRefusedError,
ConnectionResetError,
_exceptions.WebSocketTimeoutException,
_exceptions.WebSocketProtocolException,
_exceptions.WebSocketPayloadException,
_exceptions.WebSocketConnectionClosedException,
):
self._ws = None
def update(self):
"""Get the latest details from the player."""
time.sleep(1)
try:
self._available = True
playstate = self.send_gpmdp_msg("playback", "getPlaybackState")
if playstate is None:
return
self._status = PLAYBACK_DICT[str(playstate["value"])]
time_data = self.send_gpmdp_msg("playback", "getCurrentTime")
if time_data is not None:
self._seek_position = int(time_data["value"] / 1000)
track_data = self.send_gpmdp_msg("playback", "getCurrentTrack")
if track_data is not None:
self._title = track_data["value"]["title"]
self._artist = track_data["value"]["artist"]
self._albumart = track_data["value"]["albumArt"]
self._duration = int(track_data["value"]["duration"] / 1000)
volume_data = self.send_gpmdp_msg("volume", "getVolume")
if volume_data is not None:
self._volume = volume_data["value"] / 100
except OSError:
self._available = False
@property
def available(self):
"""Return if media player is available."""
return self._available
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def state(self):
"""Return the state of the device."""
return self._status
@property
def media_title(self):
"""Title of current playing media."""
return self._title
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self._artist
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._albumart
@property
def media_seek_position(self):
"""Time in seconds of current seek position."""
return self._seek_position
@property
def media_duration(self):
"""Time in seconds of current song duration."""
return self._duration
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_GPMDP
def media_next_track(self):
"""Send media_next command to media player."""
self.send_gpmdp_msg("playback", "forward", False)
def media_previous_track(self):
"""Send media_previous command to media player."""
self.send_gpmdp_msg("playback", "rewind", False)
def media_play(self):
"""Send media_play command to media player."""
self.send_gpmdp_msg("playback", "playPause", False)
self._status = STATE_PLAYING
self.schedule_update_ha_state()
def media_pause(self):
"""Send media_pause command to media player."""
self.send_gpmdp_msg("playback", "playPause", False)
self._status = STATE_PAUSED
self.schedule_update_ha_state()
def media_seek(self, position):
"""Send media_seek command to media player."""
websocket = self.get_ws()
if websocket is None:
return
websocket.send(
json.dumps(
{
"namespace": "playback",
"method": "setCurrentTime",
"arguments": [position * 1000],
}
)
)
self.schedule_update_ha_state()
def volume_up(self):
"""Send volume_up command to media player."""
websocket = self.get_ws()
if websocket is None:
return
websocket.send('{"namespace": "volume", "method": "increaseVolume"}')
self.schedule_update_ha_state()
def volume_down(self):
"""Send volume_down command to media player."""
websocket = self.get_ws()
if websocket is None:
return
websocket.send('{"namespace": "volume", "method": "decreaseVolume"}')
self.schedule_update_ha_state()
def set_volume_level(self, volume):
"""Set volume on media player, range(0..1)."""
websocket = self.get_ws()
if websocket is None:
return
websocket.send(
json.dumps(
{
"namespace": "volume",
"method": "setVolume",
"arguments": [volume * 100],
}
)
)
self.schedule_update_ha_state()
|
|
#! /usr/bin/python
from datetime import datetime
from optparse import make_option
from sys import stderr
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand
from django.conf import settings
from cyder.base.eav.models import Attribute
from cyder.base.utils import get_cursor
from cyder.core.ctnr.models import Ctnr
from cyder.core.system.models import System, SystemAV
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydhcp.workgroup.models import Workgroup
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydns.cname.models import CNAME
from cyder.cydns.domain.models import Domain
from cyder.cydns.models import View
from cyder.cydns.mx.models import MX
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydns.ptr.models import PTR
from cyder.cydns.soa.models import SOA
from cyder.cydns.utils import ensure_domain
from .lib.utilities import (clean_mac, ip2long, long2ip, fix_attr_name,
range_usage_get_create, get_label_domain_workaround,
ensure_domain_workaround)
public, _ = View.objects.get_or_create(name="public")
private, _ = View.objects.get_or_create(name="private")
BAD_DNAMES = ['', '.', '_']
cursor, _ = get_cursor('maintain_sb')
def get_delegated():
global delegated_dnames
if delegated_dnames is None:
print 'Fetching delegated domain names...'
sql = ("SELECT domain.name FROM maintain_sb.domain "
"INNER JOIN maintain_sb.nameserver "
"ON domain.id=nameserver.domain "
"WHERE %s")
where = ' and '.join(["nameserver.name != '%s'" % ns
for ns in settings.NONDELEGATED_NS])
cursor.execute(sql % where)
results = [i for (i,) in cursor.fetchall()]
delegated_dnames = set(results)
return delegated_dnames
delegated_dnames = None
class Zone(object):
def __init__(self, domain_id=None, dname=None, soa=None,
gen_recs=True, secondary=False):
self.domain_id = domain_id
self.dname = self.get_dname() if dname is None else dname
self.dname = self.dname.lower()
self.domain = None
if gen_recs:
try:
self.domain = Domain.objects.get(name=self.dname)
except Domain.DoesNotExist:
print "WARNING: Domain %s does not exist." % self.dname
return
if self.dname in settings.SECONDARY_ZONES or secondary:
print ("WARNING: Domain %s is a secondary, so its records "
"will not be migrated." % self.dname)
secondary = True
self.gen_static(simulate_delegated=True)
self.gen_AR(reverse_only=True)
else:
if self.dname in get_delegated():
self.domain.soa = self.gen_SOA() or soa
if not self.domain.soa:
print ("WARNING: Could not migrate domain %s; no SOA"
% self.domain.name)
self.domain.delete()
return
else:
self.domain.delegated = True
print "%s has been marked as delegated." % self.dname
self.domain.save()
if self.domain_id is not None:
# XXX: if SOA is created before AR and NS, then
# creating glue will raise an error. However,
# when creating delegated domains, an SOA is needed
if self.dname not in get_delegated():
self.gen_MX()
self.gen_static()
self.gen_AR()
else:
self.gen_static(simulate_delegated=True)
self.gen_AR(reverse_only=True)
self.gen_NS()
if self.dname not in get_delegated():
self.domain.soa = self.gen_SOA() or soa
else:
self.domain = self.gen_domain()
if not self.domain:
return
self.domain.save()
if self.domain:
master_domain = self.domain.master_domain
if master_domain and master_domain.delegated:
raise Exception("Whoa dude %s has a delegated master"
% self.domain.name)
if self.domain and self.domain_id is not None:
self.walk_zone(gen_recs=gen_recs, secondary=secondary)
def gen_SOA(self):
"""Generates an SOA record object if the SOA record exists.
:uniqueness: primary, contact, refresh, retry, expire, minimum, comment
"""
if self.domain_id is None:
return None
cursor.execute("SELECT primary_master, hostmaster, refresh, "
"retry, expire, ttl "
"FROM soa "
"WHERE domain = %s" % self.domain_id)
record = cursor.fetchone()
if record:
primary, contact, refresh, retry, expire, minimum = record
primary, contact = primary.lower(), contact.lower()
try:
soa = SOA.objects.get(root_domain=self.domain)
except SOA.DoesNotExist:
soa = SOA()
soa.primary = primary
soa.contact = contact
soa.refresh = refresh
soa.retry = retry
soa.expire = expire
soa.minimum = minimum
soa.root_domain = self.domain
soa.description = ''
soa.save()
return soa
else:
master_domain = self.domain.master_domain
if master_domain and master_domain.soa:
soa = master_domain.soa
else:
print "WARNING: No SOA exists for %s." % self.domain.name
return None
return soa
def gen_domain(self):
"""Generates a Domain object for this Zone from a hostname.
:uniqueness: domain
"""
        if self.dname not in BAD_DNAMES:
try:
domain = ensure_domain(name=self.dname, force=True,
update_range_usage=False)
domain.clean()
domain.save()
return domain
except ValidationError, e:
print "Could not migrate domain %s: %s" % (self.dname, e)
return None
else:
print "Did not migrate %s because it is blacklisted." % self.dname
def gen_MX(self):
"""Generates the MX Record objects related to this zone's domain.
.. note::
Where multiple records with different ttls exist, only the
first is kept.
:uniqueness: label, domain, server, priority
"""
cursor.execute("SELECT zone_mx.name, server, priority, ttl, "
"enabled, zone.name FROM zone_mx "
"JOIN zone ON zone_mx.zone = zone.id "
"WHERE domain = '%s';" % self.domain_id)
for (name, server, priority, ttl,
enabled, zone) in cursor.fetchall():
name, server = name.lower(), server.lower()
if MX.objects.filter(label=name,
domain=self.domain,
server=server,
priority=priority).exists():
print "Ignoring MX %s; MX already exists." % server
continue
ctnr = self.ctnr_from_zone_name(zone, 'MX')
if ctnr is None:
continue
try:
mx, _ = MX.objects.get_or_create(label=name,
domain=self.domain,
server=server,
priority=priority,
ttl=ttl, ctnr=ctnr)
if enabled:
mx.views.add(public)
mx.views.add(private)
except ValidationError, e:
stderr.write("Error generating MX. %s\n" % e)
def gen_static(self, simulate_delegated=False):
"""
Generates the Static Interface objects related to this zone's domain.
.. note::
Every static interface needs a system.
:System uniqueness: hostname, mac, ip_str
:StaticInterface uniqueness: hostname, mac, ip_str
"""
from dhcp_migrate import migrate_zones
if Ctnr.objects.count() <= 2:
print "WARNING: Zones not migrated. Attempting to migrate now."
migrate_zones()
sys_value_keys = {"type": "Hardware Type",
"os": "Operating System",
"location": "Location",
"department": "Department",
"serial": "Serial Number",
"other_id": "Other ID",
"purchase_date": "Purchase Date",
"po_number": "PO Number",
"warranty_date": "Warranty Date",
"owning_unit": "Owning Unit",
"user_id": "User ID"}
keys = ("host.id", "ip", "host.name", "zone.name", "workgroup",
"enabled", "ha", "zone", "type", "os", "location",
"department", "serial", "other_id", "purchase_date",
"po_number", "warranty_date", "owning_unit", "user_id",
"last_seen", "expire", "ttl", "last_update")
sql = ("SELECT %s FROM host JOIN zone ON host.zone = zone.id "
"WHERE ip != 0 AND domain = '%s';" %
(", ".join(keys), self.domain_id))
cursor.execute(sql)
for values in cursor.fetchall():
items = dict(zip(keys, values))
name = items['host.name']
if simulate_delegated:
print ("WARNING: Did not migrate host %s because it is in a "
"delegated or secondary zone." % name)
continue
ctnr = self.ctnr_from_zone_name(items['zone.name'])
if ctnr is None:
continue
enabled = bool(items['enabled'])
dns_enabled, dhcp_enabled = enabled, enabled
ip = items['ip']
ha = items['ha']
if ip == 0:
continue
if len(ha) != 12 or ha == '0' * 12:
ha = ""
if ha == "":
dhcp_enabled = False
# check for duplicate
static = StaticInterface.objects.filter(
label=name, mac=(clean_mac(ha) or None), ip_str=long2ip(ip))
if static:
stderr.write("Ignoring host %s: already exists.\n"
% items['host.id'])
continue
# create system
system = System(name=name)
system.save()
for key in sys_value_keys.keys():
value = items[key].strip()
if not value or value == '0':
continue
attr = Attribute.objects.get(
name=fix_attr_name(sys_value_keys[key]))
eav = SystemAV(entity=system, attribute=attr, value=value)
eav.full_clean()
eav.save()
# check for workgroup
if items['workgroup'] is not None:
cursor.execute("SELECT name "
"FROM workgroup "
"WHERE id = {0}".format(items['workgroup']))
wname = cursor.fetchone()[0]
w, _ = Workgroup.objects.get_or_create(name=wname)
else:
w = None
last_seen = items['last_seen'] or None
if last_seen:
last_seen = datetime.fromtimestamp(last_seen)
static = StaticInterface(
label=name, domain=self.domain, mac=(clean_mac(ha) or None),
system=system, ip_str=long2ip(ip), ip_type='4',
workgroup=w, ctnr=ctnr, ttl=items['ttl'],
dns_enabled=dns_enabled, dhcp_enabled=dhcp_enabled,
last_seen=last_seen)
# create static interface
try:
static.save(update_range_usage=False)
except ValidationError as e:
fqdn = ".".join((name, self.domain.name))
try:
static.dhcp_enabled = False
static.dns_enabled = dns_enabled
static.save(update_range_usage=False)
stderr.write('WARNING: Static interface {} has '
'been disabled: '.format(fqdn))
stderr.write('{}\n'.format(e))
except ValidationError as e:
stderr.write('WARNING: Could not create the static '
'interface {}: '.format(fqdn))
stderr.write('{}\n'.format(e))
static = None
system.delete()
if static:
static.views.add(public)
static.views.add(private)
def gen_AR(self, reverse_only=False):
"""
Generates the Address Record and PTR objects related to this zone's
domain.
.. note::
Some AddressRecords may need to be added to the pointer table in
MAINTAIN for successful migration, for example,
cob-dc81 and cob-dc82.bus.oregonstate.edu
.. note::
AddressRecords/PTRs with the same ip as a StaticInterface can't
coexist, so if a StaticInterface with the same ip exists, it has
priority.
:AddressRecord uniqueness: label, domain, ip_str, ip_type
:PTR uniqueness: name, ip_str, ip_type
"""
name = self.domain.name
cursor.execute("SELECT ip, hostname, type, zone.name, enabled "
"FROM pointer JOIN zone ON pointer.zone = zone.id "
"WHERE hostname LIKE '%%.%s';" % name)
for ip, hostname, ptr_type, zone, enabled, in cursor.fetchall():
hostname = hostname.lower()
label, dname = hostname.split('.', 1)
            temp_reverse_only = dname != name
dup_stats = StaticInterface.objects.filter(ip_str=long2ip(ip))
if dup_stats.exists():
if ptr_type == 'reverse':
print "Ignoring PTR %s; Static intr exists." % long2ip(ip)
continue
elif dup_stats.filter(fqdn=hostname).exists():
print "Ignoring AR %s; Static intr exists." % hostname
continue
else:
pass
ctnr = self.ctnr_from_zone_name(zone, 'AR/PTR')
if ctnr is None:
continue
if (ptr_type == 'forward' and not reverse_only
and not temp_reverse_only):
if AddressRecord.objects.filter(
fqdn=hostname, ip_str=long2ip(ip)).exists():
continue
try:
arec, _ = range_usage_get_create(
AddressRecord, label=label, domain=self.domain,
ip_str=long2ip(ip), ip_type='4', ctnr=ctnr)
except ValidationError, e:
print "Could not migrate AR %s: %s" % (hostname, e)
continue
if enabled:
arec.views.add(public)
arec.views.add(private)
if ptr_type == 'reverse':
if not PTR.objects.filter(ip_str=long2ip(ip)).exists():
ptr = PTR(fqdn=hostname, ip_str=long2ip(ip),
ip_type='4', ctnr=ctnr)
# PTRs need to be cleaned independently of saving
# (no get_or_create)
try:
ptr.full_clean()
except ValidationError, e:
print "Could not migrate PTR %s: %s" % (ptr.ip_str, e)
continue
ptr.save(update_range_usage=False)
if enabled:
ptr.views.add(public)
ptr.views.add(private)
else:
print "Ignoring PTR %s; already exists." % long2ip(ip)
def gen_NS(self):
"""
Generates the Nameserver objects related to this zone's domain.
:uniqueness: domain, server name
"""
cursor.execute("SELECT * "
"FROM nameserver "
"WHERE domain='%s';" % self.domain_id)
for pk, name, _, _ in cursor.fetchall():
name = name.lower()
try:
ns, _ = Nameserver.objects.get_or_create(domain=self.domain,
server=name)
ns.views.add(public)
ns.views.add(private)
except ValidationError, e:
stderr.write("Error generating NS %s. %s\n" % (pk, e))
def walk_zone(self, gen_recs=True, secondary=False):
"""
Recursively traverses the domain tree, creating Zone objects and
migrating related DNS objects along the way.
.. note::
Child domains will inherit this domain's SOA if they do not have
their own.
"""
if self.dname in get_delegated():
print "%s is delegated, so no children to create." % self.dname
return
sql = ("SELECT id, name "
"FROM domain "
"WHERE master_domain = %s;" % self.domain_id)
cursor.execute(sql)
for child_id, child_name in cursor.fetchall():
child_name = child_name.lower()
Zone(child_id, child_name, self.domain.soa, gen_recs=gen_recs,
secondary=secondary)
def get_dname(self):
"""
Finds a domain name for this Zone's domain id.
"""
cursor.execute('SELECT * FROM domain WHERE id = %s;' % self.domain_id)
_, dname, _, _ = cursor.fetchone()
dname = dname.lower()
return dname
@staticmethod
def ctnr_from_zone_name(zone, obj_type="Object"):
from dhcp_migrate import clean_zone_name
zone = clean_zone_name(zone)
try:
ctnr = Ctnr.objects.get(name=zone)
except Ctnr.DoesNotExist:
print ("%s migration error; ctnr %s does not exist." %
(obj_type, zone))
ctnr = None
return ctnr
@staticmethod
def ctnr_from_zone_id(zone_id):
from dhcp_migrate import maintain_find_zone
return maintain_find_zone(zone_id)
def gen_CNAME():
"""Migrates CNAME objects.
.. note::
Run this only after migrating other DNS objects for every zone.
.. note::
Because MAINTAIN is totally messed up, some hostnames in the CNAME
table have ``.``'s in them, so the fully qualified domain name is
created first, then the label is stripped off of the front of that.
.. note::
If the fully qualified domain name of the label + domain name already
exists as a domain object, that object becomes the alias and the label
prefix is set to the empty string. Otherwise, the alias is the
label + domain name.
:uniqueness: label, domain, target
"""
print "Creating CNAMEs."
sql = ("SELECT zone_cname.id, zone_cname.server, zone_cname.name, "
"zone_cname.enabled, zone.name, domain.name FROM zone_cname "
"JOIN zone ON zone_cname.zone = zone.id "
"JOIN domain ON zone_cname.domain = domain.id")
cursor.execute(sql)
for pk, server, name, enabled, zone, dname in cursor.fetchall():
server, name = server.lower(), name.lower()
dname = dname.lower()
server = server.strip('.')
fqdn = ".".join([name, dname])
name, dname = fqdn.split(".", 1)
if Domain.objects.filter(name=fqdn).exists():
domain = Domain.objects.get(name=fqdn)
name = ""
elif Domain.objects.filter(name=dname).exists():
domain = Domain.objects.get(name=dname)
else:
_, domain = get_label_domain_workaround(fqdn)
if server == ".".join([name, domain.name]):
# In maintain, at least one CNAME is a loop: biosys.bioe.orst.edu
print "Ignoring CNAME %s: Is a loop." % server
continue
if CNAME.objects.filter(label=name, domain=domain).exists():
c = CNAME.objects.get(label=name, domain=domain)
if c.target != server:
print ("ALERT: Conflicting CNAME with fqdn %s already exists."
% fqdn)
continue
ctnr = Zone.ctnr_from_zone_name(zone, 'CNAME')
if ctnr is None:
continue
fqdn = "%s.%s" % (name, domain.name)
fqdn = fqdn.lower().strip('.')
if ctnr not in domain.ctnr_set.all():
print "CNAME %s has mismatching container for its domain." % fqdn
continue
cn = CNAME(label=name, domain=domain, target=server, ctnr=ctnr)
cn.set_fqdn()
dup_ptrs = PTR.objects.filter(fqdn=cn.fqdn)
if dup_ptrs:
print "Removing duplicate PTR for %s" % cn.fqdn
dup_ptrs.delete(update_range_usage=False)
# CNAMEs need to be cleaned independently of saving (no get_or_create)
try:
cn.full_clean()
cn.save()
if enabled:
cn.views.add(public)
cn.views.add(private)
except ValidationError, e:
print "Error for CNAME %s.%s: %s" % (name, domain.name, e)
def gen_reverses():
print "Creating reverse domains."
add_pointers_manual()
Domain.objects.get_or_create(name='arpa', is_reverse=True)
Domain.objects.get_or_create(name='in-addr.arpa', is_reverse=True)
gen_reverse_soa()
def gen_reverse_soa():
public = View.objects.get(name="public")
private = View.objects.get(name="private")
for rname in settings.REVERSE_SOAS:
if not rname.endswith(".arpa"):
rname = rname + ".in-addr.arpa"
print "Creating reverse SOA %s" % rname
dom = ensure_domain_workaround(rname)
ns1, _ = Nameserver.objects.get_or_create(domain=dom,
server="ns1.oregonstate.edu")
ns2, _ = Nameserver.objects.get_or_create(domain=dom,
server="ns2.oregonstate.edu")
SOA.objects.get_or_create(root_domain=dom,
primary="ns1.oregonstate.edu",
contact="hostmaster.oregonstate.edu")
ns1.views.add(public)
ns2.views.add(public)
ns1.views.add(private)
ns2.views.add(private)
def gen_DNS(skip_edu=False):
gen_reverses()
cursor.execute('SELECT * FROM domain WHERE master_domain = 0')
for domain_id, dname, _, _ in cursor.fetchall():
if "edu" in dname and skip_edu:
continue
print "Creating %s zone." % dname
Zone(domain_id=domain_id, dname=dname)
def gen_domains_only():
gen_reverses()
cursor.execute('SELECT * FROM domain WHERE master_domain = 0')
for domain_id, dname, _, _ in cursor.fetchall():
print "Creating %s. (domain only)" % dname
Zone(domain_id=domain_id, dname=dname, gen_recs=False)
def add_pointers_manual():
opts = settings.POINTERS
sql = 'SELECT id FROM zone WHERE name LIKE "zone.nws"'
cursor.execute(sql)
zone_id = cursor.fetchone()[0]
for opt in opts:
(ip, hn, ptype) = opt
ip = ip2long(ip)
        sql = ('SELECT count(*) FROM pointer WHERE ip = %s AND hostname = "%s" '
               'AND type = "%s" AND zone = %s' % (ip, hn, ptype, zone_id))
cursor.execute(sql)
exists = cursor.fetchone()[0]
if not exists:
sql = ('INSERT INTO pointer (ip, hostname, type, zone) '
'VALUES (%s, "%s", "%s", %s)' % (ip, hn, ptype, zone_id))
cursor.execute(sql)
def delete_DNS():
print "Deleting DNS objects."
for thing in [Domain, AddressRecord, PTR, SOA, MX, Nameserver,
StaticInterface, System, Workgroup]:
thing.objects.all().delete()
def delete_CNAME():
print 'Deleting CNAMEs.'
CNAME.objects.all().delete()
def do_everything(skip_edu=False):
delete_DNS()
delete_CNAME()
gen_DNS(skip_edu)
gen_CNAME()
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-d', '--dns',
dest='dns',
default=False,
action='store_true',
help='Migrate DNS objects'),
make_option('-o', '--domains-only',
dest='domains',
default=False,
action='store_true',
help='Migrate domains only'),
make_option('-c', '--cname',
action='store_true',
dest='cname',
default=False,
help='Migrate CNAMEs'),
make_option('-X', '--delete',
dest='delete',
action='store_true',
default=False,
help='Delete old objects'),
make_option('-s', '--skip',
dest='skip',
action='store_true',
default=False,
help='Skip edu zone.'))
def handle(self, **options):
if options['delete']:
if options['dns']:
delete_DNS()
if options['cname']:
delete_CNAME()
if options['dns']:
gen_DNS(options['skip'])
if options['cname']:
gen_CNAME()
if options['domains']:
gen_domains_only()
|
|
#!/usr/bin/env python2.7
import pprint
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import json
import datetime
import time
import logging
from random import randrange
from shared import liburl_wrapper
from shared.liburl_wrapper import safe_pushtx
from shared.fastproto import (
generateKey,
sendMessage,
constructMessage,
getMessages)
from math import ceil
from decimal import Decimal, getcontext
# BitcoinClient / BitmessageClient are required by main2, wait_sign and tx_info
# below; these import paths are assumptions about the shared package layout.
from shared.bitcoind_client.bitcoinclient import BitcoinClient
from shared.bitmessage_communication.bitmessageclient import BitmessageClient
START_COMMAND = "./runclient.sh"
getcontext().prec = 8
# if you'd like to use an external charter file, attach a value to the CHARTER_URL. Otherwise CHARTER_DATA will be used.
CHARTER_URL = None
# A note on miners_fee:
# Eligius requires 4096 satoshi fee per 512 bytes of transaction ( http://eligius.st/~gateway/faq-page )
# With three oracles, the tx fee is around 512 bytes.
# A note on node info:
# pubkey: bitcoin pubkey of an oracle, used to generate the multisig address
# address: bitcoin address to receive the fee
# fastcast: fastcast public key that the oracle will use to sign the messages
CHARTER_DATA = {
"version": "2",
"org_fee": "0.00003",
"miners_fee_satoshi": 8192,
"miners_fee_comment": "2*4096 satoshi. this is enough to pay for Eligius' miners fee for a tx of size up to 2x512 bytes.",
"org_address": "1PCkVX19uGm2QK1vhcXy9uM4y2jwR4dgbF",
"nodes": [
{
"name": "kolinko-orisi-1",
"description": "test server #1, kolinko@gmail.com",
"pubkey": "035e0ef9acb11f30ebb65d8cf9cf5f9db03343b6c041af1d5140d3f2d4e8016885",
"fee": "0.00001",
"address": "1AwzzwxsgLCRqMwYheVNviqpUHLHMi8iYq",
"fastcast": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC+Ei1gIR7QHL4WGuKAbrt+1FOBaTkZ9JUn1rqFMJFFk7bSkTKxPU+LAyx/1fR/B4PTHSpBnDJh5+GJC0RE/+EKbJFRQ3ReS096ffvNvVfd30jumJzN7YoIex5ftQN3lKN7pvEVqO93g0eCU0iqGy3OjCKuxSo1x81fPtNPidr+dQIDAQAB"
},
{
"name": "kolinko-orisi-2",
"description": "test server #2, kolinko@gmail.com",
"pubkey": "0228ea8ccf3ff65994472ed5252b6fb53a00397af1c23cc5fdc3a53e2fec01e5a5",
"fee": "0.00001",
"address": "1Q8rKyzHU1kef25ceaLpVT9erunogoCrky",
"fastcast": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjR81Wtx74hyQQYwP9sMCLM/H4NcQXfgOUNebjdqaVhA/xraMvVRs3q25szXaAhaLxGFUBMIbwDlwKhUqDR8giEA2Ly91SZ/ywLy8/VmGREHR52mC9p2o50VazLu9IC9Ffl4jnrItJmY3194Lk/q5cw6ZluGnYTaYiDPTPOn9KsQIDAQAB"
},
{
"name": "kolinko-orisi-3",
"description": "test server #3, kolinko@gmail.com",
"pubkey": "03cc8ddeaf5068a3f24ab2a7dbc6f571c2f1d70f9c6454e79947567a9670470cf6",
"fee": "0.00001",
"address": "18RSK6LoM9JukLMgm1WEZJPDq4RDsP4oe4",
"fastcast": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCy1gJnXtF1gHctDm600rRHpxd4r64vvcBAsUkrWBHkRRhtth5MYfFrKV7vUi/u1lj5t35Mh9mSQ1bP9Ns6k6islBTVPsatnRsodiOAkqnZPXWfFzigdYP2y8wn02Aw7BU7PuBg3A9CCsCpu+3o8q1X3ZOFrJfRjk3oq0CAHoOu1QIDAQAB"
}
]
}
def fetch_charter():
if CHARTER_URL:
print "fetching charter: %s" % CHARTER_URL
while True:
try:
        charter_json = liburl_wrapper.safe_read(CHARTER_URL, timeout_time=10)
return json.loads(charter_json)
except:
logging.exception('error fetching charter')
print "retrying..."
else:
return CHARTER_DATA
def main(args):
charter = fetch_charter()
oracle_pubkeys = []
for o in charter['nodes']:
oracle_pubkeys.append(o['pubkey'])
min_sigs = int(ceil(float(len(oracle_pubkeys))/2))
print "number of nodes: %i" % len(charter['nodes'])
print "required signatures: %i" % min_sigs
sum_fees_satoshi = 0
for o in charter['nodes']:
sum_fees_satoshi += Decimal(o['fee'])*100000000
sum_fees_satoshi += Decimal(charter['org_fee'])*100000000
print ""
print "1. wire the funds to %s" % response['address']
print " oracle & org fees: %i satoshi (as detailed in %s)" % (sum_fees_satoshi , CHARTER_URL)
print " miners fee: %i satoshi (see CHARTER_DATA in src/client/main.py if you want to lower it)" % charter['miners_fee_satoshi']
print "2. wait for transaction to get any confirmations"
print "3. run:"
print "%s main2 %s <locktime_minutes> <return_address>" % ( START_COMMAND, client_pubkey )
def timelock(args):
if len(args) < 2:
print "USAGE: `%s timelock <locktime_minutes> <return_address>`" % START_COMMAND
return
return_address = args[1]
if CHARTER_URL != None:
print "fetching charter: %s" % CHARTER_URL
else:
print "using built-in charter"
charter = fetch_charter()
oracle_pubkeys = []
oracle_fees = {}
oracle_bms = []
oracle_fastcasts = []
sum_fees_satoshi = Decimal(charter['org_fee']) * 100000000
for o in charter['nodes']:
oracle_pubkeys.append(o['pubkey'])
oracle_fees[o['address']] = o['fee']
oracle_fastcasts.append(o['fastcast'])
sum_fees_satoshi += Decimal(o['fee']) * 100000000
min_sigs = int(ceil(float(len(oracle_pubkeys))/2))
print "number of nodes: %i" % len(charter['nodes'])
print "required signatures: %i" % min_sigs
oracle_fees[charter['org_address']] = charter['org_fee']
key_list = oracle_pubkeys
request = {}
msig_addr = return_address
request['message_id'] = "%s-%s" % (msig_addr, str(randrange(1000000000,9000000000)))
request['pubkey_list'] = key_list
request['miners_fee_satoshi'] = charter['miners_fee_satoshi']
request['locktime'] = time.time() + int(args[0])*60
request['return_address'] = return_address
request['oracle_fees'] = oracle_fees
request['req_sigs'] = min_sigs
request['operation'] = 'safe_timelock_create'
pub, priv = generateKey()
meta_request = {}
meta_request['source'] = pub
meta_request['channel'] = 0
meta_request['epoch'] = time.mktime(datetime.datetime.utcnow().timetuple())
meta_request['body'] = json.dumps(request)
sendMessage(constructMessage(priv, **meta_request))
print ""
print "request sent. awaiting oracle replies..."
print "need at least %r of %r oracles to reply to the request to proceed" % (min_sigs, len(charter['nodes']))
print "if the request takes over 30 seconds to process, it means that some of the oracles might be offline - contact support@orisi.org ."
print ""
suffix = None
msig_addr = None
contract_id = None
confirmation_count = 0
while suffix is None:
msgs = getMessages()
for m in msgs['results']:
try:
body = json.loads(m['body'])
except:
logging.exception('fastcast: wrong body for frame_id %r ; ignoring' % m)
continue
if not 'in_reply_to' in body:
continue
if body['in_reply_to'] == request['message_id']:
if m['source'] in oracle_fastcasts:
oracle_fastcasts.remove(m['source'])
if 'operation' in body:
if body['operation'] == 'safe_timelock_error':
print "Operation error! One of the oracles reports:"
print body['comment']
return
print "received confirmation from %r" % m['source']
if suffix == None:
suffix = body['mark']
msig_addr = body['addr']
contract_id = body['contract_id']
else:
if (msig_addr != body['addr']) or (suffix != body['mark']) or (contract_id != body['contract_id']):
logging.error('Oracles didn\'t agree on the timelock address or the marker. Is one of them running a different code?')
logging.error('Please investigate.')
return
confirmation_count += 1
print "Oracle confirmations: %r of %r required" % (confirmation_count, min_sigs)
print ""
print "You can now send bitcoin to this address: %s and it will be locked for %r minutes from now." % (msig_addr, int(args[0]))
print "IMPORTANT: the amount you send needs to end with %r satoshi." % suffix
print " e.g. if you want to lock in BTC 0.00030000, you have to send 0.0003%r" % suffix
print " qr code: http://www.btcfrog.com/qr/bitcoinPNG.php?address=%s&amount=0.0003%r&label=timelock" % (msig_addr, suffix)
print " monitoring: https://blockchain.info/address/%s" % msig_addr
print ""
print "FEES: oracle & org fees: %i satoshi (as detailed in the charter)" % sum_fees_satoshi
print " miners fee: %i satoshi (yes, it's high - we want to encourage more pools to accept msig)" % charter['miners_fee_satoshi']
print ""
print "awaiting further oracle communication regarding this contract...."
print "(contract_id: %s)" % contract_id
print ""
read_messages = []
while True:
msgs = getMessages()
for m in msgs['results']:
if m['frame_id'] in read_messages:
continue
read_messages.append(m['frame_id'])
try:
body = json.loads(m['body'])
except:
logging.exception('fastcast: wrong body for frame_id %r ; ignoring' % m)
continue
if not "contract_id" in body:
continue
if body['contract_id'] != contract_id:
continue
print body
def main2(args):
if len(args)<3:
print "USAGE: `%s main2 <pubkey_once> <locktime_minutes> <return_address>`" % START_COMMAND
print "- run `%s main` to obtain pubkey_once" % START_COMMAND
print "- keep in mind that this is alpha, don't expect oracles to run properly for any extended periods of time"
print "- you don't want to lock money for over a week, and use anything above 0.05 BTC for testing"
return
btc = BitcoinClient()
request = {}
client_pubkey = args[0]
request['locktime'] = time.time() + int(args[1])*60
request['return_address'] = args[2]
print "fetching charter url" # hopefully it didn't check between running main1 and main2
charter = fetch_charter(CHARTER_URL)
oracle_pubkeys = []
oracle_fees = {}
oracle_bms = []
for o in charter['nodes']:
oracle_pubkeys.append(o['pubkey'])
oracle_fees[o['address']] = o['fee']
#oracle_bms.append(o['bm'])
oracle_fees[charter['org_address']] = charter['org_fee']
min_sigs = int(ceil(float(len(oracle_pubkeys))/2))
key_list = [client_pubkey] + oracle_pubkeys
response = btc.create_multisig_address(min_sigs, key_list)
msig_addr = response['address'] # we're using this as an identificator
redeemScript = response['redeemScript']
request['message_id'] = "%s-%s" % (msig_addr, str(randrange(1000000000,9000000000)))
request['pubkey_list'] = key_list
  request['miners_fee_satoshi'] = charter['miners_fee_satoshi']
print "fetching transactions incoming to %s ..." % msig_addr
import requests
# for production purposes you might want to fetch the data using bitcoind, but that's expensive
print "get"
address_json = requests.get("https://blockchain.info/address/%s?format=json" % msig_addr).text
#try:
print address_json
address_history = json.loads(address_json)
#except:
#print "blockchain.info problem"
#print address_json
#return
prevtxs = []
sum_satoshi = 0
for tx in address_history['txs']:
outputs = []
if 'out' in tx:
outputs = outputs + tx['out']
if 'outputs' in tx:
outputs = outputs + tx['outputs']
    for vout in outputs:
print vout
if vout['addr'] == msig_addr:
prevtx = {
'scriptPubKey' : vout['script'],
'vout': vout['n'],
'txid': tx['hash'],
'redeemScript': redeemScript,
}
sum_satoshi += vout['value']
prevtxs.append(prevtx)
if len(prevtxs) == 0:
print "ERROR: couldn't find transactions sending money to %s" % msig_addr
# return
request['prevtxs'] = prevtxs
request['outputs'] = oracle_fees
request["req_sigs"] = min_sigs
request['operation'] = 'timelock_create'
request['sum_satoshi'] = sum_satoshi
pub, priv = generateKey()
meta_request = {}
meta_request['source'] = pub
meta_request['channel'] = 0
meta_request['signature'] = 0
meta_request['body'] = json.dumps(request)
print sendMessage(constructMessage(priv, **meta_request))
def wait_sign(args):
bm = BitmessageClient()
while True:
messages = bm.get_unread_messages()
print "unread messages: %r" % len(messages)
for msg in messages:
if msg.subject[0:10] == 'final-sign':
try:
content = json.loads(msg.message)
print content['pwtxid']
except:
print "problem with message parsing"
time.sleep(5)
else:
print "complete signed tx for pwtxid: %s" % content['pwtxid']
print "please forward this to Eligius pool ( http://eligius.st/~wizkid057/newstats/pushtxn.php ):"
print content['transaction']
bm.mark_message_as_read(msg)
time.sleep(5)
def tx_info(args):
tx = args[0]
btc = BitcoinClient()
prevtxs = '[{"redeemScript": "52210281cf9fa9241f0a9799f27a4d5d60cff74f30eed1d536bf7a72d3dec936c151632102e8e22190b0adfefd0962c6332e74ab68831d56d0bfc2b01b32beccd56e3ef6f021035ff60e6745093b9bcbae93082e1c50ca5b3fcf8bcd186a46da46ded5132530522103a9bd3bfbd9f9b1719d3ecad8658796dc5e778177d77145b5c37247eb3060861854ae", "txid": "10a3ab54e1e19701fcb86c7725621b5b1b26415f94363de35a493ba9ca502b15", "vout": 0, "scriptPubKey": "a914a37ce66d7065157037e90ca4d4b4a20d8d865a2687"}]'
prevtxs = json.loads(prevtxs)
pprint.pprint( btc.decode_raw_transaction(tx))
pprint.pprint (btc.signatures_count(tx, prevtxs))
pprint.pprint (btc.signatures(tx, prevtxs))
def pushtx(args):
tx = args[0]
print safe_pushtx(tx)
OPERATIONS = {
'main': main,
'timelock': timelock,
'main2': main2,
'wait': wait_sign,
'txinfo': tx_info,
'pushtx': pushtx,
}
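# Example invocation (the locktime value is illustrative only):
#   ./runclient.sh timelock 60 <return_address>
# broadcasts a safe_timelock_create request that locks funds sent to the
# reported multisig address for 60 minutes.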
SHORT_DESCRIPTIONS = {
  'main': "prepares the first multisig",
  'timelock': "broadcasts a safe_timelock_create request",
  'main2': "broadcasts a request for create (timelock/bounty)",
  'wait': "waits for a signature",
  'txinfo': 'information about a signed tx',
  'pushtx': 'pushes tx to eligius',
}
def help():
print "You can use one of the following functions:"
for name, desc in SHORT_DESCRIPTIONS.iteritems():
print "{0} - {1}".format(name, desc)
print "Learn more by using {0} help functionname".format(START_COMMAND)
def main(args):
if len(args) == 0:
print "no arguments given, use {0} help for possible operations".format(START_COMMAND)
return
if args[0] == 'help':
if len(args) == 1:
help()
else:
if args[1] in OPERATIONS:
print OPERATIONS[args[1]].__doc__
return
if args[0] in OPERATIONS:
operation = OPERATIONS[args[0]]
operation(args[1:])
else:
print "unknown operation, use {} help for possible operations".format(START_COMMAND)
if __name__=="__main__":
args = sys.argv[1:]
main(args)
|
|
"""Classes for defining instructions."""
from __future__ import absolute_import
from . import camel_case
from .types import ValueType
from .operands import Operand
from .formats import InstructionFormat
try:
from typing import Union, Sequence, List, Tuple, Any, TYPE_CHECKING # noqa
from typing import Dict # noqa
if TYPE_CHECKING:
from .ast import Expr, Apply, Var, Def, VarAtomMap # noqa
from .typevar import TypeVar # noqa
from .ti import TypeConstraint # noqa
from .xform import XForm, Rtl
# List of operands for ins/outs:
OpList = Union[Sequence[Operand], Operand]
ConstrList = Union[Sequence[TypeConstraint], TypeConstraint]
MaybeBoundInst = Union['Instruction', 'BoundInstruction']
InstructionSemantics = Sequence[XForm]
SemDefCase = Union[Rtl, Tuple[Rtl, Sequence[TypeConstraint]], XForm]
except ImportError:
pass
class InstructionGroup(object):
"""
Every instruction must belong to exactly one instruction group. A given
target architecture can support instructions from multiple groups, and it
does not necessarily support all instructions in a group.
New instructions are automatically added to the currently open instruction
group.
"""
# The currently open instruction group.
_current = None # type: InstructionGroup
def open(self):
# type: () -> None
"""
Open this instruction group such that future new instructions are
added to this group.
"""
assert InstructionGroup._current is None, (
"Can't open {} since {} is already open"
.format(self, InstructionGroup._current))
InstructionGroup._current = self
def close(self):
# type: () -> None
"""
Close this instruction group. This function should be called before
opening another instruction group.
"""
assert InstructionGroup._current is self, (
"Can't close {}, the open instuction group is {}"
.format(self, InstructionGroup._current))
InstructionGroup._current = None
def __init__(self, name, doc):
# type: (str, str) -> None
self.name = name
self.__doc__ = doc
self.instructions = [] # type: List[Instruction]
self.open()
@staticmethod
def append(inst):
# type: (Instruction) -> None
assert InstructionGroup._current, \
"Open an instruction group before defining instructions."
InstructionGroup._current.instructions.append(inst)
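# Typical usage sketch (the group name and doc are illustrative): creating a
# group opens it, Instruction definitions made afterwards are appended to it
# automatically, and close() must be called before another group is opened.
#
#     GROUP = InstructionGroup('example', "An example instruction group")
#     # ... Instruction(...) definitions go here ...
#     GROUP.close()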
class Instruction(object):
"""
The operands to the instruction are specified as two tuples: ``ins`` and
``outs``. Since the Python singleton tuple syntax is a bit awkward, it is
allowed to specify a singleton as just the operand itself, i.e., `ins=x`
and `ins=(x,)` are both allowed and mean the same thing.
:param name: Instruction mnemonic, also becomes opcode name.
:param doc: Documentation string.
:param ins: Tuple of input operands. This can be a mix of SSA value
operands and other operand kinds.
:param outs: Tuple of output operands. The output operands must be SSA
values or `variable_args`.
:param constraints: Tuple of instruction-specific TypeConstraints.
:param is_terminator: This is a terminator instruction.
:param is_branch: This is a branch instruction.
:param is_indirect_branch: This is an indirect branch instruction.
:param is_call: This is a call instruction.
:param is_return: This is a return instruction.
:param is_ghost: This is a ghost instruction, which has no encoding and no
other register allocation constraints.
:param can_trap: This instruction can trap.
:param can_load: This instruction can load from memory.
:param can_store: This instruction can store to memory.
:param other_side_effects: Instruction has other side effects.
"""
# Boolean instruction attributes that can be passed as keyword arguments to
# the constructor. Map attribute name to doc comment for generated Rust
# code.
ATTRIBS = {
'is_terminator': 'True for instructions that terminate the EBB.',
'is_branch': 'True for all branch or jump instructions.',
'is_indirect_branch':
'True for all indirect branch or jump instructions.',
'is_call': 'Is this a call instruction?',
'is_return': 'Is this a return instruction?',
'is_ghost': 'Is this a ghost instruction?',
'can_load': 'Can this instruction read from memory?',
'can_store': 'Can this instruction write to memory?',
'can_trap': 'Can this instruction cause a trap?',
'other_side_effects':
'Does this instruction have other side effects besides can_*',
'writes_cpu_flags': 'Does this instruction write to CPU flags?',
}
def __init__(self, name, doc, ins=(), outs=(), constraints=(), **kwargs):
# type: (str, str, OpList, OpList, ConstrList, **Any) -> None
self.name = name
self.camel_name = camel_case(name)
self.__doc__ = doc
self.ins = self._to_operand_tuple(ins)
self.outs = self._to_operand_tuple(outs)
self.constraints = self._to_constraint_tuple(constraints)
self.format = InstructionFormat.lookup(self.ins, self.outs)
self.semantics = None # type: InstructionSemantics
# Opcode number, assigned by gen_instr.py.
self.number = None # type: int
# Indexes into `self.outs` for value results.
# Other results are `variable_args`.
self.value_results = tuple(
i for i, o in enumerate(self.outs) if o.is_value())
# Indexes into `self.ins` for value operands.
self.value_opnums = tuple(
i for i, o in enumerate(self.ins) if o.is_value())
# Indexes into `self.ins` for non-value operands.
self.imm_opnums = tuple(
i for i, o in enumerate(self.ins) if o.is_immediate())
self._verify_polymorphic()
for attr in kwargs:
if attr not in Instruction.ATTRIBS:
raise AssertionError(
"unknown instruction attribute '" + attr + "'")
for attr in Instruction.ATTRIBS:
            setattr(self, attr, bool(kwargs.get(attr, False)))
# Infer the 'writes_cpu_flags' field value.
if 'writes_cpu_flags' not in kwargs:
self.writes_cpu_flags = any(
out.is_cpu_flags() for out in self.outs)
InstructionGroup.append(self)
def __str__(self):
# type: () -> str
prefix = ', '.join(o.name for o in self.outs)
if prefix:
prefix = prefix + ' = '
suffix = ', '.join(o.name for o in self.ins)
return '{}{} {}'.format(prefix, self.name, suffix)
def snake_name(self):
# type: () -> str
"""
Get the snake_case name of this instruction.
Keywords in Rust and Python are altered by appending a '_'
"""
if self.name == 'return':
return 'return_'
else:
return self.name
def blurb(self):
# type: () -> str
"""Get the first line of the doc comment"""
for line in self.__doc__.split('\n'):
line = line.strip()
if line:
return line
return ""
def _verify_polymorphic(self):
# type: () -> None
"""
Check if this instruction is polymorphic, and verify its use of type
variables.
"""
poly_ins = [
i for i in self.value_opnums
if self.ins[i].typevar.free_typevar()]
poly_outs = [
i for i, o in enumerate(self.outs)
if o.is_value() and o.typevar.free_typevar()]
self.is_polymorphic = len(poly_ins) > 0 or len(poly_outs) > 0
if not self.is_polymorphic:
return
# Prefer to use the typevar_operand to infer the controlling typevar.
self.use_typevar_operand = False
typevar_error = None
tv_op = self.format.typevar_operand
if tv_op is not None and tv_op < len(self.value_opnums):
try:
opnum = self.value_opnums[tv_op]
tv = self.ins[opnum].typevar
if tv is tv.free_typevar() or tv.singleton_type() is not None:
self.other_typevars = self._verify_ctrl_typevar(tv)
self.ctrl_typevar = tv
self.use_typevar_operand = True
except RuntimeError as e:
typevar_error = e
if not self.use_typevar_operand:
# The typevar_operand argument doesn't work. Can we infer from the
# first result instead?
if len(self.outs) == 0:
if typevar_error:
raise typevar_error
else:
raise RuntimeError(
"typevar_operand must be a free type variable")
tv = self.outs[0].typevar
if tv is not tv.free_typevar():
raise RuntimeError("first result must be a free type variable")
self.other_typevars = self._verify_ctrl_typevar(tv)
self.ctrl_typevar = tv
def _verify_ctrl_typevar(self, ctrl_typevar):
# type: (TypeVar) -> List[TypeVar]
"""
Verify that the use of TypeVars is consistent with `ctrl_typevar` as
the controlling type variable.
        All polymorphic inputs must either be derived from `ctrl_typevar` or be
independent free type variables only used once.
All polymorphic results must be derived from `ctrl_typevar`.
Return list of other type variables used, or raise an error.
"""
other_tvs = [] # type: List[TypeVar]
# Check value inputs.
for opnum in self.value_opnums:
typ = self.ins[opnum].typevar
tv = typ.free_typevar()
# Non-polymorphic or derived from ctrl_typevar is OK.
if tv is None or tv is ctrl_typevar:
continue
# No other derived typevars allowed.
if typ is not tv:
raise RuntimeError(
"{}: type variable {} must be derived from {}"
.format(self.ins[opnum], typ.name, ctrl_typevar))
# Other free type variables can only be used once each.
if tv in other_tvs:
raise RuntimeError(
"type variable {} can't be used more than once"
.format(tv.name))
other_tvs.append(tv)
# Check outputs.
for result in self.outs:
if not result.is_value():
continue
typ = result.typevar
tv = typ.free_typevar()
# Non-polymorphic or derived from ctrl_typevar is OK.
if tv is None or tv is ctrl_typevar:
continue
raise RuntimeError(
"type variable in output not derived from ctrl_typevar")
return other_tvs
def all_typevars(self):
# type: () -> List[TypeVar]
"""
Get a list of all type variables in the instruction.
"""
if self.is_polymorphic:
return [self.ctrl_typevar] + self.other_typevars
else:
return []
@staticmethod
def _to_operand_tuple(x):
# type: (Union[Sequence[Operand], Operand]) -> Tuple[Operand, ...]
# Allow a single Operand instance instead of the awkward singleton
# tuple syntax.
if isinstance(x, Operand):
y = (x,) # type: Tuple[Operand, ...]
else:
y = tuple(x)
for op in y:
assert isinstance(op, Operand)
return y
@staticmethod
def _to_constraint_tuple(x):
# type: (ConstrList) -> Tuple[TypeConstraint, ...]
"""
Allow a single TypeConstraint instance instead of the awkward singleton
tuple syntax.
"""
# import placed here to avoid circular dependency
from .ti import TypeConstraint # noqa
if isinstance(x, TypeConstraint):
y = (x,) # type: Tuple[TypeConstraint, ...]
else:
y = tuple(x)
for op in y:
assert isinstance(op, TypeConstraint)
return y
def bind(self, *args):
# type: (*ValueType) -> BoundInstruction
"""
Bind a polymorphic instruction to a concrete list of type variable
values.
"""
assert self.is_polymorphic
return BoundInstruction(self, args)
def __getattr__(self, name):
# type: (str) -> BoundInstruction
"""
Bind a polymorphic instruction to a single type variable with dot
syntax:
>>> iadd.i32
"""
assert name != 'any', 'Wildcard not allowed for ctrl_typevar'
return self.bind(ValueType.by_name(name))
def fully_bound(self):
# type: () -> Tuple[Instruction, Tuple[ValueType, ...]]
"""
Verify that all typevars have been bound, and return a
`(inst, typevars)` pair.
This version in `Instruction` itself allows non-polymorphic
instructions to duck-type as `BoundInstruction`\\s.
"""
assert not self.is_polymorphic, self
return (self, ())
def __call__(self, *args):
# type: (*Expr) -> Apply
"""
Create an `ast.Apply` AST node representing the application of this
instruction to the arguments.
"""
from .ast import Apply # noqa
return Apply(self, args)
def set_semantics(self, src, *dsts):
# type: (Union[Def, Apply], *SemDefCase) -> None
"""Set our semantics."""
from semantics import verify_semantics
from .xform import XForm, Rtl
sem = [] # type: List[XForm]
for dst in dsts:
if isinstance(dst, Rtl):
sem.append(XForm(Rtl(src).copy({}), dst))
elif isinstance(dst, XForm):
sem.append(XForm(
dst.src.copy({}),
dst.dst.copy({}),
dst.constraints))
else:
assert isinstance(dst, tuple)
sem.append(XForm(Rtl(src).copy({}), dst[0],
constraints=dst[1]))
verify_semantics(self, Rtl(src), sem)
self.semantics = sem
class BoundInstruction(object):
"""
A polymorphic `Instruction` bound to concrete type variables.
"""
def __init__(self, inst, typevars):
# type: (Instruction, Tuple[ValueType, ...]) -> None
self.inst = inst
self.typevars = typevars
assert len(typevars) <= 1 + len(inst.other_typevars)
def __str__(self):
# type: () -> str
return '.'.join([self.inst.name, ] + list(map(str, self.typevars)))
def bind(self, *args):
# type: (*ValueType) -> BoundInstruction
"""
Bind additional typevars.
"""
return BoundInstruction(self.inst, self.typevars + args)
def __getattr__(self, name):
# type: (str) -> BoundInstruction
"""
Bind an additional typevar with dot syntax:
>>> uext.i32.i8
"""
if name == 'any':
# This is a wild card bind represented as a None type variable.
return self.bind(None)
return self.bind(ValueType.by_name(name))
def fully_bound(self):
# type: () -> Tuple[Instruction, Tuple[ValueType, ...]]
"""
Verify that all typevars have been bound, and return a
`(inst, typevars)` pair.
"""
if len(self.typevars) < 1 + len(self.inst.other_typevars):
unb = ', '.join(
str(tv) for tv in
self.inst.other_typevars[len(self.typevars) - 1:])
raise AssertionError("Unbound typevar {} in {}".format(unb, self))
assert len(self.typevars) == 1 + len(self.inst.other_typevars)
return (self.inst, self.typevars)
def __call__(self, *args):
# type: (*Expr) -> Apply
"""
Create an `ast.Apply` AST node representing the application of this
instruction to the arguments.
"""
from .ast import Apply # noqa
return Apply(self, args)
|
|
"""
Tests for structural time series models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.datasets import macrodata
from statsmodels.tsa.statespace import structural
from statsmodels.tsa.statespace.structural import UnobservedComponents
from statsmodels.tsa.statespace.tests.results import results_structural
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
dta = macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS')
def run_ucm(name):
true = getattr(results_structural, name)
for model in true['models']:
kwargs = model.copy()
kwargs.update(true['kwargs'])
# Make a copy of the data
values = dta.copy()
freq = kwargs.pop('freq', None)
if freq is not None:
values.index = pd.date_range(start='1959-01-01', periods=len(dta),
freq=freq)
# Test pandas exog
if 'exog' in kwargs:
# Default value here is pd.Series object
exog = np.log(values['realgdp'])
# Also allow a check with a 1-dim numpy array
if kwargs['exog'] == 'numpy':
exog = exog.values.squeeze()
kwargs['exog'] = exog
# Create the model
mod = UnobservedComponents(values['unemp'], **kwargs)
# Smoke test for starting parameters, untransform, transform
# Also test that transform and untransform are inverses
mod.start_params
assert_allclose(mod.start_params, mod.transform_params(mod.untransform_params(mod.start_params)))
# Fit the model at the true parameters
res_true = mod.filter(true['params'])
# Check that the cycle bounds were computed correctly
freqstr = freq[0] if freq is not None else values.index.freqstr[0]
if 'cycle_period_bounds' in kwargs:
cycle_period_bounds = kwargs['cycle_period_bounds']
elif freqstr == 'A':
cycle_period_bounds = (1.5, 12)
elif freqstr == 'Q':
cycle_period_bounds = (1.5*4, 12*4)
elif freqstr == 'M':
cycle_period_bounds = (1.5*12, 12*12)
else:
# If we have no information on data frequency, require the
# cycle frequency to be between 0 and pi
cycle_period_bounds = (2, np.inf)
# Test that the cycle frequency bound is correct
assert_equal(mod.cycle_frequency_bound,
(2*np.pi / cycle_period_bounds[1],
2*np.pi / cycle_period_bounds[0])
)
# Test that the likelihood is correct
rtol = true.get('rtol', 1e-7)
atol = true.get('atol', 0)
assert_allclose(res_true.llf, true['llf'], rtol=rtol, atol=atol)
# Smoke test for plot_components
if have_matplotlib:
fig = res_true.plot_components()
plt.close(fig)
# Now fit the model via MLE
with warnings.catch_warnings(record=True) as w:
res = mod.fit(disp=-1)
# If we found a higher likelihood, no problem; otherwise check
# that we're very close to that found by R
if res.llf <= true['llf']:
assert_allclose(res.llf, true['llf'], rtol=1e-4)
# Smoke test for summary
res.summary()
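# Illustrative helper (not part of the original test suite): a worked example
# of the period-to-frequency conversion that run_ucm checks above, using the
# quarterly-data bounds.
def _example_cycle_frequency_bound():
    cycle_period_bounds = (1.5 * 4, 12 * 4)  # (6, 48) quarters
    bound = (2 * np.pi / cycle_period_bounds[1],   # longest period -> lowest frequency
             2 * np.pi / cycle_period_bounds[0])   # shortest period -> highest frequency
    assert_allclose(bound, (0.130899694, 1.047197551))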
def test_irregular():
run_ucm('irregular')
def test_fixed_intercept():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_intercept')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_constant():
run_ucm('deterministic_constant')
def test_random_walk():
run_ucm('random_walk')
def test_local_level():
run_ucm('local_level')
def test_fixed_slope():
run_ucm('fixed_slope')
def test_fixed_slope_warn():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_slope')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_trend():
run_ucm('deterministic_trend')
def test_random_walk_with_drift():
run_ucm('random_walk_with_drift')
def test_local_linear_deterministic_trend():
run_ucm('local_linear_deterministic_trend')
def test_local_linear_trend():
run_ucm('local_linear_trend')
def test_smooth_trend():
run_ucm('smooth_trend')
def test_random_trend():
run_ucm('random_trend')
def test_cycle():
run_ucm('cycle')
def test_seasonal():
run_ucm('seasonal')
def test_reg():
run_ucm('reg')
def test_rtrend_ar1():
run_ucm('rtrend_ar1')
def test_lltrend_cycle_seasonal_reg_ar1():
run_ucm('lltrend_cycle_seasonal_reg_ar1')
def test_mle_reg():
endog = np.arange(100)*1.0
exog = endog*2
# Make the fit not-quite-perfect
endog[::2] += 0.01
endog[1::2] -= 0.01
with warnings.catch_warnings(record=True) as w:
mod1 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=False)
res1 = mod1.fit(disp=-1)
mod2 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=True)
res2 = mod2.fit(disp=-1)
assert_allclose(res1.regression_coefficients.filtered[0, -1], 0.5, atol=1e-5)
assert_allclose(res2.params[1], 0.5, atol=1e-5)
def test_specifications():
endog = [1, 2]
# Test that when nothing is specified, a warning is issued and the model
# that is fit is one with irregular=True and nothing else.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog)
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'irregular')
# Test an invalid string trend specification
assert_raises(ValueError, UnobservedComponents, endog, 'invalid spec')
# Test that if a trend component is specified without a level component,
# a warning is issued and a deterministic level component is added
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog, trend=True, irregular=True)
message = ("Trend component specified without level component;"
" deterministic level component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'deterministic trend')
# Test that if a string specification is provided, a warning is issued if
# the boolean attributes are also specified
trend_attributes = ['irregular', 'trend', 'stochastic_level',
'stochastic_trend']
for attribute in trend_attributes:
with warnings.catch_warnings(record=True) as w:
kwargs = {attribute: True}
mod = UnobservedComponents(endog, 'deterministic trend', **kwargs)
message = ("Value of `%s` may be overridden when the trend"
" component is specified using a model string."
% attribute)
assert_equal(str(w[0].message), message)
# Test that a seasonal with period less than two is invalid
assert_raises(ValueError, UnobservedComponents, endog, seasonal=1)
def test_start_params():
# Test that the behavior is correct for multiple exogenous and / or
# autoregressive components
# Parameters
nobs = int(1e4)
beta = np.r_[10, -2]
phi = np.r_[0.5, 0.1]
# Generate data
np.random.seed(1234)
exog = np.c_[np.ones(nobs), np.arange(nobs)*1.0]
eps = np.random.normal(size=nobs)
endog = np.zeros(nobs+2)
for t in range(1, nobs):
endog[t+1] = phi[0] * endog[t] + phi[1] * endog[t-1] + eps[t]
endog = endog[2:]
endog += np.dot(exog, beta)
# Now just test that the starting parameters are approximately what they
# ought to be (could make this arbitrarily precise by increasing nobs,
# but that would slow down the test for no real gain)
mod = UnobservedComponents(endog, exog=exog, autoregressive=2)
assert_allclose(mod.start_params, [1., 0.5, 0.1, 10, -2], atol=1e-1)
def test_forecast():
endog = np.arange(50) + 10
exog = np.arange(50)
mod = UnobservedComponents(endog, exog=exog, level='dconstant')
res = mod.smooth([1e-15, 1])
actual = res.forecast(10, exog=np.arange(50,60)[:,np.newaxis])
desired = np.arange(50,60) + 10
assert_allclose(actual, desired)
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class AuthRegistrationsCredentialListMappingTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings.create(credential_list_sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
values = {'CredentialListSid': "CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", }
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/Domains/SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Auth/Registrations/CredentialListMappings.json',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings.create(credential_list_sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/Domains/SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Auth/Registrations/CredentialListMappings.json',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/Domains/SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Auth/Registrations/CredentialListMappings.json?PageSize=50&Page=0",
"end": 0,
"previous_page_uri": null,
"contents": [],
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/Domains/SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Auth/Registrations/CredentialListMappings.json?PageSize=50&Page=0",
"page_size": 50,
"start": 0,
"next_page_uri": null,
"page": 0
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/Domains/SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Auth/Registrations/CredentialListMappings.json?PageSize=50&Page=0",
"end": 0,
"previous_page_uri": null,
"contents": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/Domains/SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Auth/Registrations/CredentialListMappings.json?PageSize=50&Page=0",
"page_size": 50,
"start": 0,
"next_page_uri": null,
"page": 0
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings.list()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/Domains/SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Auth/Registrations/CredentialListMappings/CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/Domains/SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Auth/Registrations/CredentialListMappings/CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.domains(sid="SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.auth \
.registrations \
.credential_list_mappings(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
|
|
import logging
from glfw import *
from OpenGL.GL import *
import numpy as np
# create logger for the context of this function
logger = logging.getLogger(__name__)
import time
from pyglui import ui
from pyglui.cygl.utils import init
from pyglui.cygl.utils import RGBA
from pyglui.pyfontstash import fontstash as fs
width, height = (1280,720)
def basic_gl_setup():
glEnable(GL_POINT_SPRITE )
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE) # overwrite pointsize
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
glClearColor(.8,.8,.8,1.)
glEnable(GL_LINE_SMOOTH)
# glEnable(GL_POINT_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glEnable(GL_LINE_SMOOTH)
glEnable(GL_POLYGON_SMOOTH)
glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
def adjust_gl_view(w,h,window):
"""
adjust view onto our scene.
"""
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, w, h, 0, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def clear_gl_screen():
glClearColor(.9,.9,0.9,1.)
glClear(GL_COLOR_BUFFER_BIT)
def demo():
global quit
quit = False
# Callback functions
def on_resize(window,w, h):
h = max(h,1)
w = max(w,1)
hdpi_factor = glfwGetFramebufferSize(window)[0]/glfwGetWindowSize(window)[0]
w,h = w*hdpi_factor,h*hdpi_factor
gui.update_window(w,h)
active_window = glfwGetCurrentContext()
glfwMakeContextCurrent(window)
# norm_size = normalize((w,h),glfwGetWindowSize(window))
# fb_size = denormalize(norm_size,glfwGetFramebufferSize(window))
adjust_gl_view(w,h,window)
glfwMakeContextCurrent(active_window)
def on_iconify(window,iconfied):
pass
def on_key(window, key, scancode, action, mods):
gui.update_key(key,scancode,action,mods)
if action == GLFW_PRESS:
if key == GLFW_KEY_ESCAPE:
on_close(window)
def on_char(window,char):
gui.update_char(char)
def on_button(window,button, action, mods):
gui.update_button(button,action,mods)
# pos = normalize(pos,glfwGetWindowSize(window))
# pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
def on_pos(window,x, y):
hdpi_factor = float(glfwGetFramebufferSize(window)[0]/glfwGetWindowSize(window)[0])
x,y = x*hdpi_factor,y*hdpi_factor
gui.update_mouse(x,y)
def on_scroll(window,x,y):
gui.update_scroll(x,y)
def on_close(window):
global quit
quit = True
logger.info('Process closing from window')
# get glfw started
glfwInit()
window = glfwCreateWindow(width, height, "pyglui demo", None, None)
if not window:
exit()
glfwSetWindowPos(window,0,0)
# Register callbacks window
glfwSetWindowSizeCallback(window,on_resize)
glfwSetWindowCloseCallback(window,on_close)
glfwSetWindowIconifyCallback(window,on_iconify)
glfwSetKeyCallback(window,on_key)
glfwSetCharCallback(window,on_char)
glfwSetMouseButtonCallback(window,on_button)
glfwSetCursorPosCallback(window,on_pos)
glfwSetScrollCallback(window,on_scroll)
glfwMakeContextCurrent(window)
init()
basic_gl_setup()
class Temp(object):
"""Temp class to make objects"""
def __init__(self):
pass
foo = Temp()
foo.bar = 34
foo.bur = 4
foo.mytext = [203,12]
foo.myswitch = 10
foo.select = 'Tiger'
foo.record = False
foo.calibrate = False
foo.stream = True
foo.test = False
d = {}
d['one'] = 1
def print_hello():
foo.select = 'Cougar'
gui.scale += .1
print('hello')
# m.configuration = sidebar.configuration
def printer(val):
print('setting to :', val)
print("pyglui version: %s" % ui.__version__)
gui = ui.UI()
gui.scale = 1.0
sidebar = ui.Scrolling_Menu("MySideBar",pos=(-300,0),size=(0,0),header_pos='left')
sm = ui.Growing_Menu("SubMenu",pos=(0,0),size=(0,100))
sm.append(ui.Slider("bar",foo))
sm.append(ui.Text_Input('mytext',foo,setter=printer))
ssm = ui.Growing_Menu("SubSubMenu",pos=(0,0),size=(0,100))
ssm.append(ui.Slider("bar",foo))
ssm.append(ui.Text_Input('mytext',foo,setter=printer))
sm.append(ssm)
sidebar.append(sm)
sm.append(ui.Selector('select',foo,selection=['Tiger','Lion','Cougar','Hyena']) )
sm.append(ui.Button("Say Hi!",print_hello))
gui.append(sidebar)
m = ui.Scrolling_Menu("MyMenu",pos=(250,30),size=(300,500),header_pos='top')
m.append(ui.Info_Text("This is my multiline info text. I wonder if multilines break as designed... How does it look? Info Text with long label text to test multiline break handling." ))
m.append(ui.Selector('select',foo,selection=['Tiger','Lion','Cougar','Hyena'],setter=printer) )
m.append(ui.Slider("bur",foo,step=50,min=1,max=1005, label="Slider label with long label text to test overflow handling"))
m.append(ui.Button("Say Hi!",print_hello))
m.append(ui.Switch("myswitch",foo,on_val=1000,off_val=10,label="Switch Me"))
sm = ui.Growing_Menu("SubMenu",pos=(0,0),size=(0,100))
sm.append(ui.Slider("bar",foo))
sm.append(ui.Text_Input('mytext',foo))
m.append(sm)
m.append(ui.Button("Say Hi!",print_hello))
rightbar = ui.Stretching_Menu('Right Bar',(0,100),(150,-100))
rightbar.append(ui.Thumb("record",foo,label="Record") )
rightbar.append(ui.Thumb("calibrate",foo,label="Calibrate") )
rightbar.append(ui.Thumb("stream",foo,label="Stream") )
rightbar.append(ui.Thumb("test",foo,label="Test") )
gui.append(rightbar)
gui.append(m)
import os
import psutil
pid = os.getpid()
ps = psutil.Process(pid)
ts = time.time()
from pyglui import graph
print(graph.__version__)
cpu_g = graph.Line_Graph()
cpu_g.pos = (50,100)
cpu_g.update_fn = ps.get_cpu_percent
cpu_g.update_rate = 5
cpu_g.label = 'CPU %0.1f'
fps_g = graph.Line_Graph()
fps_g.pos = (50,100)
fps_g.update_rate = 5
fps_g.label = "%0.0f FPS"
fps_g.color[:] = .1,.1,.8,.9
st_graph = graph.Averaged_Value()
st_graph.pos = (200,200)
st_graph.update_rate = 5
st_graph.label = "Slider Value: %0.0f"
st_graph.color[:] = 1.,0.,.6,.9
on_resize(window,*glfwGetWindowSize(window))
while not quit:
dt,ts = time.time()-ts,time.time()
clear_gl_screen()
cpu_g.update()
cpu_g.draw()
fps_g.add(1./dt)
fps_g.draw()
st_graph.add(foo.bur)
st_graph.draw()
gui.update()
glfwSwapBuffers(window)
glfwPollEvents()
glfwDestroyWindow(window)
glfwTerminate()
logger.debug("Process done")
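# Illustrative note (not part of the original demo): the on_resize/on_pos
# callbacks above scale window coordinates into framebuffer pixels via the
# HiDPI factor, e.g. a 1280-wide window backed by a 2560-wide framebuffer
# gives a factor of 2.0. The numbers below are assumed values for the sketch.
def _example_hdpi_scaling(framebuffer_w=2560, window_w=1280, x=100.0):
    hdpi_factor = float(framebuffer_w) / window_w
    return x * hdpi_factor  # 200.0 for the defaults above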
if __name__ == '__main__':
if 1:
demo()
else:
import cProfile,subprocess,os
cProfile.runctx("demo()",{},locals(),"example.pstats")
gprof2dot_loc = 'gprof2dot.py'
subprocess.call("python "+gprof2dot_loc+" -f pstats example.pstats | dot -Tpng -o example_profile.png", shell=True)
print("created cpu time graph for example. Please check out the png next to this.")
|
|
"""Internal utilties; not for external use
"""
import contextlib
import functools
import itertools
import os.path
import re
import warnings
from enum import Enum
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Container,
Dict,
Hashable,
Iterable,
Iterator,
Mapping,
MutableMapping,
MutableSet,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
import numpy as np
import pandas as pd
K = TypeVar("K")
V = TypeVar("V")
T = TypeVar("T")
def _check_inplace(inplace: Optional[bool]) -> None:
if inplace is not None:
raise TypeError(
"The `inplace` argument has been removed from xarray. "
"You can achieve an identical effect with python's standard assignment."
)
def alias_message(old_name: str, new_name: str) -> str:
return f"{old_name} has been deprecated. Use {new_name} instead."
def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None:
warnings.warn(
alias_message(old_name, new_name), FutureWarning, stacklevel=stacklevel
)
def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]:
assert isinstance(old_name, str)
@functools.wraps(obj)
def wrapper(*args, **kwargs):
alias_warning(old_name, obj.__name__)
return obj(*args, **kwargs)
wrapper.__doc__ = alias_message(old_name, obj.__name__)
return wrapper
def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index:
from ..coding.cftimeindex import CFTimeIndex
if len(index) > 0 and index.dtype == "O":
try:
return CFTimeIndex(index)
except (ImportError, TypeError):
return index
else:
return index
def maybe_cast_to_coords_dtype(label, coords_dtype):
if coords_dtype.kind == "f" and not isinstance(label, slice):
label = np.asarray(label, dtype=coords_dtype)
return label
def safe_cast_to_index(array: Any) -> pd.Index:
"""Given an array, safely cast it to a pandas.Index.
If it is already a pandas.Index, return it unchanged.
Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
this function will not attempt to do automatic type conversion but will
always return an index with dtype=object.
"""
if isinstance(array, pd.Index):
index = array
elif hasattr(array, "to_index"):
index = array.to_index()
else:
kwargs = {}
if hasattr(array, "dtype") and array.dtype.kind == "O":
kwargs["dtype"] = object
index = pd.Index(np.asarray(array), **kwargs)
return _maybe_cast_to_cftimeindex(index)
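# Illustrative sketch, not part of the original module: object-dtype input is
# kept as dtype=object rather than being coerced, and an existing pd.Index is
# passed through unchanged.
def _example_safe_cast_to_index():
    idx = safe_cast_to_index(np.array([1, "a"], dtype=object))
    assert idx.dtype == object
    existing = pd.Index([1, 2, 3])
    assert safe_cast_to_index(existing) is existing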
def multiindex_from_product_levels(
levels: Sequence[pd.Index], names: Sequence[str] = None
) -> pd.MultiIndex:
"""Creating a MultiIndex from a product without refactorizing levels.
Keeping levels the same gives back the original labels when we unstack.
Parameters
----------
levels : sequence of pd.Index
Values for each MultiIndex level.
names : optional sequence of objects
Names for each level.
Returns
-------
pandas.MultiIndex
"""
if any(not isinstance(lev, pd.Index) for lev in levels):
raise TypeError("levels must be a list of pd.Index objects")
split_labels, levels = zip(*[lev.factorize() for lev in levels])
labels_mesh = np.meshgrid(*split_labels, indexing="ij")
labels = [x.ravel() for x in labels_mesh]
return pd.MultiIndex(levels, labels, sortorder=0, names=names)
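# Illustrative sketch, not part of the original module: builds the full 2 x 2
# product of two small levels; the level values are reused verbatim.
def _example_multiindex_from_product_levels():
    midx = multiindex_from_product_levels(
        [pd.Index(["a", "b"]), pd.Index([1, 2])], names=["letters", "numbers"]
    )
    assert len(midx) == 4
    assert list(midx.names) == ["letters", "numbers"]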
def maybe_wrap_array(original, new_array):
"""Wrap a transformed array with __array_wrap__ if it can be done safely.
This lets us treat arbitrary functions that take and return ndarray objects
like ufuncs, as long as they return an array with the same shape.
"""
# in case func lost array's metadata
if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:
return original.__array_wrap__(new_array)
else:
return new_array
def equivalent(first: T, second: T) -> bool:
"""Compare two objects for equivalence (identity or equality), using
array_equiv if either object is an ndarray. If both objects are lists,
equivalent is sequentially called on all the elements.
"""
# TODO: refactor to avoid circular import
from . import duck_array_ops
if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):
return duck_array_ops.array_equiv(first, second)
elif isinstance(first, list) or isinstance(second, list):
return list_equiv(first, second)
else:
return (
(first is second)
or (first == second)
or (pd.isnull(first) and pd.isnull(second))
)
def list_equiv(first, second):
equiv = True
if len(first) != len(second):
return False
else:
for f, s in zip(first, second):
equiv = equiv and equivalent(f, s)
return equiv
def peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]:
"""Returns the first value from iterable, as well as a new iterator with
the same content as the original iterable
"""
gen = iter(iterable)
peek = next(gen)
return peek, itertools.chain([peek], gen)
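# Illustrative sketch, not part of the original module: the first element is
# returned separately, yet the rebuilt iterator still yields every element.
def _example_peek_at():
    first, rest = peek_at(x ** 2 for x in range(4))
    assert first == 0
    assert list(rest) == [0, 1, 4, 9]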
def update_safety_check(
first_dict: Mapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent,
) -> None:
"""Check the safety of updating one dictionary with another.
Raises ValueError if dictionaries have non-compatible values for any key,
where compatibility is determined by identity (they are the same item) or
the `compat` function.
Parameters
----------
first_dict, second_dict : dict-like
All items in the second dictionary are checked for conflicts
against items in the first dictionary.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k, v in second_dict.items():
if k in first_dict and not compat(v, first_dict[k]):
raise ValueError(
"unsafe to merge dictionaries without "
"overriding values; conflicting key %r" % k
)
def remove_incompatible_items(
first_dict: MutableMapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent,
) -> None:
"""Remove incompatible items from the first dictionary in-place.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k in list(first_dict):
if k not in second_dict or not compat(first_dict[k], second_dict[k]):
del first_dict[k]
def is_dict_like(value: Any) -> bool:
return hasattr(value, "keys") and hasattr(value, "__getitem__")
def is_full_slice(value: Any) -> bool:
return isinstance(value, slice) and value == slice(None)
def is_list_like(value: Any) -> bool:
return isinstance(value, list) or isinstance(value, tuple)
def either_dict_or_kwargs(
pos_kwargs: Optional[Mapping[Hashable, T]],
kw_kwargs: Mapping[str, T],
func_name: str,
) -> Mapping[Hashable, T]:
if pos_kwargs is not None:
if not is_dict_like(pos_kwargs):
raise ValueError(
"the first argument to .%s must be a dictionary" % func_name
)
if kw_kwargs:
raise ValueError(
"cannot specify both keyword and positional "
"arguments to .%s" % func_name
)
return pos_kwargs
else:
# Need an explicit cast to appease mypy due to invariance; see
# https://github.com/python/mypy/issues/6228
return cast(Mapping[Hashable, T], kw_kwargs)
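# Illustrative sketch, not part of the original module ("sel" is just a label
# used in the error messages): a positional dict and keyword arguments are
# accepted interchangeably, but not together.
def _example_either_dict_or_kwargs():
    assert either_dict_or_kwargs({"x": 1}, {}, "sel") == {"x": 1}
    assert either_dict_or_kwargs(None, {"x": 1}, "sel") == {"x": 1}
    try:
        either_dict_or_kwargs({"x": 1}, {"y": 2}, "sel")
    except ValueError:
        pass  # mixing positional and keyword indexers is rejected
    else:
        raise AssertionError("expected ValueError")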
def is_scalar(value: Any, include_0d: bool = True) -> bool:
"""Whether to treat a value as a scalar.
Any non-iterable, string, or 0-D array
"""
from .variable import NON_NUMPY_SUPPORTED_ARRAY_TYPES
if include_0d:
include_0d = getattr(value, "ndim", None) == 0
return (
include_0d
or isinstance(value, (str, bytes))
or not (
isinstance(value, (Iterable,) + NON_NUMPY_SUPPORTED_ARRAY_TYPES)
or hasattr(value, "__array_function__")
)
)
def is_valid_numpy_dtype(dtype: Any) -> bool:
try:
np.dtype(dtype)
except (TypeError, ValueError):
return False
else:
return True
def to_0d_object_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.
"""
result = np.empty((), dtype=object)
result[()] = value
return result
def to_0d_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray.
"""
if np.isscalar(value) or (isinstance(value, np.ndarray) and value.ndim == 0):
return np.array(value)
else:
return to_0d_object_array(value)
def dict_equiv(
first: Mapping[K, V],
second: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent,
) -> bool:
"""Test equivalence of two dict-like objects. If any of the values are
numpy arrays, compare them correctly.
Parameters
----------
first, second : dict-like
Dictionaries to compare for equality
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
equals : bool
True if the dictionaries are equal
"""
for k in first:
if k not in second or not compat(first[k], second[k]):
return False
for k in second:
if k not in first:
return False
return True
def compat_dict_intersection(
first_dict: Mapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent,
) -> MutableMapping[K, V]:
"""Return the intersection of two dictionaries as a new dictionary.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
intersection : dict
Intersection of the contents.
"""
new_dict = dict(first_dict)
remove_incompatible_items(new_dict, second_dict, compat)
return new_dict
def compat_dict_union(
first_dict: Mapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent,
) -> MutableMapping[K, V]:
"""Return the union of two dictionaries as a new dictionary.
An exception is raised if any keys are found in both dictionaries and the
values are not compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
union : dict
union of the contents.
"""
new_dict = dict(first_dict)
update_safety_check(first_dict, second_dict, compat)
new_dict.update(second_dict)
return new_dict
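# Illustrative sketch, not part of the original module: the intersection keeps
# only compatible shared items, while the union raises on a conflicting value.
def _example_compat_dict_helpers():
    a = {"x": 1, "y": 2}
    b = {"y": 2, "z": 3}
    assert compat_dict_intersection(a, b) == {"y": 2}
    assert compat_dict_union(a, b) == {"x": 1, "y": 2, "z": 3}
    try:
        compat_dict_union({"x": 1}, {"x": 2})
    except ValueError:
        pass  # conflicting values for "x" are not merged silently
    else:
        raise AssertionError("expected ValueError")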
class Frozen(Mapping[K, V]):
"""Wrapper around an object implementing the mapping interface to make it
immutable. If you really want to modify the mapping, the mutable version is
saved under the `mapping` attribute.
"""
__slots__ = ("mapping",)
def __init__(self, mapping: Mapping[K, V]):
self.mapping = mapping
def __getitem__(self, key: K) -> V:
return self.mapping[key]
def __iter__(self) -> Iterator[K]:
return iter(self.mapping)
def __len__(self) -> int:
return len(self.mapping)
def __contains__(self, key: object) -> bool:
return key in self.mapping
def __repr__(self) -> str:
return "{}({!r})".format(type(self).__name__, self.mapping)
def FrozenDict(*args, **kwargs) -> Frozen:
return Frozen(dict(*args, **kwargs))
class SortedKeysDict(MutableMapping[K, V]):
"""A wrapper for dictionary-like objects that always iterates over its
items in sorted order by key but is otherwise equivalent to the underlying
mapping.
"""
__slots__ = ("mapping",)
def __init__(self, mapping: MutableMapping[K, V] = None):
self.mapping = {} if mapping is None else mapping
def __getitem__(self, key: K) -> V:
return self.mapping[key]
def __setitem__(self, key: K, value: V) -> None:
self.mapping[key] = value
def __delitem__(self, key: K) -> None:
del self.mapping[key]
def __iter__(self) -> Iterator[K]:
return iter(sorted(self.mapping))
def __len__(self) -> int:
return len(self.mapping)
def __contains__(self, key: object) -> bool:
return key in self.mapping
def __repr__(self) -> str:
return "{}({!r})".format(type(self).__name__, self.mapping)
class OrderedSet(MutableSet[T]):
"""A simple ordered set.
The API matches the builtin set, but it preserves insertion order of elements, like
a dict. Note that, unlike in an OrderedDict, equality tests are not order-sensitive.
"""
_d: Dict[T, None]
__slots__ = ("_d",)
def __init__(self, values: AbstractSet[T] = None):
self._d = {}
if values is not None:
# Disable type checking - both mypy and PyCharm believe that
# we're altering the type of self in place (see signature of
# MutableSet.__ior__)
self |= values # type: ignore
# Required methods for MutableSet
def __contains__(self, value: object) -> bool:
return value in self._d
def __iter__(self) -> Iterator[T]:
return iter(self._d)
def __len__(self) -> int:
return len(self._d)
def add(self, value: T) -> None:
self._d[value] = None
def discard(self, value: T) -> None:
del self._d[value]
# Additional methods
def update(self, values: AbstractSet[T]) -> None:
# See comment on __init__ re. type checking
self |= values # type: ignore
def __repr__(self) -> str:
return "{}({!r})".format(type(self).__name__, list(self))
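# Illustrative sketch, not part of the original module: insertion order is
# preserved and duplicates collapse, while equality keeps plain set semantics.
def _example_ordered_set():
    s = OrderedSet(["b", "a", "b"])
    assert list(s) == ["b", "a"]
    s.add("c")
    assert list(s) == ["b", "a", "c"]
    assert s == {"a", "b", "c"}  # order is ignored for equality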
class NdimSizeLenMixin:
"""Mixin class that extends a class that defines a ``shape`` property to
one that also defines ``ndim``, ``size`` and ``__len__``.
"""
__slots__ = ()
@property
def ndim(self: Any) -> int:
return len(self.shape)
@property
def size(self: Any) -> int:
# cast to int so that shape = () gives size = 1
return int(np.prod(self.shape))
def __len__(self: Any) -> int:
try:
return self.shape[0]
except IndexError:
raise TypeError("len() of unsized object")
class NDArrayMixin(NdimSizeLenMixin):
"""Mixin class for making wrappers of N-dimensional arrays that conform to
the ndarray interface required for the data argument to Variable objects.
A subclass should set the `array` property and override one or more of
`dtype`, `shape` and `__getitem__`.
"""
__slots__ = ()
@property
def dtype(self: Any) -> np.dtype:
return self.array.dtype
@property
def shape(self: Any) -> Tuple[int]:
return self.array.shape
def __getitem__(self: Any, key):
return self.array[key]
def __repr__(self: Any) -> str:
return "{}(array={!r})".format(type(self).__name__, self.array)
class ReprObject:
"""Object that prints as the given value, for use with sentinel values.
"""
__slots__ = ("_value",)
def __init__(self, value: str):
self._value = value
def __repr__(self) -> str:
return self._value
def __eq__(self, other) -> bool:
if isinstance(other, ReprObject):
return self._value == other._value
return False
def __hash__(self) -> int:
return hash((type(self), self._value))
def __dask_tokenize__(self):
from dask.base import normalize_token
return normalize_token((type(self), self._value))
@contextlib.contextmanager
def close_on_error(f):
"""Context manager to ensure that a file opened by xarray is closed if an
exception is raised before the user sees the file object.
"""
try:
yield
except Exception:
f.close()
raise
def is_remote_uri(path: str) -> bool:
return bool(re.search(r"^https?\://", path))
def is_grib_path(path: str) -> bool:
_, ext = os.path.splitext(path)
return ext in [".grib", ".grb", ".grib2", ".grb2"]
def is_uniform_spaced(arr, **kwargs) -> bool:
"""Return True if values of an array are uniformly spaced and sorted.
>>> is_uniform_spaced(range(5))
True
>>> is_uniform_spaced([-4, 0, 100])
False
kwargs are additional arguments to ``np.isclose``
"""
arr = np.array(arr, dtype=float)
diffs = np.diff(arr)
return bool(np.isclose(diffs.min(), diffs.max(), **kwargs))
def hashable(v: Any) -> bool:
"""Determine whether `v` can be hashed.
"""
try:
hash(v)
except TypeError:
return False
return True
def not_implemented(*args, **kwargs):
return NotImplemented
def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]:
"""Convert attribute values from numpy objects to native Python objects,
for use in to_dict
"""
attrs = dict(attrs)
for k, v in attrs.items():
if isinstance(v, np.ndarray):
attrs[k] = v.tolist()
elif isinstance(v, np.generic):
attrs[k] = v.item()
return attrs
def ensure_us_time_resolution(val):
"""Convert val out of numpy time, for use in to_dict.
Needed because of numpy bug GH#7619"""
if np.issubdtype(val.dtype, np.datetime64):
val = val.astype("datetime64[us]")
elif np.issubdtype(val.dtype, np.timedelta64):
val = val.astype("timedelta64[us]")
return val
class HiddenKeyDict(MutableMapping[K, V]):
"""Acts like a normal dictionary, but hides certain keys.
"""
__slots__ = ("_data", "_hidden_keys")
# ``__init__`` method required to create instance from class.
def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]):
self._data = data
self._hidden_keys = frozenset(hidden_keys)
def _raise_if_hidden(self, key: K) -> None:
if key in self._hidden_keys:
raise KeyError("Key `%r` is hidden." % key)
# The next five methods are requirements of the ABC.
def __setitem__(self, key: K, value: V) -> None:
self._raise_if_hidden(key)
self._data[key] = value
def __getitem__(self, key: K) -> V:
self._raise_if_hidden(key)
return self._data[key]
def __delitem__(self, key: K) -> None:
self._raise_if_hidden(key)
del self._data[key]
def __iter__(self) -> Iterator[K]:
for k in self._data:
if k not in self._hidden_keys:
yield k
def __len__(self) -> int:
num_hidden = len(self._hidden_keys & self._data.keys())
return len(self._data) - num_hidden
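# Illustrative sketch, not part of the original module: hidden keys are
# excluded from iteration and len(), and accessing one raises KeyError.
def _example_hidden_key_dict():
    d = HiddenKeyDict({"a": 1, "b": 2, "_private": 3}, ["_private"])
    assert list(d) == ["a", "b"]
    assert len(d) == 2
    try:
        d["_private"]
    except KeyError:
        pass  # hidden keys cannot be read, set, or deleted
    else:
        raise AssertionError("expected KeyError")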
def infix_dims(dims_supplied: Collection, dims_all: Collection) -> Iterator:
"""
Resolves a supplied list containing an ellipsis representing other items, to
a generator with the 'realized' list of all items
"""
if ... in dims_supplied:
if len(set(dims_all)) != len(dims_all):
raise ValueError("Cannot use ellipsis with repeated dims")
if len([d for d in dims_supplied if d == ...]) > 1:
raise ValueError("More than one ellipsis supplied")
other_dims = [d for d in dims_all if d not in dims_supplied]
for d in dims_supplied:
if d == ...:
yield from other_dims
else:
yield d
else:
if set(dims_supplied) ^ set(dims_all):
raise ValueError(
f"{dims_supplied} must be a permuted list of {dims_all}, unless `...` is included"
)
yield from dims_supplied
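# Illustrative sketch, not part of the original module: the ellipsis expands to
# every dimension not named explicitly, preserving their original order.
def _example_infix_dims():
    realized = list(infix_dims(["a", ..., "d"], ["a", "b", "c", "d"]))
    assert realized == ["a", "b", "c", "d"]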
def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable:
""" Get a new dimension name based on new_dim that is not used in dims.
If the name already exists, underscores are prepended until it is unique.
Example1:
dims: ['a', 'b', 'c']
new_dim: ['_rolling']
-> ['_rolling']
Example2:
dims: ['a', 'b', 'c', '_rolling']
new_dim: ['_rolling']
-> ['__rolling']
"""
while new_dim in dims:
new_dim = "_" + str(new_dim)
return new_dim
def drop_dims_from_indexers(
indexers: Mapping[Hashable, Any],
dims: Union[list, Mapping[Hashable, int]],
missing_dims: str,
) -> Mapping[Hashable, Any]:
""" Depending on the setting of missing_dims, drop any dimensions from indexers that
are not present in dims.
Parameters
----------
indexers : dict
dims : sequence
missing_dims : {"raise", "warn", "ignore"}
"""
if missing_dims == "raise":
invalid = indexers.keys() - set(dims)
if invalid:
raise ValueError(
f"dimensions {invalid} do not exist. Expected one or more of {dims}"
)
return indexers
elif missing_dims == "warn":
# don't modify input
indexers = dict(indexers)
invalid = indexers.keys() - set(dims)
if invalid:
warnings.warn(
f"dimensions {invalid} do not exist. Expected one or more of {dims}"
)
for key in invalid:
indexers.pop(key)
return indexers
elif missing_dims == "ignore":
return {key: val for key, val in indexers.items() if key in dims}
else:
raise ValueError(
f"Unrecognised option {missing_dims} for missing_dims argument"
)
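# Illustrative sketch, not part of the original module: "ignore" silently drops
# the unknown dimension, whereas "raise" turns it into a ValueError.
def _example_drop_dims_from_indexers():
    indexers = {"x": 0, "bogus": 1}
    assert drop_dims_from_indexers(indexers, ["x", "y"], "ignore") == {"x": 0}
    try:
        drop_dims_from_indexers(indexers, ["x", "y"], "raise")
    except ValueError:
        pass  # "bogus" is not among the dimensions ["x", "y"]
    else:
        raise AssertionError("expected ValueError")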
# Singleton type, as per https://github.com/python/typing/pull/240
class Default(Enum):
token = 0
_default = Default.token
|