from sympy.mpmath.libmpf import *
from sympy.mpmath.libelefun import *
from sympy.mpmath import *
import random
import time
import math
import cmath
def mpc_ae(a, b, eps=eps):
res = True
res = res and a.real.ae(b.real, eps)
res = res and a.imag.ae(b.imag, eps)
return res
#----------------------------------------------------------------------------
# Constants and functions
#
tpi = "3.1415926535897932384626433832795028841971693993751058209749445923078\
1640628620899862803482534211706798"
te = "2.71828182845904523536028747135266249775724709369995957496696762772407\
663035354759457138217852516642743"
tdegree = "0.017453292519943295769236907684886127134428718885417254560971914\
4017100911460344944368224156963450948221"
teuler = "0.5772156649015328606065120900824024310421593359399235988057672348\
84867726777664670936947063291746749516"
tln2 = "0.693147180559945309417232121458176568075500134360255254120680009493\
393621969694715605863326996418687542"
tln10 = "2.30258509299404568401799145468436420760110148862877297603332790096\
757260967735248023599720508959829834"
tcatalan = "0.91596559417721901505460351493238411077414937428167213426649811\
9621763019776254769479356512926115106249"
tkhinchin = "2.6854520010653064453097148354817956938203822939944629530511523\
4555721885953715200280114117493184769800"
tglaisher = "1.2824271291006226368753425688697917277676889273250011920637400\
2174040630885882646112973649195820237439420646"
tapery = "1.2020569031595942853997381615114499907649862923404988817922715553\
4183820578631309018645587360933525815"
tphi = "1.618033988749894848204586834365638117720309179805762862135448622705\
26046281890244970720720418939113748475"
tmertens = "0.26149721284764278375542683860869585905156664826119920619206421\
3924924510897368209714142631434246651052"
ttwinprime = "0.660161815846869573927812110014555778432623360284733413319448\
423335405642304495277143760031413839867912"
def test_constants():
for prec in [3, 7, 10, 15, 20, 37, 80, 100, 29]:
mp.dps = prec
assert pi == mpf(tpi)
assert e == mpf(te)
assert degree == mpf(tdegree)
assert euler == mpf(teuler)
assert ln2 == mpf(tln2)
assert ln10 == mpf(tln10)
assert catalan == mpf(tcatalan)
assert khinchin == mpf(tkhinchin)
assert glaisher == mpf(tglaisher)
assert phi == mpf(tphi)
if prec < 50:
assert mertens == mpf(tmertens)
assert twinprime == mpf(ttwinprime)
mp.dps = 15
def test_exact_sqrts():
for i in range(20000):
assert sqrt(mpf(i*i)) == i
random.seed(1)
for prec in [100, 300, 1000, 10000]:
mp.dps = prec
for i in range(20):
A = random.randint(10**(prec//2-2), 10**(prec//2-1))
assert sqrt(mpf(A*A)) == A
mp.dps = 15
for i in range(100):
for a in [1, 8, 25, 112307]:
assert sqrt(mpf((a*a, 2*i))) == mpf((a, i))
assert sqrt(mpf((a*a, -2*i))) == mpf((a, -i))
def test_sqrt_rounding():
for i in [2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15]:
i = from_int(i)
for dps in [7, 15, 83, 106, 2000]:
mp.dps = dps
a = mpf_pow_int(mpf_sqrt(i, mp.prec, round_down), 2, mp.prec, round_down)
b = mpf_pow_int(mpf_sqrt(i, mp.prec, round_up), 2, mp.prec, round_up)
assert mpf_lt(a, i)
assert mpf_gt(b, i)
random.seed(1234)
prec = 100
for rnd in [round_down, round_nearest, round_ceiling]:
for i in range(100):
a = mpf_rand(prec)
b = mpf_mul(a, a)
assert mpf_sqrt(b, prec, rnd) == a
# Test some extreme cases
mp.dps = 100
a = mpf(9) + 1e-90
b = mpf(9) - 1e-90
mp.dps = 15
assert sqrt(a, rounding='d') == 3
assert sqrt(a, rounding='n') == 3
assert sqrt(a, rounding='u') > 3
assert sqrt(b, rounding='d') < 3
assert sqrt(b, rounding='n') == 3
assert sqrt(b, rounding='u') == 3
# A worst case, from the MPFR test suite
assert sqrt(mpf('7.0503726185518891')) == mpf('2.655253776675949')
def test_float_sqrt():
mp.dps = 15
# These should round identically
for x in [0, 1e-7, 0.1, 0.5, 1, 2, 3, 4, 5, 0.333, 76.19]:
assert sqrt(mpf(x)) == float(x)**0.5
assert sqrt(-1) == 1j
assert sqrt(-2).ae(cmath.sqrt(-2))
assert sqrt(-3).ae(cmath.sqrt(-3))
assert sqrt(-100).ae(cmath.sqrt(-100))
assert sqrt(1j).ae(cmath.sqrt(1j))
assert sqrt(-1j).ae(cmath.sqrt(-1j))
assert sqrt(math.pi + math.e*1j).ae(cmath.sqrt(math.pi + math.e*1j))
assert sqrt(math.pi - math.e*1j).ae(cmath.sqrt(math.pi - math.e*1j))
def test_hypot():
assert hypot(0, 0) == 0
assert hypot(0, 0.33) == mpf(0.33)
assert hypot(0.33, 0) == mpf(0.33)
assert hypot(-0.33, 0) == mpf(0.33)
assert hypot(3, 4) == mpf(5)
def test_exact_cbrt():
for i in range(0, 20000, 200):
assert cbrt(mpf(i*i*i)) == i
random.seed(1)
for prec in [100, 300, 1000, 10000]:
mp.dps = prec
A = random.randint(10**(prec//2-2), 10**(prec//2-1))
assert cbrt(mpf(A*A*A)) == A
mp.dps = 15
def test_exp():
assert exp(0) == 1
assert exp(10000).ae(mpf('8.8068182256629215873e4342'))
assert exp(-10000).ae(mpf('1.1354838653147360985e-4343'))
a = exp(mpf((1, 8198646019315405L, -53, 53)))
assert(a.bc == bitcount(a.man))
mp.prec = 67
a = exp(mpf((1, 1781864658064754565L, -60, 61)))
assert(a.bc == bitcount(a.man))
mp.prec = 53
assert exp(ln2 * 10).ae(1024)
assert exp(2+2j).ae(cmath.exp(2+2j))
def test_issue_33():
mp.dps = 512
a = exp(-1)
b = exp(1)
mp.dps = 15
assert (+a).ae(0.36787944117144233)
assert (+b).ae(2.7182818284590451)
def test_log():
mp.dps = 15
assert log(1) == 0
for x in [0.5, 1.5, 2.0, 3.0, 100, 10**50, 1e-50]:
assert log(x).ae(math.log(x))
assert log(x, x) == 1
assert log(1024, 2) == 10
assert log(10**1234, 10) == 1234
assert log(2+2j).ae(cmath.log(2+2j))
# Accuracy near 1
assert (log(0.6+0.8j).real*10**17).ae(2.2204460492503131)
assert (log(0.6-0.8j).real*10**17).ae(2.2204460492503131)
assert (log(0.8-0.6j).real*10**17).ae(2.2204460492503131)
assert (log(1+1e-8j).real*10**16).ae(0.5)
assert (log(1-1e-8j).real*10**16).ae(0.5)
assert (log(-1+1e-8j).real*10**16).ae(0.5)
assert (log(-1-1e-8j).real*10**16).ae(0.5)
assert (log(1j+1e-8).real*10**16).ae(0.5)
assert (log(1j-1e-8).real*10**16).ae(0.5)
assert (log(-1j+1e-8).real*10**16).ae(0.5)
assert (log(-1j-1e-8).real*10**16).ae(0.5)
assert (log(1+1e-40j).real*10**80).ae(0.5)
assert (log(1j+1e-40).real*10**80).ae(0.5)
# Huge
assert log(ldexp(1.234,10**20)).ae(log(2)*1e20)
assert log(ldexp(1.234,10**200)).ae(log(2)*1e200)
# Some special values
assert log(mpc(0,0)) == mpc(-inf,0)
assert isnan(log(mpc(nan,0)).real)
assert isnan(log(mpc(nan,0)).imag)
assert isnan(log(mpc(0,nan)).real)
assert isnan(log(mpc(0,nan)).imag)
assert isnan(log(mpc(nan,1)).real)
assert isnan(log(mpc(nan,1)).imag)
assert isnan(log(mpc(1,nan)).real)
assert isnan(log(mpc(1,nan)).imag)
def test_trig_hyperb_basic():
for x in (range(100) + range(-100,0)):
t = x / 4.1
assert cos(mpf(t)).ae(math.cos(t))
assert sin(mpf(t)).ae(math.sin(t))
assert tan(mpf(t)).ae(math.tan(t))
assert cosh(mpf(t)).ae(math.cosh(t))
assert sinh(mpf(t)).ae(math.sinh(t))
assert tanh(mpf(t)).ae(math.tanh(t))
assert sin(1+1j).ae(cmath.sin(1+1j))
assert sin(-4-3.6j).ae(cmath.sin(-4-3.6j))
assert cos(1+1j).ae(cmath.cos(1+1j))
assert cos(-4-3.6j).ae(cmath.cos(-4-3.6j))
def test_degrees():
assert cos(0*degree) == 1
assert cos(90*degree).ae(0)
assert cos(180*degree).ae(-1)
assert cos(270*degree).ae(0)
assert cos(360*degree).ae(1)
assert sin(0*degree) == 0
assert sin(90*degree).ae(1)
assert sin(180*degree).ae(0)
assert sin(270*degree).ae(-1)
assert sin(360*degree).ae(0)
def random_complexes(N):
random.seed(1)
a = []
for i in range(N):
x1 = random.uniform(-10, 10)
y1 = random.uniform(-10, 10)
x2 = random.uniform(-10, 10)
y2 = random.uniform(-10, 10)
z1 = complex(x1, y1)
z2 = complex(x2, y2)
a.append((z1, z2))
return a
def test_complex_powers():
for dps in [15, 30, 100]:
# Check accuracy for complex square root
mp.dps = dps
a = mpc(1j)**0.5
assert a.real == a.imag == mpf(2)**0.5 / 2
mp.dps = 15
random.seed(1)
for (z1, z2) in random_complexes(100):
assert (mpc(z1)**mpc(z2)).ae(z1**z2, 1e-12)
assert (e**(-pi*1j)).ae(-1)
mp.dps = 50
assert (e**(-pi*1j)).ae(-1)
mp.dps = 15
def test_complex_sqrt_accuracy():
def test_mpc_sqrt(lst):
for a, b in lst:
z = mpc(a + j*b)
assert mpc_ae(sqrt(z*z), z)
z = mpc(-a + j*b)
assert mpc_ae(sqrt(z*z), -z)
z = mpc(a - j*b)
assert mpc_ae(sqrt(z*z), z)
z = mpc(-a - j*b)
assert mpc_ae(sqrt(z*z), -z)
random.seed(2)
N = 10
mp.dps = 30
dps = mp.dps
test_mpc_sqrt([(random.uniform(0, 10),random.uniform(0, 10)) for i in range(N)])
test_mpc_sqrt([(i + 0.1, (i + 0.2)*10**i) for i in range(N)])
mp.dps = 15
def test_atan():
mp.dps = 15
assert atan(-2.3).ae(math.atan(-2.3))
assert atan(1e-50) == 1e-50
assert atan(1e50).ae(pi/2)
assert atan(-1e-50) == -1e-50
assert atan(-1e50).ae(-pi/2)
assert atan(10**1000).ae(pi/2)
for dps in [25, 70, 100, 300, 1000]:
mp.dps = dps
assert (4*atan(1)).ae(pi)
mp.dps = 15
pi2 = pi/2
assert atan(mpc(inf,-1)).ae(pi2)
assert atan(mpc(inf,0)).ae(pi2)
assert atan(mpc(inf,1)).ae(pi2)
assert atan(mpc(1,inf)).ae(pi2)
assert atan(mpc(0,inf)).ae(pi2)
assert atan(mpc(-1,inf)).ae(-pi2)
assert atan(mpc(-inf,1)).ae(-pi2)
assert atan(mpc(-inf,0)).ae(-pi2)
assert atan(mpc(-inf,-1)).ae(-pi2)
assert atan(mpc(-1,-inf)).ae(-pi2)
assert atan(mpc(0,-inf)).ae(-pi2)
assert atan(mpc(1,-inf)).ae(pi2)
def test_atan2():
mp.dps = 15
assert atan2(1,1).ae(pi/4)
assert atan2(1,-1).ae(3*pi/4)
assert atan2(-1,-1).ae(-3*pi/4)
assert atan2(-1,1).ae(-pi/4)
assert atan2(-1,0).ae(-pi/2)
assert atan2(1,0).ae(pi/2)
assert atan2(0,0) == 0
assert atan2(inf,0).ae(pi/2)
assert atan2(-inf,0).ae(-pi/2)
assert isnan(atan2(inf,inf))
assert isnan(atan2(-inf,inf))
assert isnan(atan2(inf,-inf))
assert isnan(atan2(3,nan))
assert isnan(atan2(nan,3))
assert isnan(atan2(0,nan))
assert isnan(atan2(nan,0))
assert atan2(0,inf) == 0
assert atan2(0,-inf).ae(pi)
assert atan2(10,inf) == 0
assert atan2(-10,inf) == 0
assert atan2(-10,-inf).ae(-pi)
assert atan2(10,-inf).ae(pi)
assert atan2(inf,10).ae(pi/2)
assert atan2(inf,-10).ae(pi/2)
assert atan2(-inf,10).ae(-pi/2)
assert atan2(-inf,-10).ae(-pi/2)
def test_areal_inverses():
assert asin(mpf(0)) == 0
assert asinh(mpf(0)) == 0
assert acosh(mpf(1)) == 0
assert isinstance(asin(mpf(0.5)), mpf)
assert isinstance(asin(mpf(2.0)), mpc)
assert isinstance(acos(mpf(0.5)), mpf)
assert isinstance(acos(mpf(2.0)), mpc)
assert isinstance(atanh(mpf(0.1)), mpf)
assert isinstance(atanh(mpf(1.1)), mpc)
random.seed(1)
for i in range(50):
x = random.uniform(0, 1)
assert asin(mpf(x)).ae(math.asin(x))
assert acos(mpf(x)).ae(math.acos(x))
x = random.uniform(-10, 10)
assert asinh(mpf(x)).ae(cmath.asinh(x).real)
assert isinstance(asinh(mpf(x)), mpf)
x = random.uniform(1, 10)
assert acosh(mpf(x)).ae(cmath.acosh(x).real)
assert isinstance(acosh(mpf(x)), mpf)
x = random.uniform(-10, 0.999)
assert isinstance(acosh(mpf(x)), mpc)
x = random.uniform(-1, 1)
assert atanh(mpf(x)).ae(cmath.atanh(x).real)
assert isinstance(atanh(mpf(x)), mpf)
dps = mp.dps
mp.dps = 300
assert isinstance(asin(0.5), mpf)
mp.dps = 1000
assert asin(1).ae(pi/2)
assert asin(-1).ae(-pi/2)
mp.dps = dps
def test_invhyperb_inaccuracy():
mp.dps = 15
assert (asinh(1e-5)*10**5).ae(0.99999999998333333)
assert (asinh(1e-10)*10**10).ae(1)
assert (asinh(1e-50)*10**50).ae(1)
assert (asinh(-1e-5)*10**5).ae(-0.99999999998333333)
assert (asinh(-1e-10)*10**10).ae(-1)
assert (asinh(-1e-50)*10**50).ae(-1)
assert asinh(10**20).ae(46.744849040440862)
assert asinh(-10**20).ae(-46.744849040440862)
assert (tanh(1e-10)*10**10).ae(1)
assert (tanh(-1e-10)*10**10).ae(-1)
assert (atanh(1e-10)*10**10).ae(1)
assert (atanh(-1e-10)*10**10).ae(-1)
def test_complex_functions():
for x in (range(10) + range(-10,0)):
for y in (range(10) + range(-10,0)):
z = complex(x, y)/4.3 + 0.01j
assert exp(mpc(z)).ae(cmath.exp(z))
assert log(mpc(z)).ae(cmath.log(z))
assert cos(mpc(z)).ae(cmath.cos(z))
assert sin(mpc(z)).ae(cmath.sin(z))
assert tan(mpc(z)).ae(cmath.tan(z))
assert sinh(mpc(z)).ae(cmath.sinh(z))
assert cosh(mpc(z)).ae(cmath.cosh(z))
assert tanh(mpc(z)).ae(cmath.tanh(z))
def test_complex_inverse_functions():
for (z1, z2) in random_complexes(30):
# apparently cmath uses a different branch, so we
# can't use it for comparison
assert sinh(asinh(z1)).ae(z1)
#
assert acosh(z1).ae(cmath.acosh(z1))
assert atanh(z1).ae(cmath.atanh(z1))
assert atan(z1).ae(cmath.atan(z1))
# the reason we set a big eps here is that the cmath
# functions are inaccurate
assert asin(z1).ae(cmath.asin(z1), rel_eps=1e-12)
assert acos(z1).ae(cmath.acos(z1), rel_eps=1e-12)
one = mpf(1)
for i in range(-9, 10, 3):
for k in range(-9, 10, 3):
a = 0.9*j*10**k + 0.8*one*10**i
b = cos(acos(a))
assert b.ae(a)
b = sin(asin(a))
assert b.ae(a)
one = mpf(1)
err = 2*10**-15
for i in range(-9, 9, 3):
for k in range(-9, 9, 3):
a = -0.9*10**k + j*0.8*one*10**i
b = cosh(acosh(a))
assert b.ae(a, err)
b = sinh(asinh(a))
assert b.ae(a, err)
def test_reciprocal_functions():
assert sec(3).ae(-1.01010866590799375)
assert csc(3).ae(7.08616739573718592)
assert cot(3).ae(-7.01525255143453347)
assert sech(3).ae(0.0993279274194332078)
assert csch(3).ae(0.0998215696688227329)
assert coth(3).ae(1.00496982331368917)
assert asec(3).ae(1.23095941734077468)
assert acsc(3).ae(0.339836909454121937)
assert acot(3).ae(0.321750554396642193)
assert asech(0.5).ae(1.31695789692481671)
assert acsch(3).ae(0.327450150237258443)
assert acoth(3).ae(0.346573590279972655)
def test_ldexp():
mp.dps = 15
assert ldexp(mpf(2.5), 0) == 2.5
assert ldexp(mpf(2.5), -1) == 1.25
assert ldexp(mpf(2.5), 2) == 10
assert ldexp(mpf('inf'), 3) == mpf('inf')
def test_frexp():
mp.dps = 15
assert frexp(0) == (0.0, 0)
assert frexp(9) == (0.5625, 4)
assert frexp(1) == (0.5, 1)
assert frexp(0.2) == (0.8, -2)
assert frexp(1000) == (0.9765625, 10)
def test_aliases():
assert ln(7) == log(7)
assert log10(3.75) == log(3.75,10)
assert degrees(5.6) == 5.6 / degree
assert radians(5.6) == 5.6 * degree
assert power(-1,0.5) == j
assert modf(25,7) == 4.0 and isinstance(modf(25,7), mpf)
def test_arg_sign():
assert arg(3) == 0
assert arg(-3).ae(pi)
assert arg(j).ae(pi/2)
assert arg(-j).ae(-pi/2)
assert arg(0) == 0
assert isnan(atan2(3,nan))
assert isnan(atan2(nan,3))
assert isnan(atan2(0,nan))
assert isnan(atan2(nan,0))
assert isnan(atan2(nan,nan))
assert arg(inf) == 0
assert arg(-inf).ae(pi)
assert isnan(arg(nan))
#assert arg(inf*j).ae(pi/2)
assert sign(0) == 0
assert sign(3) == 1
assert sign(-3) == -1
assert sign(inf) == 1
assert sign(-inf) == -1
assert isnan(sign(nan))
assert sign(j) == j
assert sign(-3*j) == -j
assert sign(1+j).ae((1+j)/sqrt(2))
def test_misc_bugs():
# test that this doesn't raise an exception
mp.dps = 1000
log(1302)
mp.dps = 15
def test_arange():
assert arange(10) == [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0'),
mpf('4.0'), mpf('5.0'), mpf('6.0'), mpf('7.0'),
mpf('8.0'), mpf('9.0')]
assert arange(-5, 5) == [mpf('-5.0'), mpf('-4.0'), mpf('-3.0'),
mpf('-2.0'), mpf('-1.0'), mpf('0.0'),
mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')]
assert arange(0, 1, 0.1) == [mpf('0.0'), mpf('0.10000000000000001'),
mpf('0.20000000000000001'),
mpf('0.30000000000000004'),
mpf('0.40000000000000002'),
mpf('0.5'), mpf('0.60000000000000009'),
mpf('0.70000000000000007'),
mpf('0.80000000000000004'),
mpf('0.90000000000000002')]
assert arange(17, -9, -3) == [mpf('17.0'), mpf('14.0'), mpf('11.0'),
mpf('8.0'), mpf('5.0'), mpf('2.0'),
mpf('-1.0'), mpf('-4.0'), mpf('-7.0')]
assert arange(0.2, 0.1, -0.1) == [mpf('0.20000000000000001')]
assert arange(0) == []
assert arange(1000, -1) == []
assert arange(-1.23, 3.21, -0.0000001) == []
def test_linspace():
assert linspace(2, 9, 7) == [mpf('2.0'), mpf('3.166666666666667'),
mpf('4.3333333333333339'), mpf('5.5'), mpf('6.666666666666667'),
mpf('7.8333333333333339'), mpf('9.0')] == linspace(mpi(2, 9), 7)
assert linspace(2, 9, 7, endpoint=0) == [mpf('2.0'), mpf('3.0'), mpf('4.0'),
mpf('5.0'), mpf('6.0'), mpf('7.0'), mpf('8.0')]
assert linspace(2, 7, 1) == [mpf(2)]
def test_float_cbrt():
mp.dps = 30
for a in arange(0,10,0.1):
assert cbrt(a*a*a).ae(a, eps)
assert cbrt(-1).ae(0.5 + j*sqrt(3)/2)
one_third = mpf(1)/3
for a in arange(0,10,2.7) + [0.1 + 10**5]:
a = mpc(a + 1.1j)
r1 = cbrt(a)
mp.dps += 10
r2 = pow(a, one_third)
mp.dps -= 10
assert r1.ae(r2, eps)
mp.dps = 100
for n in range(100, 301, 100):
w = 10**n + j*10**-3
z = w*w*w
r = cbrt(z)
assert mpc_ae(r, w, eps)
mp.dps = 15
def test_root():
mp.dps = 30
random.seed(1)
a = random.randint(0, 10000)
p = a*a*a
r = nthroot(mpf(p), 3)
assert r == a
for n in range(4, 10):
p = p*a
assert nthroot(mpf(p), n) == a
mp.dps = 40
for n in range(10, 5000, 100):
for a in [random.random()*10000, random.random()*10**100]:
r = nthroot(a, n)
r1 = pow(a, mpf(1)/n)
assert r.ae(r1)
r = nthroot(a, -n)
r1 = pow(a, -mpf(1)/n)
assert r.ae(r1)
# XXX: this is broken right now
# tests for nthroot rounding
for rnd in ['nearest', 'up', 'down']:
mp.rounding = rnd
for n in [-5, -3, 3, 5]:
prec = 50
for i in xrange(10):
mp.prec = prec
a = rand()
mp.prec = 2*prec
b = a**n
mp.prec = prec
r = nthroot(b, n)
assert r == a
mp.dps = 30
for n in range(3, 21):
a = (random.random() + j*random.random())
assert nthroot(a, n).ae(pow(a, mpf(1)/n))
assert mpc_ae(nthroot(a, n), pow(a, mpf(1)/n))
a = (random.random()*10**100 + j*random.random())
r = nthroot(a, n)
mp.dps += 4
r1 = pow(a, mpf(1)/n)
mp.dps -= 4
assert r.ae(r1)
assert mpc_ae(r, r1, eps)
r = nthroot(a, -n)
mp.dps += 4
r1 = pow(a, -mpf(1)/n)
mp.dps -= 4
assert r.ae(r1)
assert mpc_ae(r, r1, eps)
mp.dps = 15
assert nthroot(4, 1) == 4
assert nthroot(4, 0) == 1
assert nthroot(4, -1) == 0.25
assert nthroot(inf, 1) == inf
assert nthroot(inf, 2) == inf
assert nthroot(inf, 3) == inf
assert nthroot(inf, -1) == 0
assert nthroot(inf, -2) == 0
assert nthroot(inf, -3) == 0
assert nthroot(j, 1) == j
assert nthroot(j, 0) == 1
assert nthroot(j, -1) == -j
assert isnan(nthroot(nan, 1))
assert isnan(nthroot(nan, 0))
assert isnan(nthroot(nan, -1))
assert isnan(nthroot(inf, 0))
assert root(2,3) == nthroot(2,3)
assert root(16,4,0) == 2
assert root(16,4,1) == 2j
assert root(16,4,2) == -2
assert root(16,4,3) == -2j
assert root(16,4,4) == 2
assert root(-125,3,1) == -5
def test_issue_96():
for dps in [20, 80]:
mp.dps = dps
r = nthroot(mpf('-1e-20'), 4)
assert r.ae(mpf(10)**(-5) * (1 + j) * mpf(2)**(-0.5))
mp.dps = 80
assert nthroot('-1e-3', 4).ae(mpf(10)**(-3./4) * (1 + j)/sqrt(2))
assert nthroot('-1e-6', 4).ae((1 + j)/(10 * sqrt(20)))
# Check that this doesn't take eternity to compute
mp.dps = 20
assert nthroot('-1e100000000', 4).ae((1+j)*mpf('1e25000000')/sqrt(2))
mp.dps = 15
def test_perturbation_rounding():
mp.dps = 100
a = pi/10**50
b = -pi/10**50
c = 1 + a
d = 1 + b
mp.dps = 15
assert exp(a) == 1
assert exp(a, rounding='c') > 1
assert exp(b, rounding='c') == 1
assert exp(a, rounding='f') == 1
assert exp(b, rounding='f') < 1
assert cos(a) == 1
assert cos(a, rounding='c') == 1
assert cos(b, rounding='c') == 1
assert cos(a, rounding='f') < 1
assert cos(b, rounding='f') < 1
for f in [sin, atan, asinh, tanh]:
assert f(a) == +a
assert f(a, rounding='c') > a
assert f(a, rounding='f') < a
assert f(b) == +b
assert f(b, rounding='c') > b
assert f(b, rounding='f') < b
for f in [asin, tan, sinh, atanh]:
assert f(a) == +a
assert f(b) == +b
assert f(a, rounding='c') > a
assert f(b, rounding='c') > b
assert f(a, rounding='f') < a
assert f(b, rounding='f') < b
assert ln(c) == +a
assert ln(d) == +b
assert ln(c, rounding='c') > a
assert ln(c, rounding='f') < a
assert ln(d, rounding='c') > b
assert ln(d, rounding='f') < b
assert cosh(a) == 1
assert cosh(b) == 1
assert cosh(a, rounding='c') > 1
assert cosh(b, rounding='c') > 1
assert cosh(a, rounding='f') == 1
assert cosh(b, rounding='f') == 1
def test_integer_parts():
assert floor(3.2) == 3
assert ceil(3.2) == 4
assert floor(3.2+5j) == 3+5j
assert ceil(3.2+5j) == 4+5j
def test_complex_parts():
assert fabs('3') == 3
assert fabs(3+4j) == 5
assert re(3) == 3
assert re(1+4j) == 1
assert im(3) == 0
assert im(1+4j) == 4
assert conj(3) == 3
assert conj(3+4j) == 3-4j
assert mpf(3).conjugate() == 3
def test_cospi_sinpi():
assert sinpi(0) == 0
assert sinpi(0.5) == 1
assert sinpi(1) == 0
assert sinpi(1.5) == -1
assert sinpi(2) == 0
assert sinpi(2.5) == 1
assert sinpi(-0.5) == -1
assert cospi(0) == 1
assert cospi(0.5) == 0
assert cospi(1) == -1
assert cospi(1.5) == 0
assert cospi(2) == 1
assert cospi(2.5) == 0
assert cospi(-0.5) == 0
assert cospi(100000000000.25).ae(sqrt(2)/2)
a = cospi(2+3j)
assert a.real.ae(cos((2+3j)*pi).real)
assert a.imag == 0
b = sinpi(2+3j)
assert b.imag.ae(sin((2+3j)*pi).imag)
assert b.real == 0
mp.dps = 35
x1 = mpf(10000) - mpf('1e-15')
x2 = mpf(10000) + mpf('1e-15')
x3 = mpf(10000.5) - mpf('1e-15')
x4 = mpf(10000.5) + mpf('1e-15')
x5 = mpf(10001) - mpf('1e-15')
x6 = mpf(10001) + mpf('1e-15')
x7 = mpf(10001.5) - mpf('1e-15')
x8 = mpf(10001.5) + mpf('1e-15')
mp.dps = 15
M = 10**15
assert (sinpi(x1)*M).ae(-pi)
assert (sinpi(x2)*M).ae(pi)
assert (cospi(x3)*M).ae(pi)
assert (cospi(x4)*M).ae(-pi)
assert (sinpi(x5)*M).ae(pi)
assert (sinpi(x6)*M).ae(-pi)
assert (cospi(x7)*M).ae(-pi)
assert (cospi(x8)*M).ae(pi)
assert 0.999 < cospi(x1, rounding='d') < 1
assert 0.999 < cospi(x2, rounding='d') < 1
assert 0.999 < sinpi(x3, rounding='d') < 1
assert 0.999 < sinpi(x4, rounding='d') < 1
assert -1 < cospi(x5, rounding='d') < -0.999
assert -1 < cospi(x6, rounding='d') < -0.999
assert -1 < sinpi(x7, rounding='d') < -0.999
assert -1 < sinpi(x8, rounding='d') < -0.999
assert (sinpi(1e-15)*M).ae(pi)
assert (sinpi(-1e-15)*M).ae(-pi)
assert cospi(1e-15) == 1
assert cospi(1e-15, rounding='d') < 1
def test_sinc():
assert sinc(0) == sincpi(0) == 1
assert sinc(inf) == sincpi(inf) == 0
assert sinc(-inf) == sincpi(-inf) == 0
assert sinc(2).ae(0.45464871341284084770)
assert sinc(2+3j).ae(0.4463290318402435457-2.7539470277436474940j)
assert sincpi(2) == 0
assert sincpi(1.5).ae(-0.212206590789193781)
def test_fibonacci():
mp.dps = 15
assert [fibonacci(n) for n in range(-5, 10)] == \
[5, -3, 2, -1, 1, 0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
assert fib(2.5).ae(1.4893065462657091)
assert fib(3+4j).ae(-5248.51130728372 - 14195.962288353j)
assert fib(1000).ae(4.3466557686937455e+208)
assert str(fib(10**100)) == '6.24499112864607e+2089876402499787337692720892375554168224592399182109535392875613974104853496745963277658556235103534'
mp.dps = 2100
a = fib(10000)
assert a % 10**10 == 9947366875
mp.dps = 15
assert fibonacci(inf) == inf
assert fib(3+0j) == 2
def test_call_with_dps():
mp.dps = 15
assert abs(exp(1, dps=30)-e(dps=35)) < 1e-29
def test_tanh():
mp.dps = 15
assert tanh(0) == 0
assert tanh(inf) == 1
assert tanh(-inf) == -1
assert isnan(tanh(nan))
assert tanh(mpc('inf', '0')) == 1
def test_atanh():
mp.dps = 15
assert atanh(0) == 0
assert atanh(0.5).ae(0.54930614433405484570)
assert atanh(-0.5).ae(-0.54930614433405484570)
assert atanh(1) == inf
assert atanh(-1) == -inf
assert isnan(atanh(nan))
assert isinstance(atanh(1), mpf)
assert isinstance(atanh(-1), mpf)
# Limits at infinity
jpi2 = j*pi/2
assert atanh(inf).ae(-jpi2)
assert atanh(-inf).ae(jpi2)
assert atanh(mpc(inf,-1)).ae(-jpi2)
assert atanh(mpc(inf,0)).ae(-jpi2)
assert atanh(mpc(inf,1)).ae(jpi2)
assert atanh(mpc(1,inf)).ae(jpi2)
assert atanh(mpc(0,inf)).ae(jpi2)
assert atanh(mpc(-1,inf)).ae(jpi2)
assert atanh(mpc(-inf,1)).ae(jpi2)
assert atanh(mpc(-inf,0)).ae(jpi2)
assert atanh(mpc(-inf,-1)).ae(-jpi2)
assert atanh(mpc(-1,-inf)).ae(-jpi2)
assert atanh(mpc(0,-inf)).ae(-jpi2)
assert atanh(mpc(1,-inf)).ae(-jpi2)
def test_expm1():
mp.dps = 15
assert expm1(0) == 0
assert expm1(3).ae(exp(3)-1)
assert expm1(inf) == inf
assert expm1(1e-10)*1e10
assert expm1(1e-50).ae(1e-50)
assert (expm1(1e-10)*1e10).ae(1.00000000005)
def test_powm1():
mp.dps = 15
assert powm1(2,3) == 7
assert powm1(-1,2) == 0
assert powm1(-1,0) == 0
assert powm1(-2,0) == 0
assert powm1(3+4j,0) == 0
assert powm1(0,1) == -1
assert powm1(0,0) == 0
assert powm1(1,0) == 0
assert powm1(1,2) == 0
assert powm1(1,3+4j) == 0
assert powm1(1,5) == 0
assert powm1(j,4) == 0
assert powm1(-j,4) == 0
assert (powm1(2,1e-100)*1e100).ae(ln2)
assert powm1(2,'1e-100000000000') != 0
assert (powm1(fadd(1,1e-100,exact=True), 5)*1e100).ae(5)
def test_unitroots():
assert unitroots(1) == [1]
assert unitroots(2) == [1, -1]
a, b, c = unitroots(3)
assert a == 1
assert b.ae(-0.5 + 0.86602540378443864676j)
assert c.ae(-0.5 - 0.86602540378443864676j)
assert unitroots(1, primitive=True) == [1]
assert unitroots(2, primitive=True) == [-1]
assert unitroots(3, primitive=True) == unitroots(3)[1:]
assert unitroots(4, primitive=True) == [j, -j]
assert len(unitroots(17, primitive=True)) == 16
assert len(unitroots(16, primitive=True)) == 8
def test_cyclotomic():
mp.dps = 15
assert [cyclotomic(n,1) for n in range(31)] == [1,0,2,3,2,5,1,7,2,3,1,11,1,13,1,1,2,17,1,19,1,1,1,23,1,5,1,3,1,29,1]
assert [cyclotomic(n,-1) for n in range(31)] == [1,-2,0,1,2,1,3,1,2,1,5,1,1,1,7,1,2,1,3,1,1,1,11,1,1,1,13,1,1,1,1]
assert [cyclotomic(n,j) for n in range(21)] == [1,-1+j,1+j,j,0,1,-j,j,2,-j,1,j,3,1,-j,1,2,1,j,j,5]
assert [cyclotomic(n,-j) for n in range(21)] == [1,-1-j,1-j,-j,0,1,j,-j,2,j,1,-j,3,1,j,1,2,1,-j,-j,5]
assert cyclotomic(1624,j) == 1
assert cyclotomic(33600,j) == 1
u = sqrt(j, prec=500)
assert cyclotomic(8, u).ae(0)
assert cyclotomic(30, u).ae(5.8284271247461900976)
assert cyclotomic(2040, u).ae(1)
assert cyclotomic(0,2.5) == 1
assert cyclotomic(1,2.5) == 2.5-1
assert cyclotomic(2,2.5) == 2.5+1
assert cyclotomic(3,2.5) == 2.5**2 + 2.5 + 1
assert cyclotomic(7,2.5) == 406.234375
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import distutils
import os
import signal
import sys
import warnings
from distutils import log
from distutils.ccompiler import CompileError
from distutils.command.config import LANG_EXT
from distutils.command.config import config as old_config
from distutils.file_util import copy_file
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_gcc_variable_attribute,
check_inline,
check_restrict,
check_compiler_gcc4)
from numpy.distutils.compat import get_exception
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
old_config.user_options += [
('fcompiler=', None, "specify the Fortran compiler type"),
]
def initialize_options(self):
self.fcompiler = None
old_config.initialize_options(self)
def _check_compiler(self):
old_config._check_compiler(self)
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
if sys.platform == 'win32' and (self.compiler.compiler_type in
('msvc', 'intelw', 'intelemw')):
# XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
# initialize calls query_vcvarsall, which throws an IOError, and
# causes an error along the way without much information. We try to
# catch it here, hoping it is early enough, and print a helpful
# message instead of Error: None.
if not self.compiler.initialized:
try:
self.compiler.initialize()
except IOError:
e = get_exception()
msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
build -c mingw32" instead. If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
VS 2010 for >= 3.3).
Original exception was: %s, and the Compiler class was %s
============================================================================""" \
% (e, self.compiler.__class__.__name__)
print("""\
============================================================================""")
raise distutils.errors.DistutilsPlatformError(msg)
# After MSVC is initialized, add an explicit /MANIFEST to linker
# flags. See issues gh-4245 and gh-4101 for details. Also
# relevant are issues 4431 and 16296 on the Python bug tracker.
from distutils import msvc9compiler
if msvc9compiler.get_build_version() >= 10:
for ldflags in [self.compiler.ldflags_shared,
self.compiler.ldflags_shared_debug]:
if '/MANIFEST' not in ldflags:
ldflags.append('/MANIFEST')
if not isinstance(self.fcompiler, FCompiler):
self.fcompiler = new_fcompiler(compiler=self.fcompiler,
dry_run=self.dry_run, force=1,
c_compiler=self.compiler)
if self.fcompiler is not None:
self.fcompiler.customize(self.distribution)
if self.fcompiler.get_version():
self.fcompiler.customize_cmd(self)
self.fcompiler.show_customization()
def _wrap_method(self, mth, lang, args):
from distutils.ccompiler import CompileError
from distutils.errors import DistutilsExecError
save_compiler = self.compiler
if lang in ['f77', 'f90']:
self.compiler = self.fcompiler
try:
ret = mth(*((self,) + args))
except (DistutilsExecError, CompileError):
msg = str(get_exception())
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
return ret
def _compile(self, body, headers, include_dirs, lang):
return self._wrap_method(old_config._compile, lang,
(body, headers, include_dirs, lang))
def _link(self, body,
headers, include_dirs,
libraries, library_dirs, lang):
if self.compiler.compiler_type == 'msvc':
libraries = (libraries or [])[:]
library_dirs = (library_dirs or [])[:]
if lang in ['f77', 'f90']:
lang = 'c' # always use system linker when using MSVC compiler
if self.fcompiler:
for d in self.fcompiler.library_dirs or []:
# correct path when compiling in Cygwin but with
# normal Win Python
if d.startswith('/usr/lib'):
s, o = exec_command(['cygpath', '-w', d],
use_tee=False)
if not s: d = o
library_dirs.append(d)
for libname in self.fcompiler.libraries or []:
if libname not in libraries:
libraries.append(libname)
for libname in libraries:
if libname.startswith('msvc'): continue
fileexists = False
for libdir in library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists: continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(libdir, '%s.lib' % (libname))
copy_file(libfile, libfile2)
self.temp_files.append(libfile2)
fileexists = True
break
if fileexists: continue
log.warn('could not find library %r in directories %s' \
% (libname, library_dirs))
elif self.compiler.compiler_type == 'mingw32':
generate_manifest(self)
return self._wrap_method(old_config._link, lang,
(body, headers, include_dirs,
libraries, library_dirs, lang))
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
self._check_compiler()
return self.try_compile(
"/* we need a dummy line to make distutils happy */",
[header], include_dirs)
def check_decl(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main(void)
{
#ifndef %s
(void) %s;
#endif
;
return 0;
}""" % (symbol, symbol)
return self.try_compile(body, headers, include_dirs)
def check_macro_true(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main(void)
{
#if %s
#else
#error false or undefined macro
#endif
;
return 0;
}""" % (symbol,)
return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None,
library_dirs=None):
"""Check type availability. Return True if the type can be compiled,
False otherwise"""
self._check_compiler()
# First check the type can be compiled
body = r"""
int main(void) {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % {'name': type_name}
st = False
try:
try:
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
"""Check size of a given type."""
self._check_compiler()
# First check the type can be compiled
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
test_array [0] = 0
;
return 0;
}
"""
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
self._clean()
if expected:
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
for size in expected:
try:
self._compile(body % {'type': type_name, 'size': size},
headers, include_dirs, 'c')
self._clean()
return size
except CompileError:
pass
# this fails to *compile* if size > sizeof(type)
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
# The principle is simple: we first find low and high bounds of size
# for the type, where low/high are looked up on a log scale. Then, we
# do a binary search to find the exact size between low and high
low = 0
mid = 0
while True:
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
break
except CompileError:
# log.info("failure to test for bound %d" % mid)
low = mid + 1
mid = 2 * mid + 1
high = mid
# Binary search:
while low != high:
mid = (high - low) // 2 + low
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
high = mid
except CompileError:
low = mid + 1
return low
def check_func(self, func,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
# clean up distutils's config a bit: add void to main(), and
# return a value.
self._check_compiler()
body = []
if decl:
if type(decl) == str:
body.append(decl)
else:
body.append("int %s (void);" % func)
# Handle MSVC intrinsics: force MS compiler to make a function call.
# Useful to test for some functions when built with optimization on, to
# avoid build error because the intrinsic and our 'fake' test
# declaration do not match.
body.append("#ifdef _MSC_VER")
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
if call_args is None:
call_args = ''
body.append(" %s(%s);" % (func, call_args))
else:
body.append(" %s;" % func)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_funcs_once(self, funcs,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
"""Check a list of functions at once.
This is useful to speed things up, since all the functions in the funcs
list will be put in one compilation unit.
Arguments
---------
funcs : seq
list of functions to test
include_dirs : seq
list of header paths
libraries : seq
list of libraries to link the code snippet to
library_dirs : seq
list of library paths
decl : dict
for every (key, value), the declaration in the value will be
used for the function in key. If a function is not in the
dictionary, no declaration will be used.
call : dict
for every item (f, value), if the value is True, a call will be
done to the function f.
"""
self._check_compiler()
body = []
if decl:
for f, v in decl.items():
if v:
body.append("int %s (void);" % f)
# Handle MS intrinsics. See check_func for more info.
body.append("#ifdef _MSC_VER")
for func in funcs:
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
for f in funcs:
if f in call and call[f]:
if not (call_args and f in call_args and call_args[f]):
args = ''
else:
args = call_args[f]
body.append(" %s(%s);" % (f, args))
else:
body.append(" %s;" % f)
else:
for f in funcs:
body.append(" %s;" % f)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
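# A hedged usage sketch for check_func / check_funcs_once above, kept as a
# comment since it sits inside the class body. `cfg` stands for the config
# command instance that distutils provides; the probed functions and the
# libraries are illustrative only.
#   funcs = ["sin", "cos", "expm1"]
#   # Probe one function, forcing a real call so MSVC intrinsics are handled:
#   has_expm1 = cfg.check_func("expm1", libraries=["m"],
#                              decl=True, call=True, call_args="0.0")
#   # Probe several functions in a single compilation unit:
#   has_all = cfg.check_funcs_once(funcs, libraries=["m"],
#                                  decl=dict((f, True) for f in funcs),
#                                  call=dict((f, True) for f in funcs),
#                                  call_args={"expm1": "0.0"})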
def check_inline(self):
"""Return the inline keyword recognized by the compiler, empty string
otherwise."""
return check_inline(self)
def check_restrict(self):
"""Return the restrict keyword recognized by the compiler, empty string
otherwise."""
return check_restrict(self)
def check_compiler_gcc4(self):
"""Return True if the C compiler is gcc >= 4."""
return check_compiler_gcc4(self)
def check_gcc_function_attribute(self, attribute, name):
return check_gcc_function_attribute(self, attribute, name)
def check_gcc_variable_attribute(self, attribute):
return check_gcc_variable_attribute(self, attribute)
def get_output(self, body, headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c", use_tee=None):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Returns the exit status code
of the program and its output.
"""
# 2008-11-16, RemoveMe
warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
"Usage of get_output is deprecated: please do not \n" \
"use it anymore, and avoid configuration checks \n" \
"involving running executable on the target machine.\n" \
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning, stacklevel=2)
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
exitcode, output = 255, ''
try:
grabber = GrabStdout()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
grabber.restore()
except:
output = grabber.data
grabber.restore()
raise
exe = os.path.join('.', exe)
exitstatus, output = exec_command(exe, execute_in='.',
use_tee=use_tee)
if hasattr(os, 'WEXITSTATUS'):
exitcode = os.WEXITSTATUS(exitstatus)
if os.WIFSIGNALED(exitstatus):
sig = os.WTERMSIG(exitstatus)
log.error('subprocess exited with signal %d' % (sig,))
if sig == signal.SIGINT:
# control-C
raise KeyboardInterrupt
else:
exitcode = exitstatus
log.info("success!")
except (CompileError, LinkError):
log.info("failure.")
self._clean()
return exitcode, output
class GrabStdout(object):
def __init__(self):
self.sys_stdout = sys.stdout
self.data = ''
sys.stdout = self
def write(self, data):
self.sys_stdout.write(data)
self.data += data
def flush(self):
self.sys_stdout.flush()
def restore(self):
sys.stdout = self.sys_stdout
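# --- Illustrative sketch (not part of numpy.distutils) ----------------------
# A minimal, standalone rendering of the search strategy used by
# config.check_type_size above: grow an upper bound on a log scale until the
# probe succeeds, then binary-search for the smallest passing bound, which
# equals sizeof(type). The predicate compiles_with_bound(size) is a
# hypothetical stand-in for "the generated snippet compiles, i.e.
# sizeof(type) <= size".
def _sketch_find_type_size(compiles_with_bound):
    low = 0
    mid = 0
    # Phase 1: exponential growth of the candidate bound.
    while not compiles_with_bound(mid):
        low = mid + 1
        mid = 2 * mid + 1
    high = mid
    # Phase 2: binary search between low and high.
    while low != high:
        mid = (high - low) // 2 + low
        if compiles_with_bound(mid):
            high = mid
        else:
            low = mid + 1
    return low
# For a type of size 8 the sketch finds 8:
assert _sketch_find_type_size(lambda size: 8 <= size) == 8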
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['Cases']
## The set of cases we construct and export from this module.
## Everything else is private.
Cases = []
#### BEGIN OF CONFIG
#### END OF CONFIG
import json, time
from zope.interface import implementer
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList, maybeDeferred
from autobahn.twisted.websocket import connectWS
from autobahn.wamp import WampClientFactory, WampCraClientProtocol
from autobahntestsuite.testrun import TestResult
from autobahntestsuite.util import AttributeBag, perf_counter
from autobahntestsuite.interfaces import ITestCase
class WampCase3_1_x_x_Protocol(WampCraClientProtocol):
def onSessionOpen(self):
if self.test.testee.auth:
d = self.authenticate(**self.test.testee.auth)
d.addCallbacks(self.onAuthSuccess, self.onAuthError)
else:
self.main()
def onAuthSuccess(self, permissions):
self.main()
def onAuthError(self, e):
uri, desc, details = e.value.args
print "Authentication Error!", uri, desc, details
def main(self):
self.factory.onReady(self)
class WampCase3_1_x_x_Factory(WampClientFactory):
protocol = WampCase3_1_x_x_Protocol
def __init__(self, test, onReady, onGone):
WampClientFactory.__init__(self, test.testee.url)
self.test = test
self.onReady = onReady
self.onGone = onGone
self.proto = None
def buildProtocol(self, addr):
proto = self.protocol()
proto.factory = self
proto.test = self.test
self.proto = proto
return proto
def clientConnectionLost(self, connector, reason):
self.onGone(self.proto)
def clientConnectionFailed(self, connector, reason):
self.onGone(self.proto)
class WampCase3_1_x_x_Params(AttributeBag):
"""
Test parameter set for configuring instances of WampCase3_1_*_*.
peers: a list with one item per WAMP session run during the test, where each item contains a list of topics the peer _subscribes_ to. The publisher that publishes during the test is always the first item in the list.
publicationTopic, excludeMe, exclude, eligible: parameters controlling how events are published during the test.
eventPayloads: a list of payloads, each tested as an event payload in the test at hand.
expectedReceivers: a list of session indices, where each index references a WAMP session created for the list in `peers`.
"""
ATTRIBUTES = ['peers',
'publicationTopic',
'excludeMe',
'exclude',
'eligible',
'eventPayloads',
'expectedReceivers']
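# A hedged example of how such a parameter set might be populated, mirroring
# the keyword-argument style used by the generator function further below.
# All values here are illustrative only.
_example_params = WampCase3_1_x_x_Params(
    peers = [["http://example.com/simple"], []],
    publicationTopic = "http://example.com/simple",
    excludeMe = None,
    exclude = None,
    eligible = None,
    eventPayloads = ["Hello, world!"],
    expectedReceivers = [0])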
@implementer(ITestCase)
class WampCase3_1_x_x_Base:
DESCRIPTION = "Undefined."
EXPECTATION = "Undefined."
def __init__(self, testee):
self.testee = testee
self.client = None
self.result = TestResult()
self.result.received = {}
self.result.expected = {}
self.result.log = []
def run(self):
self.result.started = perf_counter()
def shutdown():
if self.client:
self.client.proto.sendClose()
def test(proto):
#res = yield self.call("http://api.testsuite.wamp.ws/case/3.1.1#1", 23)
## after having published everything the test had specified,
## we need to _wait_ for events on all our WAMP sessions to
## compare with our expectation. by default, we wait 3x the
## specified/default RTT
def perform(i, p):
d = proto.call("http://api.testsuite.wamp.ws/case/3.1.1#1", float(p))
def got(res):
self.result.received[i] = float(res)
d.addCallback(got)
payloads = []
payloads.extend([0])
payloads.extend([2**7-1, 2**8-1, 2**15-1, 2**16-1, 2**24])
#payloads.extend([2**7-1, 2**8-1, 2**15-1, 2**16-1, 2**24, 2**31-1, 2**32-1, 2**53])
#payloads.extend([2**53+1, 2**63-1, 2**64-1])
#payloads.extend([-2**7, -2**15, -2**24, -2**31, -2**53])
payloads.extend([-2**7, -2**15, -2**24])
#payloads.extend([-2**63])
i = 0
for p in payloads:
self.result.expected[i] = float(p)
perform(i, p)
i += 1
wait = 3 * self.testee.options.get("rtt", 0.2)
reactor.callLater(wait, shutdown)
def launch(proto):
## FIXME: explain why the following is needed, since
## without the almost zero delay (which triggers a
## reactor loop), the code will not work as expected!
#test() # <= does NOT work
reactor.callLater(0.00001, test, proto)
def error(err):
## FIXME
print "ERROR", err
shutdown()
self.finished.errback(err)
def done(proto):
self.result.ended = perf_counter()
passed = json.dumps(self.result.received) == json.dumps(self.result.expected)
if not passed:
print "EXPECTED", self.result.expected
print "RECEIVED", self.result.received
self.result.passed = passed
self.finished.callback(self.result)
self.client = WampCase3_1_x_x_Factory(self, launch, done)
connectWS(self.client)
self.finished = Deferred()
return self.finished
class WampCase3_1_1_1(WampCase3_1_x_x_Base):
pass
Cases = [WampCase3_1_1_1]
def generate_WampCase3_1_x_x_classes2():
## dynamically create case classes
##
res = []
jc = 1
for setting in SETTINGS:
ic = 1
for payload in PAYLOADS:
params = WampCase3_1_x_x_Params(peers = setting[0],
publicationTopic = setting[1],
excludeMe = setting[2],
exclude = setting[3],
eligible = setting[4],
eventPayloads = payload,
expectedReceivers = setting[5])
pl = len(params.eventPayloads)
plc = "s" if pl else ""
s = []
i = 0
for p in params.peers:
if len(p) > 0:
s.append("%d: %s" % (i, ' & '.join(p)))
else:
s.append("%d: %s" % (i, '-'))
i += 1
s = ', '.join(s)
o = []
if params.excludeMe is not None:
o.append("excludeMe = %s" % params.excludeMe)
if params.exclude is not None:
o.append("exclude = %s" % params.exclude)
if params.eligible is not None:
o.append("eligible = %s" % params.eligible)
if len(o) > 0:
o = ', '.join(o)
else:
o = "-"
description = """The test connects %d WAMP clients to the testee, subscribes \
the sessions to topics %s and \
then publishes %d event%s to the topic %s with payload%s %s from the first session. \
The test sets the following publication options: %s.
""" % (len(params.peers),
s,
pl,
plc,
params.publicationTopic,
plc,
', '.join(['"' + str(x) + '"' for x in params.eventPayloads]),
o)
expectation = """We expect the testee to dispatch the events to us on \
the sessions %s""" % (params.expectedReceivers,)
klassname = "WampCase3_1_%d_%d" % (jc, ic)
Klass = type(klassname,
(object, WampCase3_1_x_x_Base, ),
{
"__init__": WampCase3_1_x_x_Base.__init__,
"run": WampCase3_1_x_x_Base.run,
"description": description,
"expectation": expectation,
"params": params
})
res.append(Klass)
ic += 1
jc += 1
return res
#Cases.extend(generate_WampCase3_1_x_x_classes())
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_builtin_identifier233_all_of
except ImportError:
btp_builtin_identifier233_all_of = sys.modules[
"onshape_client.oas.models.btp_builtin_identifier233_all_of"
]
try:
from onshape_client.oas.models import btp_node7
except ImportError:
btp_node7 = sys.modules["onshape_client.oas.models.btp_node7"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPBuiltinIdentifier233(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute;
for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute;
for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("documentation_type",): {
"FUNCTION": "FUNCTION",
"PREDICATE": "PREDICATE",
"CONSTANT": "CONSTANT",
"ENUM": "ENUM",
"USER_TYPE": "USER_TYPE",
"FEATURE_DEFINITION": "FEATURE_DEFINITION",
"FILE_HEADER": "FILE_HEADER",
"UNDOCUMENTABLE": "UNDOCUMENTABLE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self; this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"identifier": (str,), # noqa: E501
"atomic": (bool,), # noqa: E501
"documentation_type": (str,), # noqa: E501
"end_source_location": (int,), # noqa: E501
"node_id": (str,), # noqa: E501
"short_descriptor": (str,), # noqa: E501
"space_after": (btp_space10.BTPSpace10,), # noqa: E501
"space_before": (btp_space10.BTPSpace10,), # noqa: E501
"space_default": (bool,), # noqa: E501
"start_source_location": (int,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"identifier": "identifier", # noqa: E501
"atomic": "atomic", # noqa: E501
"documentation_type": "documentationType", # noqa: E501
"end_source_location": "endSourceLocation", # noqa: E501
"node_id": "nodeId", # noqa: E501
"short_descriptor": "shortDescriptor", # noqa: E501
"space_after": "spaceAfter", # noqa: E501
"space_before": "spaceBefore", # noqa: E501
"space_default": "spaceDefault", # noqa: E501
"start_source_location": "startSourceLocation", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_builtin_identifier233.BTPBuiltinIdentifier233 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
identifier (str): [optional] # noqa: E501
atomic (bool): [optional] # noqa: E501
documentation_type (str): [optional] # noqa: E501
end_source_location (int): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
short_descriptor (str): [optional] # noqa: E501
space_after (btp_space10.BTPSpace10): [optional] # noqa: E501
space_before (btp_space10.BTPSpace10): [optional] # noqa: E501
space_default (bool): [optional] # noqa: E501
start_source_location (int): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
btp_builtin_identifier233_all_of.BTPBuiltinIdentifier233AllOf,
btp_node7.BTPNode7,
],
"oneOf": [],
}
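# A hedged usage sketch based on the keyword arguments documented in __init__
# above; the attribute values are illustrative only, and the composed allOf
# models must be importable for validation to succeed.
if __name__ == "__main__":
    example = BTPBuiltinIdentifier233(
        identifier="sqrt",
        atomic=True,
        documentation_type="FUNCTION",  # must be one of the allowed_values above
        node_id="n0",
    )
    print(example.identifier)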
"""
==============================
Compare over-sampling samplers
==============================
The following example aims to make a qualitative comparison between the
different over-sampling algorithms available in the imbalanced-learn package.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# The following function will be used to create a toy dataset. It uses
# :func:`~sklearn.datasets.make_classification` from scikit-learn, fixing
# some parameters.
# %%
from sklearn.datasets import make_classification
def create_dataset(
n_samples=1000,
weights=(0.01, 0.01, 0.98),
n_classes=3,
class_sep=0.8,
n_clusters=1,
):
return make_classification(
n_samples=n_samples,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=n_classes,
n_clusters_per_class=n_clusters,
weights=list(weights),
class_sep=class_sep,
random_state=0,
)
# %% [markdown]
# The following function will be used to plot the sample space after resampling
# to illustrate the specific behaviour of an algorithm.
# %%
def plot_resampling(X, y, sampler, ax, title=None):
X_res, y_res = sampler.fit_resample(X, y)
ax.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.8, edgecolor="k")
if title is None:
title = f"Resampling with {sampler.__class__.__name__}"
ax.set_title(title)
sns.despine(ax=ax, offset=10)
# %% [markdown]
# The following function will be used to plot the decision function of a
# classifier given some data.
# %%
import numpy as np
def plot_decision_function(X, y, clf, ax, title=None):
plot_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=0.4)
ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, edgecolor="k")
if title is not None:
ax.set_title(title)
# %% [markdown]
# Illustration of the influence of the balancing ratio
# ----------------------------------------------------
#
# We will first illustrate the influence of the balancing ratio on some toy
# data using a logistic regression classifier which is a linear model.
# %%
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
# %% [markdown]
# We will fit the model and plot its decision boundary to illustrate the impact
# of dealing with imbalanced classes.
# %%
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 12))
weights_arr = (
(0.01, 0.01, 0.98),
(0.01, 0.05, 0.94),
(0.2, 0.1, 0.7),
(0.33, 0.33, 0.33),
)
for ax, weights in zip(axs.ravel(), weights_arr):
X, y = create_dataset(n_samples=300, weights=weights)
clf.fit(X, y)
plot_decision_function(X, y, clf, ax, title=f"weight={weights}")
fig.suptitle(f"Decision function of {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# The greater the difference between the number of samples in each class, the
# poorer the classification results.
#
# Random over-sampling to balance the data set
# --------------------------------------------
#
# Random over-sampling can be used to repeat some samples and balance the
# number of samples between the classes. It can be seen that with this trivial
# approach the decision boundary is already less biased toward the majority
# class. The class :class:`~imblearn.over_sampling.RandomOverSampler`
# implements such a strategy.
# %%
from imblearn.pipeline import make_pipeline
from imblearn.over_sampling import RandomOverSampler
X, y = create_dataset(n_samples=100, weights=(0.05, 0.25, 0.7))
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 7))
clf.fit(X, y)
plot_decision_function(X, y, clf, axs[0], title="Without resampling")
sampler = RandomOverSampler(random_state=0)
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(X, y, model, axs[1], f"Using {model[0].__class__.__name__}")
fig.suptitle(f"Decision function of {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# By default, random over-sampling generates a bootstrap. The parameter
# `shrinkage` allows adding a small perturbation to the generated data
# to produce a smoothed bootstrap instead. The plot below shows the difference
# between the two data generation strategies.
# %%
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 7))
sampler.set_params(shrinkage=None)
plot_resampling(X, y, sampler, ax=axs[0], title="Normal bootstrap")
sampler.set_params(shrinkage=0.3)
plot_resampling(X, y, sampler, ax=axs[1], title="Smoothed bootstrap")
fig.suptitle(f"Resampling with {sampler.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# It looks like more samples are generated with the smoothed bootstrap. This is
# because the generated samples do not superimpose on the original samples, so
# they show up as additional distinct points in the plot.
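#
# A quick way to verify this is to count the number of distinct rows produced
# by each strategy: the smoothed bootstrap should yield many more unique points,
# since the added noise prevents exact duplicates of the original samples.
# %%
for shrink in (None, 0.3):
    sampler.set_params(shrinkage=shrink)
    X_check, _ = sampler.fit_resample(X, y)
    n_unique = np.unique(X_check, axis=0).shape[0]
    print(f"shrinkage={shrink}: {n_unique} unique rows out of {len(X_check)}")
# %% [markdown]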
#
# More advanced over-sampling using ADASYN and SMOTE
# --------------------------------------------------
#
# Instead of repeating the same samples when over-sampling, or perturbing the
# generated bootstrap samples, one can use more specific heuristics.
# :class:`~imblearn.over_sampling.ADASYN` and
# :class:`~imblearn.over_sampling.SMOTE` can be used in this case.
# %%
from imblearn import FunctionSampler  # to use an identity sampler
from imblearn.over_sampling import SMOTE, ADASYN
X, y = create_dataset(n_samples=150, weights=(0.1, 0.2, 0.7))
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
samplers = [
FunctionSampler(),
RandomOverSampler(random_state=0),
SMOTE(random_state=0),
ADASYN(random_state=0),
]
for ax, sampler in zip(axs.ravel(), samplers):
title = "Original dataset" if isinstance(sampler, FunctionSampler) else None
plot_resampling(X, y, sampler, ax, title=title)
fig.tight_layout()
# %% [markdown]
# The following plot illustrates the difference between
# :class:`~imblearn.over_sampling.ADASYN` and
# :class:`~imblearn.over_sampling.SMOTE`.
# :class:`~imblearn.over_sampling.ADASYN` will focus on the samples which are
# difficult to classify with a nearest-neighbors rule while regular
# :class:`~imblearn.over_sampling.SMOTE` will not make any distinction.
# Therefore, the decision function differs depending on the algorithm.
# %%
X, y = create_dataset(n_samples=150, weights=(0.05, 0.25, 0.7))
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(20, 6))
models = {
"Without sampler": clf,
"ADASYN sampler": make_pipeline(ADASYN(random_state=0), clf),
"SMOTE sampler": make_pipeline(SMOTE(random_state=0), clf),
}
for ax, (title, model) in zip(axs, models.items()):
model.fit(X, y)
plot_decision_function(X, y, model, ax=ax, title=title)
fig.suptitle(f"Decision function using a {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# Due to these sampling particularities, the different algorithms can give rise
# to some specific issues, as illustrated below.
# %%
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94), class_sep=0.8)
samplers = [SMOTE(random_state=0), ADASYN(random_state=0)]
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function with {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.suptitle("Particularities of over-sampling with SMOTE and ADASYN")
fig.tight_layout()
# %% [markdown]
# SMOTE proposes several variants by identifying specific samples to consider
# during the resampling. The borderline version
# (:class:`~imblearn.over_sampling.BorderlineSMOTE`) will detect which points
# to select, namely those lying on the border between two classes. The SVM
# version (:class:`~imblearn.over_sampling.SVMSMOTE`) will use the support
# vectors found by an SVM algorithm to create new samples, while the KMeans
# version (:class:`~imblearn.over_sampling.KMeansSMOTE`) will apply a
# clustering before generating samples in each cluster independently,
# depending on each cluster's density.
# %%
from imblearn.over_sampling import BorderlineSMOTE, KMeansSMOTE, SVMSMOTE
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94), class_sep=0.8)
fig, axs = plt.subplots(5, 2, figsize=(15, 30))
samplers = [
SMOTE(random_state=0),
BorderlineSMOTE(random_state=0, kind="borderline-1"),
BorderlineSMOTE(random_state=0, kind="borderline-2"),
KMeansSMOTE(random_state=0),
SVMSMOTE(random_state=0),
]
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function for {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.suptitle("Decision function and resampling using SMOTE variants")
fig.tight_layout()
# %% [markdown]
# When dealing with a mix of continuous and categorical features,
# :class:`~imblearn.over_sampling.SMOTENC` is the only method which can handle
# this case.
# %%
from collections import Counter
from imblearn.over_sampling import SMOTENC
rng = np.random.RandomState(42)
n_samples = 50
# Create a dataset of a mix of numerical and categorical data
X = np.empty((n_samples, 3), dtype=object)
X[:, 0] = rng.choice(["A", "B", "C"], size=n_samples).astype(object)
X[:, 1] = rng.randn(n_samples)
X[:, 2] = rng.randint(3, size=n_samples)
y = np.array([0] * 20 + [1] * 30)
print("The original imbalanced dataset")
print(sorted(Counter(y).items()))
print()
print("The first and last columns are containing categorical features:")
print(X[:5])
print()
smote_nc = SMOTENC(categorical_features=[0, 2], random_state=0)
X_resampled, y_resampled = smote_nc.fit_resample(X, y)
print("Dataset after resampling:")
print(sorted(Counter(y_resampled).items()))
print()
print("SMOTE-NC will generate categories for the categorical features:")
print(X_resampled[-5:])
print()
# %% [markdown]
# However, if the dataset is composed of only categorical features then one
# should use :class:`~imblearn.over_sampling.SMOTEN`.
# %%
from imblearn.over_sampling import SMOTEN
# Generate only categorical data
X = np.array(["A"] * 10 + ["B"] * 20 + ["C"] * 30, dtype=object).reshape(-1, 1)
y = np.array([0] * 20 + [1] * 40, dtype=np.int32)
print(f"Original class counts: {Counter(y)}")
print()
print(X[:5])
print()
sampler = SMOTEN(random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
print(f"Class counts after resampling {Counter(y_res)}")
print()
print(X_res[-5:])
print()
|
|
import unittest
from graphene.server.server import GrapheneServer
from graphene.query.planner import QueryPlanner
from graphene.storage import (StorageManager, GrapheneStore, Property)
from graphene.expressions import *
from graphene.errors import *
from graphene.traversal import Query
class TestQueryPlanner(unittest.TestCase):
@classmethod
def setUpClass(cls):
GrapheneStore.TESTING = True
graphene_store = GrapheneStore()
graphene_store.remove_test_datafiles()
cls.server = GrapheneServer()
cls.sm = cls.server.storage_manager
cls.server.doCommands("CREATE TYPE T ( a: int );", False)
cls.server.doCommands("INSERT NODE T(1), T(2), T(3), T(4), T(5);", False)
cls.server.doCommands("CREATE TYPE S ( c: int );", False)
cls.server.doCommands("INSERT NODE S(7);", False)
cls.server.doCommands("CREATE RELATION R ( b: int );", False)
cls.server.doCommands("INSERT RELATION T(a=1)-[R(2)]->T(a=2);")
cls.server.doCommands("INSERT RELATION T(a=1)-[R(3)]->T(a=3);")
cls.server.doCommands("INSERT RELATION T(a=2)-[R(6)]->T(a=3);")
cls.server.doCommands("INSERT RELATION T(a=3)-[R(12)]->T(a=4);")
cls.server.doCommands("INSERT RELATION T(a=3)-[R(15)]->T(a=5);")
cls.server.doCommands("INSERT RELATION S(c=7)-[R(0)]->T(a=5);")
cls.planner = QueryPlanner(cls.sm)
@classmethod
def tearDownClass(cls):
"""
Clean the database so that the tests are independent of one another
"""
cls.sm.close()
def assertListEqualUnsorted(self, given, expected):
self.assertListEqual(sorted(given), sorted(expected))
def test_get_schema(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
# (T)
self.assertListEqual(self.planner.get_schema((n1ni,)),
[('a', Property.PropertyType.int)])
# (t:T)
self.assertListEqual(self.planner.get_schema((n1,)),
[('t.a', Property.PropertyType.int)])
# (T)-[r:R]->(T) => no error, because duplicates will be stripped anyway
self.assertListEqual(self.planner.get_schema((n1ni, r, n2ni), True),
[('a', Property.PropertyType.int), ('r.b', Property.PropertyType.int),
('a', Property.PropertyType.int)])
# (T)-[R]->(T) => error (duplicate), if the set is the full schema
with self.assertRaises(DuplicatePropertyException):
self.planner.get_schema((n1ni, rni, n2ni), True)
# (T)-[R]->(T) => no error, if the set is not the full set (subset, so
# anything not identified will be stripped later)
self.assertListEqual(self.planner.get_schema((n1ni, rni, n2ni), False),
[('a', Property.PropertyType.int), ('b', Property.PropertyType.int),
('a', Property.PropertyType.int)])
# (t:T)-[R]->(t:T) => error (duplicate), same identifier
with self.assertRaises(DuplicatePropertyException):
self.planner.get_schema((n1, r, n1))
def test_check_query_single_node(self):
nc = (MatchNode("t", "T"),)
schema = self.planner.get_schema(nc)
# query_chain is None
try:
self.planner.check_query(schema, None)
except Exception:
self.fail("check_query raised an Exception unexpectedly.")
# With identifier
qc = ((('t', 'a'), '=', '1'),)
try:
self.planner.check_query(schema, qc)
except Exception:
self.fail("check_query raised an Exception unexpectedly.")
# Without identifier
qc = (((None, 'a'), '=', '1'),)
try:
self.planner.check_query(schema, qc)
except Exception:
self.fail("check_query raised an Exception unexpectedly.")
# No such property
qc = (((None, 'b'), '=', '1'),)
with self.assertRaises(NonexistentPropertyException):
self.planner.check_query(schema, qc)
# No such identifier
qc = ((('s', 'a'), '=', '1'),)
with self.assertRaises(NonexistentPropertyException):
self.planner.check_query(schema, qc)
def test_check_query_relations(self):
nc = (MatchNode("t", "T"), MatchRelation("r", "R"), MatchNode("t2", "T"))
schema = self.planner.get_schema(nc)
# With identifier
qc = ((('t', 'a'), '=', '1'),)
try:
self.planner.check_query(schema, qc)
except Exception:
self.fail("check_query raised an Exception unexpectedly.")
qc = ((('r', 'b'), '=', '1'),)
try:
self.planner.check_query(schema, qc)
except Exception:
self.fail("check_query raised an Exception unexpectedly.")
# With two identifiers
qc = ((('t', 'a'), '=', ('t2', 'a')),)
try:
self.planner.check_query(schema, qc)
except Exception:
self.fail("check_query raised an Exception unexpectedly.")
# Without identifier, ambiguous
qc = (((None, 'a'), '=', '1'),)
with self.assertRaises(AmbiguousPropertyException):
self.planner.check_query(schema, qc)
qc = ((('r', 'b'), '=', (None, 'a')),)
with self.assertRaises(AmbiguousPropertyException):
self.planner.check_query(schema, qc)
# Without identifier, unambiguous
qc = (((None, 'b'), '=', '1'),)
try:
self.planner.check_query(schema, qc)
except Exception:
self.fail("check_query raised an Exception unexpectedly.")
# No such identifier
qc = ((('s', 'a'), '=', '1'),)
with self.assertRaises(NonexistentPropertyException):
self.planner.check_query(schema, qc)
qc = ((('t', 'a'), '=', ('s', 'a')),)
with self.assertRaises(NonexistentPropertyException):
self.planner.check_query(schema, qc)
qc = ((('t', 'a'), '=', (None, 'q')),)
with self.assertRaises(NonexistentPropertyException):
self.planner.check_query(schema, qc)
def test_get_orderby_indexes(self):
nc = (MatchNode("t", "T"), MatchRelation("r", "R"), MatchNode("t2", "T"))
schema = self.planner.get_schema(nc)
chain = [((None, 'a'), 'ASC')]
with self.assertRaises(AmbiguousPropertyException):
self.planner.get_orderby_indexes(schema, chain)
chain = [((None, 'q'), 'ASC')]
with self.assertRaises(NonexistentPropertyException):
self.planner.get_orderby_indexes(schema, chain)
chain = [(('r', 'a'), 'ASC')]
with self.assertRaises(NonexistentPropertyException):
self.planner.get_orderby_indexes(schema, chain)
chain = [(('t', 'a'), 'ASC')]
result = self.planner.get_orderby_indexes(schema, chain)
self.assertListEqual(result, [(0, 1)])
chain = [(('t', 'a'), None)]
result = self.planner.get_orderby_indexes(schema, chain)
self.assertListEqual(result, [(0, 1)])
chain = [(('t', 'a'), 'DESC')]
result = self.planner.get_orderby_indexes(schema, chain)
self.assertListEqual(result, [(0, -1)])
chain = [((None, 'b'), 'ASC')]
result = self.planner.get_orderby_indexes(schema, chain)
self.assertListEqual(result, [(1, 1)])
def test_get_orderby_fn(self):
nc = (MatchNode("t", "T"), MatchRelation("r", "R"), MatchNode("t2", "T"))
schema_rel = self.planner.get_schema(nc)
schema = self.planner.get_schema((MatchNode("t", "T"),))
chain = [(('t', 'a'), 'ASC')]
cmp_fn = self.planner.get_orderby_fn(schema, chain, is_relation=False)
# Nodes are T[1], T[2], T[3], T[4], T[5]
node1, node2 = self.sm.get_node(1), self.sm.get_node(2)
self.assertListEqual(sorted([node1, node1], cmp=cmp_fn), [node1, node1])
self.assertListEqual(sorted([node2, node1], cmp=cmp_fn), [node1, node2])
chain = [(('t', 'a'), 'DESC')]
cmp_fn = self.planner.get_orderby_fn(schema, chain, is_relation=False)
self.assertListEqual(sorted([node1, node2], cmp=cmp_fn), [node2, node1])
chain = [(('r', 'b'), 'ASC')]
cmp_fn = self.planner.get_orderby_fn(schema_rel, chain, is_relation=True)
self.assertListEqual(sorted([([1,2,3], None), ([3,1,2], None)], cmp=cmp_fn), [([3,1,2], None), ([1,2,3], None)])
self.assertListEqual(sorted([([1,2,3], None), ([3,2,2], None)], cmp=cmp_fn), [([1,2,3], None), ([3,2,2], None)])
def test_execute_only_nodes(self):
# Without identifier
exp_schema = ['a']
exp_vals = [[1], [2], [3], [4], [5]]
schema, results = self.planner.execute((MatchNode(None, "T"),), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
# With identifier
exp_schema = ['t.a']
exp_vals = [[1], [2], [3], [4], [5]]
schema, results = self.planner.execute((MatchNode("t", "T"),), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
def test_execute_one_relation(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
# (t:T)-[r:R]->(t2:T)
exp_schema = ['t.a', 'r.b', 't2.a']
exp_vals = [[1,2,2], [1,3,3], [2,6,3], [3,12,4], [3,15,5]]
schema, results = self.planner.execute((n1, r, n2), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
# (t:T)-[R]->(t2:T)
exp_schema = ['t.a', 't2.a']
exp_vals = [[1,2], [1,3], [2,3], [3,4], [3,5]]
schema, results = self.planner.execute((n1, rni, n2), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
# (t:T)-[R]->(T)
exp_schema = ['t.a']
exp_vals = [[1], [1], [2], [3], [3]]
schema, results = self.planner.execute((n1, rni, n2ni), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
# (T)-[R]->(t2:T)
exp_schema = ['t2.a']
exp_vals = [[2], [3], [3], [4], [5]]
schema, results = self.planner.execute((n1ni, rni, n2), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
# (T)-[r:R]->(T)
exp_schema = ['r.b']
exp_vals = [[2], [3], [6], [12], [15]]
schema, results = self.planner.execute((n1ni, r, n2ni), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
# (t:T)-[r:R]->(T)
exp_schema = ['t.a', 'r.b']
exp_vals = [[1,2], [1,3], [2,6], [3,12], [3,15]]
schema, results = self.planner.execute((n1, r, n2ni), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqual(results, exp_vals)
def test_execute_multi_relation(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
n3, n3ni = MatchNode("t3", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
r2, r2ni = MatchRelation("r2", "R"), MatchRelation(None, "R")
# (t:T)-[r:R]->(t2:T)-[r2:R]->(t3:T)
exp_schema = ['t.a', 'r.b', 't2.a', 'r2.b', 't3.a']
exp_vals = [[1,2,2,6,3], [1,3,3,12,4], [1,3,3,15,5], [2,6,3,12,4], [2,6,3,15,5]]
schema, results = self.planner.execute((n1, r, n2, r2, n3), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqualUnsorted(results, exp_vals)
# (t:T)-[R]->(T)-[R]->(t3:T)
exp_schema = ['t.a', 't3.a']
exp_vals = [[1,3], [1,4], [1,5], [2,4], [2,5]]
schema, results = self.planner.execute((n1, rni, n2ni, r2ni, n3), None, None)
self.assertListEqual(schema, exp_schema)
self.assertListEqualUnsorted(results, exp_vals)
def test_execute_with_query(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
n3, n3ni = MatchNode("t3", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
r2, r2ni = MatchRelation("r2", "R"), MatchRelation(None, "R")
# (t:T)-[r:R]->(t2:T)-[r2:R]->(t3:T)
exp_schema = ['t.a', 'r.b', 't2.a', 'r2.b', 't3.a']
# node queries
exp_vals = [[1,2,2,6,3], [1,3,3,12,4], [1,3,3,15,5]]
schema, results = self.planner.execute((n1, r, n2, r2, n3), ((('t','a'),'=','1'),), None)
self.assertListEqual(schema, exp_schema)
self.assertListEqualUnsorted(results, exp_vals)
exp_vals = [[1,3,3,12,4], [1,3,3,15,5], [2,6,3,12,4], [2,6,3,15,5]]
schema, results = self.planner.execute((n1, r, n2, r2, n3), ((('t2','a'),'=','3'),), None)
self.assertListEqualUnsorted(results, exp_vals)
exp_vals = [[1,3,3,12,4], [2,6,3,12,4]]
schema, results = self.planner.execute((n1, r, n2, r2, n3), ((('t3','a'),'=','4'),), None)
self.assertListEqualUnsorted(results, exp_vals)
# relation queries
exp_vals = [[1,2,2,6,3]]
schema, results = self.planner.execute((n1, r, n2, r2, n3), ((('r','b'),'=','2'),), None)
self.assertListEqualUnsorted(results, exp_vals)
def test_execute_with_return(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
n3, n3ni = MatchNode("t3", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
r2, r2ni = MatchRelation("r2", "R"), MatchRelation(None, "R")
# (t:T)-[r:R]->(t2:T)-[r2:R]->(t3:T) RETURN t.a
exp_schema = ['t.a']
# node queries
exp_vals = [[1],[1],[1],[2],[2]]
schema, results = self.planner.execute((n1, r, n2, r2, n3), None, (('t', 'a'),))
self.assertListEqual(schema, exp_schema)
self.assertListEqualUnsorted(results, exp_vals)
def test_execute_with_ambiguous_names(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
n3, n3ni = MatchNode("t3", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
r2, r2ni = MatchRelation("r2", "R"), MatchRelation(None, "R")
# (t:T)-[r:R]->(t2:T)-[r2:R]->(t3:T) WHERE a = 1
with self.assertRaises(AmbiguousPropertyException):
self.planner.execute((n1, r, n2, r2, n3), (((None, 'a'), '=', '1'),), None)
# (t:T)-[r:R]->(t2:T)-[r2:R]->(t3:T) RETURN a
with self.assertRaises(AmbiguousPropertyException):
self.planner.execute((n1, r, n2, r2, n3), None, ((None, 'a'),))
def test_execute_with_duplicate_names(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
with self.assertRaises(DuplicatePropertyException):
self.planner.execute((n1, r, n1), None, None)
def test_execute_limit(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
exp_vals = [[1], [2], [3]]
schema, results = self.planner.execute((n1ni,), None, None, limit=3)
self.assertListEqual(results, exp_vals)
exp_vals = [[1], [2], [3], [4], [5]]
schema, results = self.planner.execute((n1ni,), None, None, limit=0)
self.assertListEqual(results, exp_vals)
schema, results = self.planner.execute((n1ni,), None, None, limit=6)
self.assertListEqual(results, exp_vals)
exp_vals = [[1,2,2], [1,3,3], [2,6,3]]
schema, results = self.planner.execute((n1, r, n2), None, None, limit=3)
self.assertListEqual(results, exp_vals)
def test_execute_orderby(self):
#ni = no ident
n1, n1ni = MatchNode("t", "T"), MatchNode(None, "T")
n2, n2ni = MatchNode("t2", "T"), MatchNode(None, "T")
r, rni = MatchRelation("r", "R"), MatchRelation(None, "R")
exp_vals = [[5], [4], [3], [2], [1]]
schema, results = self.planner.execute((n1ni,), None, None, orderby=[((None, 'a'), 'DESC')])
self.assertListEqual(results, exp_vals)
exp_vals = [[3, 12, 4], [3, 15, 5], [2,6,3], [1,2,2], [1,3,3]]
schema, results = self.planner.execute((n1, r, n2), None, None, orderby=[(('t', 'a'), 'DESC')])
self.assertListEqual(results, exp_vals)
exp_vals = [[3, 15, 5], [3, 12, 4], [2,6,3], [1,3,3], [1,2,2]]
schema, results = self.planner.execute((n1, r, n2), None, None, orderby=[(('t', 'a'), 'DESC'), (('r', 'b'), 'DESC')])
self.assertListEqual(results, exp_vals)
|
|
from collections import defaultdict
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Set
from datahub.ingestion.source.aws.sagemaker_processors.common import (
SagemakerSourceReport,
)
if TYPE_CHECKING:
from mypy_boto3_sagemaker import SageMakerClient
from mypy_boto3_sagemaker.type_defs import (
ActionSummaryTypeDef,
ArtifactSummaryTypeDef,
AssociationSummaryTypeDef,
ContextSummaryTypeDef,
)
@dataclass
class LineageInfo:
"""
Helper class for containing extracted SageMaker lineage info.
"""
# map from model URIs to deployed endpoints
model_uri_endpoints: DefaultDict[str, Set[str]] = field(
default_factory=lambda: defaultdict(set)
)
# map from model images to deployed endpoints
model_image_endpoints: DefaultDict[str, Set[str]] = field(
default_factory=lambda: defaultdict(set)
)
# map from group ARNs to model URIs
model_uri_to_groups: DefaultDict[str, Set[str]] = field(
default_factory=lambda: defaultdict(set)
)
# map from group ARNs to model images
model_image_to_groups: DefaultDict[str, Set[str]] = field(
default_factory=lambda: defaultdict(set)
)
@dataclass
class LineageProcessor:
sagemaker_client: "SageMakerClient"
env: str
report: SagemakerSourceReport
nodes: Dict[str, Dict[str, Any]] = field(default_factory=dict)
lineage_info: LineageInfo = field(default_factory=LineageInfo)
def get_all_actions(self) -> List["ActionSummaryTypeDef"]:
"""
List all actions in SageMaker.
"""
actions = []
# see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_actions
paginator = self.sagemaker_client.get_paginator("list_actions")
for page in paginator.paginate():
actions += page["ActionSummaries"]
return actions
def get_all_artifacts(self) -> List["ArtifactSummaryTypeDef"]:
"""
List all artifacts in SageMaker.
"""
artifacts = []
# see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_artifacts
paginator = self.sagemaker_client.get_paginator("list_artifacts")
for page in paginator.paginate():
artifacts += page["ArtifactSummaries"]
return artifacts
def get_all_contexts(self) -> List["ContextSummaryTypeDef"]:
"""
List all contexts in SageMaker.
"""
contexts = []
# see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_contexts
paginator = self.sagemaker_client.get_paginator("list_contexts")
for page in paginator.paginate():
contexts += page["ContextSummaries"]
return contexts
def get_incoming_edges(self, node_arn: str) -> List["AssociationSummaryTypeDef"]:
"""
Get all incoming edges for a node in the lineage graph.
"""
edges = []
# see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_associations
paginator = self.sagemaker_client.get_paginator("list_associations")
for page in paginator.paginate(DestinationArn=node_arn):
edges += page["AssociationSummaries"]
return edges
def get_outgoing_edges(self, node_arn: str) -> List["AssociationSummaryTypeDef"]:
"""
Get all outgoing edges for a node in the lineage graph.
"""
edges = []
# see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_associations
paginator = self.sagemaker_client.get_paginator("list_associations")
for page in paginator.paginate(SourceArn=node_arn):
edges += page["AssociationSummaries"]
return edges
def get_model_deployment_lineage(self, deployment_node_arn: str) -> None:
"""
Get the lineage of a model deployment (input models and output endpoints).
"""
# if a node's action type is a ModelDeployment, then the incoming edges will be
# the model(s) being deployed, and the outgoing edges will be the endpoint(s) created
incoming_edges = self.get_incoming_edges(deployment_node_arn)
outgoing_edges = self.get_outgoing_edges(deployment_node_arn)
# models are linked to endpoints not by their ARNs, but by their output files and/or images
model_uris = set()
model_images = set()
# check the incoming edges for model URIs and images
for edge in incoming_edges:
source_node = self.nodes.get(edge["SourceArn"])
if source_node is None:
continue
source_uri = source_node.get("Source", {}).get("SourceUri")
if edge["SourceType"] == "Model" and source_uri is not None:
model_uris.add(source_uri)
elif edge["SourceType"] == "Image" and source_uri is not None:
model_images.add(source_uri)
model_endpoints = set()
# check the outgoing edges for endpoints resulting from the deployment
for edge in outgoing_edges:
destination_node = self.nodes.get(edge["DestinationArn"])
if destination_node is None:
continue
source_uri = destination_node.get("Source", {}).get("SourceUri")
source_type = destination_node.get("Source", {}).get("SourceType")
if (
edge["DestinationType"] == "Endpoint"
and source_uri is not None
and source_type == "ARN"
):
model_endpoints.add(source_uri)
for model_uri in model_uris:
self.lineage_info.model_uri_endpoints[model_uri] |= model_endpoints
for model_image in model_images:
self.lineage_info.model_image_endpoints[model_image] |= model_endpoints
def get_model_group_lineage(
self, model_group_node_arn: str, node: Dict[str, Any]
) -> None:
"""
Get the lineage of a model group (models part of the group).
"""
model_group_arn = node.get("Source", {}).get("SourceUri")
model_source_type = node.get("Source", {}).get("SourceType")
# if group ARN is invalid
if model_group_arn is None or model_source_type != "ARN":
return
# check incoming edges for model packages under the group
group_incoming_edges = self.get_incoming_edges(model_group_node_arn)
for edge in group_incoming_edges:
# if edge is a model package, then look for models in its source edges
if edge["SourceType"] == "Model":
model_package_incoming_edges = self.get_incoming_edges(
edge["SourceArn"]
)
# check incoming edges for models under the model package
for model_package_edge in model_package_incoming_edges:
source_node = self.nodes.get(model_package_edge["SourceArn"])
if source_node is None:
continue
source_uri = source_node.get("Source", {}).get("SourceUri")
# add model_group_arn -> model_uri mapping
if (
model_package_edge["SourceType"] == "Model"
and source_uri is not None
):
self.lineage_info.model_uri_to_groups[source_uri].add(
model_group_arn
)
# add model_group_arn -> model_image mapping
elif (
model_package_edge["SourceType"] == "Image"
and source_uri is not None
):
self.lineage_info.model_image_to_groups[source_uri].add(
model_group_arn
)
def get_lineage(self) -> LineageInfo:
"""
Get the lineage of all artifacts in SageMaker.
"""
for action in self.get_all_actions():
self.nodes[action["ActionArn"]] = {**action, "node_type": "action"}
for artifact in self.get_all_artifacts():
self.nodes[artifact["ArtifactArn"]] = {**artifact, "node_type": "artifact"}
for context in self.get_all_contexts():
self.nodes[context["ContextArn"]] = {**context, "node_type": "context"}
for node_arn, node in self.nodes.items():
# get model-endpoint lineage
if (
node["node_type"] == "action"
and node.get("ActionType") == "ModelDeployment"
):
self.get_model_deployment_lineage(node_arn)
# get model-group lineage
if (
node["node_type"] == "context"
and node.get("ContextType") == "ModelGroup"
):
self.get_model_group_lineage(node_arn, node)
return self.lineage_info
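if __name__ == "__main__":
    # Minimal usage sketch: assumes AWS credentials are configured for boto3
    # and that SagemakerSourceReport can be instantiated without arguments.
    import boto3
    processor = LineageProcessor(
        sagemaker_client=boto3.client("sagemaker"),
        env="PROD",
        report=SagemakerSourceReport(),
    )
    lineage = processor.get_lineage()
    print(f"model URIs linked to endpoints: {len(lineage.model_uri_endpoints)}")
    print(f"model images linked to endpoints: {len(lineage.model_image_endpoints)}")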
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Library for identifying articles that require side-chaining in a prediction
model's workflow.
Copyright (c) 2017 Morten Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import yaml
import logging
import requests
from time import sleep
from collections import defaultdict
## Maximum number of articles, set to 50 for now, unless we get to be a bot,
## then it can be raised to 500.
MAX_ITEMS = 50
## Maximum number of retries we'll make to the Wikidata API
MAX_RETRIES = 3
## API URLs
WIKI_API_URL = 'https://{lang}.wikipedia.org/w/api.php'
WIKIDATA_API_URL = 'https://www.wikidata.org/w/api.php'
class RuleExistsError(Exception):
'''
A rule is attempted to be added that already exists.
'''
pass
class NoSuchRuleError(Exception):
'''
A rule is attempted to be modified that doesn't exist.
'''
pass
class TooManyItemsError(Exception):
'''
We are requested to investigate side-chaining for more items than
we can process in single requests.
'''
pass
class PageTitleError(Exception):
'''
The Wikipedia API returned a page for which we do not have the title,
suggesting something went terribly wrong.
'''
pass
class Ruleset:
'''
A set of rules defining side-chains through predicates (Wikidata properties)
and objects (Wikidata entities) that cause certain articles to receive a
pre-defined importance rating.
'''
def __init__(self):
# Rules map a predicate (key) to an object (key)
# to an importance rating (value). This allows for fast
# lookup of whether a claim matches a rule.
self.rules = defaultdict(dict)
def add_rule(self, predicate_p, object_q, importance_rating):
'''
Add the given rule to the ruleset.
:param predicate_p: identifier for the predicate property
:type predicate_p: str
:param object_q: identifier for the object of the predicate
:type object_q: str
:param importance_rating: the importance rating to give an article
that matches the given predicate-object rule
:type importance_rating: str
'''
if predicate_p in self.rules and \
object_q in self.rules[predicate_p]:
raise(RuleExistsError)
self.rules[predicate_p][object_q] = importance_rating
def modify_rule(self, predicate_p, object_q, importance_rating):
'''
Modify the given rule to match the supplied parameters.
:param predicate_p: identifier for the predicate property
:type predicate_p: str
:param object_q: identifier for the object of the predicate
:type object_q: str
:param importance_rating: the importance rating to give an article
that matches the given predicate-object rule
:type importance_rating: str
'''
if not predicate_p in self.rules or \
not object_q in self.rules[predicate_p]:
raise(NoSuchRuleError)
self.rules[predicate_p][object_q] = importance_rating
def delete_rule(self, predicate_p, object_q):
'''
Delete the rule matching the given predicate and object. If there
are no remaining rules for the given predicate, the predicate is
also deleted.
:param predicate_p: identifier for the predicate property
:type predicate_p: str
:param object_q: identifier for the object of the predicate
:type object_q: str
'''
if not predicate_p in self.rules or \
not object_q in self.rules[predicate_p]:
raise(NoSuchRuleError)
del(self.rules[predicate_p][object_q])
if not self.rules[predicate_p]:
del(self.rules[predicate_p])
def load(rule_file):
'''
Load in the rules defined in the given rule file and return it
as a `Ruleset`
:param rule_file: path to the file containing the rules
:type rule_file: str
'''
with open(rule_file) as infile:
rules = yaml.safe_load(infile)
project_name = ""
ruleset = Ruleset()
for (proj_name, pred_p, obj_q, imp_rating) in rules:
project_name = proj_name
## Remove "wd:" and "wdt:" in pred_p and obj_q if present
if ":" in pred_p:
pred_p = pred_p.split(":")[1]
if ":" in obj_q:
obj_q = obj_q.split(":")[1]
ruleset.add_rule(pred_p, obj_q, imp_rating)
return((project_name, ruleset))
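## A plausible shape for such a rule file (an assumption based on how the rows
## are unpacked above): a YAML list of
## [project name, predicate, object, importance rating] rows, e.g.
##
## - [Medicine, wdt:P31, wd:Q12136, Top]
## - [Medicine, wdt:P279, wd:Q12136, High]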
def sidechain_entities(entity_data, ruleset):
'''
Process a set of entity data from Wikidata's API and identify any entities
that should be side-chained based on the given set of rules.
:param entity_data: claims about entities, as returned from Wikidata's API
:type entity_data: dict
:param ruleset: the side-chaining ruleset
:type ruleset: `Ruleset`
'''
## By default, all entities are not side-chained, and we move entities
## over if we find a rule match
non_sidechained_entities = set(entity_data.keys())
sidechained_entities = {}
for entity in entity_data.values():
if "redirects" in entity:
## This is a Wikidata redirect, cannot be side-chained.
## If we don't ignore them, the entity ID selected below will
## point to the redirect.
continue
try:
qid = entity['id']
except KeyError:
logging.warning('unable to get QID for {}'.format(entity))
continue
if not 'claims' in entity:
## No claims about this entity, it should not be side-chained
continue
## Importance ratings of matched rules for this entity
ratings = []
for (claim, claimdata) in entity['claims'].items():
## If this claim does not occur in the ruleset, this claim
## cannot lead to the article being side-chained
if not claim in ruleset.rules:
continue
if isinstance(claimdata, dict):
try:
object_q = claimdata['mainsnak']['datavalue']['value']['id']
if object_q in ruleset.rules[claim]:
ratings.append(ruleset.rules[claim][object_q])
except KeyError:
## Claim does not point to a Wikidata entity
logging.warning('not a wikidata entity:')
logging.warning(claimdata)
continue
except TypeError:
## Something along the line might not be a dict,
## which means there is not a side-chain possibility
logging.warning('TypeError:')
logging.warning(claimdata)
continue
elif isinstance(claimdata, list):
for c in claimdata:
try:
object_q = c['mainsnak']['datavalue']['value']['id']
if object_q in ruleset.rules[claim]:
ratings.append(ruleset.rules[claim][object_q])
except KeyError:
## Claim does not point to a Wikidata entity
logging.warning('not a Wikidata entity?')
logging.warning(c)
continue
except TypeError:
## Something along the line might not be a dict,
## which means there is not a side-chain possibility
logging.warning('TypeError:')
logging.warning(c)
continue
if ratings:
non_sidechained_entities.remove(qid)
sidechained_entities[qid] = ratings
## Return the sidechain
return({'sidechain': sidechained_entities,
'non_sidechain': list(non_sidechained_entities)})
def sidechain_q(lang, wikidata_items, ruleset):
'''
Determine which of the given wikidata items should be side-chained
in the given context of a WikiProject, as defined through the ruleset.
:param lang: language code of the Wikipedia edition we are working with
:type lang: str
:param wikidata_items: list of Wikidata entity identifiers (Q-something
as strings) that are to be tested for side-chaining
:type wikidata_items: list
:param ruleset: the set of rules to be used for side-chaining
:type ruleset: `Ruleset`
'''
if len(wikidata_items) > MAX_ITEMS:
raise(TooManyItemsError)
wikidata_query_params = {'action': 'wbgetentities',
'sites': '{}wiki'.format(lang),
'languages': lang,
'maxlag': 5,
'format': 'json',
'ids': "|".join(wikidata_items)}
# get the Wikidata entities for all the associated articles
wikidata = wd_api_request(wikidata_query_params)
return(sidechain_entities(wikidata['entities'], ruleset))
def sidechain(lang, articles, ruleset):
'''
Determine which of the articles should be side-chained in the given
context of a WikiProject, per the given set of rules.
:param lang: language code for the Wikipedia edition we are working with
:type lang: str
:param articles: article titles to determine side-chaining for
:type articles: list
:param ruleset: the set of rules to be used for side-chaining
:type ruleset: `Ruleset`
'''
if len(articles) > MAX_ITEMS:
raise(TooManyItemsError)
## By default, all articles are not sidechained, we'll move them over
## if we find evidence to the contrary.
non_sidechain = set(articles)
sidechain = defaultdict(list)
# Note: For future reference and performance improvements, this can be
# easily looked up in the page_props table, but requires a DB connection.
wiki_query_params = {'action': 'query',
'prop': 'pageprops',
'titles': '', # titles added later
'format': 'json'}
wikidata_query_params = {'action': 'wbgetentities',
'sites': '{}wiki'.format(lang),
'ids': '', # added later
'languages': lang,
'maxlag': 5,
'format': 'json'}
## Mapping Wikidata identifier to article title
q_title_map = {}
# get the Wikidata item associated with every article
wiki_query_params['titles'] = '|'.join(articles)
r = requests.get(WIKI_API_URL.format(lang=lang),
params=wiki_query_params)
r_json = r.json()
pages = r_json['query']['pages']
for pagedata in pages.values():
## Q: should we handle title normalization in results?
## Title doesn't match any known page in this Wikipedia
if "missing" in pagedata:
continue
page_title = pagedata['title']
if not page_title in non_sidechain:
print('Missing page title {}'.format(page_title))
raise(PageTitleError)
try:
wikibase_item = pagedata['pageprops']['wikibase_item']
q_title_map[wikibase_item] = page_title
except KeyError:
continue # article does not have a Wikidata item associated with it
sidechain_result = sidechain_q(lang, q_title_map.keys(), ruleset)
# Translate Wikidata QIDs to article titles if anything got side-chained
if sidechain_result['sidechain']:
for (qid, ratings) in sidechain_result['sidechain'].items():
page_title = q_title_map[qid]
non_sidechain.remove(page_title)
sidechain[page_title] = ratings
## Return the sidechain and the non-sidechain
return({'sidechain': sidechain,
'non_sidechain': list(non_sidechain)})
def wd_api_request(params):
'''
Make an HTTP request to the Wikidata API with the given parameters and
return the JSON dict from it.
:param params: URL parameters
:type params: dict
'''
content = {}
done = False
num_retries = 0
while not done and num_retries < MAX_RETRIES:
response = requests.get(WIKIDATA_API_URL, params=params)
if response.status_code != 200:
logging.warning('Wikidata returned status {}'.format(
response.status_code))
done = True
continue
try:
content = response.json()
except ValueError:
logging.warning('Unable to decode Wikidata response as JSON')
sleep(1)
num_retries += 1
continue
except KeyError:
logging.warning("Wikidata response keys not as expected")
sleep(1)
num_retries += 1
continue
if "error" in content and content['error']['code'] == 'maxlag':
## Pause before trying again
ptime = max(5, int(response.headers['Retry-After']))
logging.warning('WD API is lagged, waiting {} seconds to try again'.format(ptime))
sleep(ptime)
continue
done = True
continue
return(content)
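if __name__ == "__main__":
    ## Minimal usage sketch: build a one-rule ruleset by hand and test a couple
    ## of article titles against English Wikipedia. The rule below (P31
    ## "instance of" -> Q515 "city", rated "Top") is purely illustrative, and
    ## the call requires network access to the Wikipedia and Wikidata APIs.
    logging.basicConfig(level=logging.WARNING)
    ruleset = Ruleset()
    ruleset.add_rule('P31', 'Q515', 'Top')
    result = sidechain('en', ['Oslo', 'Bergen'], ruleset)
    print('side-chained: {}'.format(result['sidechain']))
    print('not side-chained: {}'.format(result['non_sidechain']))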
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import collections
import functools
import time
try:
from collections import UserDict as IterableUserDict # Python 3
except ImportError:
from UserDict import IterableUserDict # Python 2
import iso8601
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova.compute import task_states
from nova.compute import vm_states
from nova import context as context_module
from nova import exception
from nova.i18n import _LI, _LW
from nova import objects
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
from nova import utils
from nova.virt import hardware
host_manager_opts = [
cfg.MultiStrOpt('scheduler_available_filters',
default=['nova.scheduler.filters.all_filters'],
help='Filter classes available to the scheduler which may '
'be specified more than once. An entry of '
'"nova.scheduler.filters.all_filters" '
'maps to all filters included with nova.'),
cfg.ListOpt('scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'RamFilter',
'DiskFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'ServerGroupAntiAffinityFilter',
'ServerGroupAffinityFilter',
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
cfg.ListOpt('scheduler_weight_classes',
default=['nova.scheduler.weights.all_weighers'],
help='Which weight class names to use for weighing hosts'),
cfg.BoolOpt('scheduler_tracks_instance_changes',
default=True,
help='Determines if the Scheduler tracks changes to instances '
'to help with its filtering decisions.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
HOST_INSTANCE_SEMAPHORE = "host_instance"
class ReadOnlyDict(IterableUserDict):
"""A read-only dict."""
def __init__(self, source=None):
self.data = {}
if source:
self.data.update(source)
def __setitem__(self, key, item):
raise TypeError()
def __delitem__(self, key):
raise TypeError()
def clear(self):
raise TypeError()
def pop(self, key, *args):
raise TypeError()
def popitem(self):
raise TypeError()
def update(self):
raise TypeError()
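# Example: wrapping a plain dict makes accidental mutation fail fast, e.g.
#   ro = ReadOnlyDict({'binary': 'nova-compute'})
#   ro['binary'] = 'other'  # raises TypeError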
# Representation of a single metric value from a compute node.
MetricItem = collections.namedtuple(
'MetricItem', ['value', 'timestamp', 'source'])
@utils.expects_func_args('self', 'instance')
def set_update_time_on_success(function):
"""Set updated time of HostState when consuming succeed."""
@functools.wraps(function)
def decorated_function(self, instance):
return_value = None
try:
return_value = function(self, instance)
except Exception as e:
# Ignores exception raised from consume_from_instance() so that
# booting instance would fail in the resource claim of compute
# node, other suitable node may be chosen during scheduling retry.
LOG.warning(_LW("Selected host: %(host)s failed to consume from "
"instance. Error: %(error)s"),
{'host': self.host, 'error': e},
instance=instance)
else:
now = timeutils.utcnow()
# NOTE(sbauza): Objects are UTC tz-aware by default
self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())
return return_value
return decorated_function
class HostState(object):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def __init__(self, host, node, compute=None):
self.host = host
self.nodename = node
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.total_usable_ram_mb = 0
self.total_usable_disk_gb = 0
self.disk_mb_used = 0
self.free_ram_mb = 0
self.free_disk_mb = 0
self.vcpus_total = 0
self.vcpus_used = 0
self.pci_stats = None
self.numa_topology = None
# Additional host information from the compute node stats:
self.num_instances = 0
self.num_io_ops = 0
# Other information
self.host_ip = None
self.hypervisor_type = None
self.hypervisor_version = None
self.hypervisor_hostname = None
self.cpu_info = None
self.supported_instances = None
# Resource oversubscription values for the compute host:
self.limits = {}
# Generic metrics from compute nodes
self.metrics = None
# List of aggregates the host belongs to
self.aggregates = []
# Instances on this host
self.instances = {}
self.updated = None
if compute:
self.update_from_compute_node(compute)
def update_service(self, service):
self.service = ReadOnlyDict(service)
def update_from_compute_node(self, compute):
"""Update information about a host from a ComputeNode object."""
if (self.updated and compute.updated_at
and self.updated > compute.updated_at):
return
all_ram_mb = compute.memory_mb
# Assume virtual size is all consumed by instances if use qcow2 disk.
free_gb = compute.free_disk_gb
least_gb = compute.disk_available_least
if least_gb is not None:
if least_gb > free_gb:
# can occur when an instance in database is not on host
LOG.warning(_LW("Host %(hostname)s has more disk space than "
"database expected "
"(%(physical)sgb > %(database)sgb)"),
{'physical': least_gb, 'database': free_gb,
'hostname': compute.hypervisor_hostname})
free_gb = min(least_gb, free_gb)
free_disk_mb = free_gb * 1024
self.disk_mb_used = compute.local_gb_used * 1024
# NOTE(jogo) free_ram_mb can be negative
self.free_ram_mb = compute.free_ram_mb
self.total_usable_ram_mb = all_ram_mb
self.total_usable_disk_gb = compute.local_gb
self.free_disk_mb = free_disk_mb
self.vcpus_total = compute.vcpus
self.vcpus_used = compute.vcpus_used
self.updated = compute.updated_at
self.numa_topology = compute.numa_topology
self.pci_stats = pci_stats.PciDeviceStats(
compute.pci_device_pools)
# All virt drivers report host_ip
self.host_ip = compute.host_ip
self.hypervisor_type = compute.hypervisor_type
self.hypervisor_version = compute.hypervisor_version
self.hypervisor_hostname = compute.hypervisor_hostname
self.cpu_info = compute.cpu_info
if compute.supported_hv_specs:
self.supported_instances = [spec.to_list() for spec
in compute.supported_hv_specs]
else:
self.supported_instances = []
# Don't store stats directly in host_state to make sure these don't
# overwrite any values, or get overwritten themselves. Store in self so
# filters can schedule with them.
self.stats = compute.stats or {}
# Track number of instances on host
self.num_instances = int(self.stats.get('num_instances', 0))
self.num_io_ops = int(self.stats.get('io_workload', 0))
# update metrics
self.metrics = objects.MonitorMetricList.from_json(compute.metrics)
@set_update_time_on_success
def consume_from_instance(self, instance):
"""Incrementally update host state from an instance."""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
vcpus = instance['vcpus']
self.free_ram_mb -= ram_mb
self.free_disk_mb -= disk_mb
self.vcpus_used += vcpus
# Track number of instances on host
self.num_instances += 1
pci_requests = instance.get('pci_requests')
# NOTE(danms): Instance here is still a dict, which is converted from
# an object. The pci_requests are a dict as well. Convert this when
# we get an object all the way to this path.
if pci_requests and pci_requests['requests'] and self.pci_stats:
pci_requests = objects.InstancePCIRequests \
.from_request_spec_instance_props(pci_requests)
pci_requests = pci_requests.requests
else:
pci_requests = None
# Calculate the numa usage
host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
self)
instance_numa_topology = hardware.instance_topology_from_instance(
instance)
instance['numa_topology'] = hardware.numa_fit_instance_to_host(
host_numa_topology, instance_numa_topology,
limits=self.limits.get('numa_topology'),
pci_requests=pci_requests, pci_stats=self.pci_stats)
if pci_requests:
instance_cells = None
if instance['numa_topology']:
instance_cells = instance['numa_topology'].cells
self.pci_stats.apply_requests(pci_requests, instance_cells)
self.numa_topology = hardware.get_host_numa_usage_from_instance(
self, instance)
vm_state = instance.get('vm_state', vm_states.BUILDING)
task_state = instance.get('task_state')
if vm_state == vm_states.BUILDING or task_state in [
task_states.RESIZE_MIGRATING, task_states.REBUILDING,
task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
task_states.IMAGE_BACKUP, task_states.UNSHELVING,
task_states.RESCUING]:
self.num_io_ops += 1
def __repr__(self):
return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s" %
(self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
self.num_io_ops, self.num_instances))
class HostManager(object):
"""Base HostManager class."""
# Can be overridden in a subclass
def host_state_cls(self, host, node, **kwargs):
return HostState(host, node, **kwargs)
def __init__(self):
self.host_state_map = {}
self.filter_handler = filters.HostFilterHandler()
filter_classes = self.filter_handler.get_matching_classes(
CONF.scheduler_available_filters)
self.filter_cls_map = {cls.__name__: cls for cls in filter_classes}
self.filter_obj_map = {}
self.default_filters = self._choose_host_filters(self._load_filters())
self.weight_handler = weights.HostWeightHandler()
weigher_classes = self.weight_handler.get_matching_classes(
CONF.scheduler_weight_classes)
self.weighers = [cls() for cls in weigher_classes]
# Dict of aggregates keyed by their ID
self.aggs_by_id = {}
# Dict of set of aggregate IDs keyed by the name of the host belonging
# to those aggregates
self.host_aggregates_map = collections.defaultdict(set)
self._init_aggregates()
self.tracks_instance_changes = CONF.scheduler_tracks_instance_changes
# Dict of instances and status, keyed by host
self._instance_info = {}
if self.tracks_instance_changes:
self._init_instance_info()
def _load_filters(self):
return CONF.scheduler_default_filters
def _init_aggregates(self):
elevated = context_module.get_admin_context()
aggs = objects.AggregateList.get_all(elevated)
for agg in aggs:
self.aggs_by_id[agg.id] = agg
for host in agg.hosts:
self.host_aggregates_map[host].add(agg.id)
def update_aggregates(self, aggregates):
"""Updates internal HostManager information about aggregates."""
if isinstance(aggregates, (list, objects.AggregateList)):
for agg in aggregates:
self._update_aggregate(agg)
else:
self._update_aggregate(aggregates)
def _update_aggregate(self, aggregate):
self.aggs_by_id[aggregate.id] = aggregate
for host in aggregate.hosts:
self.host_aggregates_map[host].add(aggregate.id)
# Refreshing the mapping dict to remove all hosts that are no longer
# part of the aggregate
for host in self.host_aggregates_map:
if (aggregate.id in self.host_aggregates_map[host]
and host not in aggregate.hosts):
self.host_aggregates_map[host].remove(aggregate.id)
def delete_aggregate(self, aggregate):
"""Deletes internal HostManager information about a specific aggregate.
"""
if aggregate.id in self.aggs_by_id:
del self.aggs_by_id[aggregate.id]
for host in aggregate.hosts:
if aggregate.id in self.host_aggregates_map[host]:
self.host_aggregates_map[host].remove(aggregate.id)
def _init_instance_info(self):
"""Creates the initial view of instances for all hosts.
As this initial population of instance information may take some time,
we don't wish to block the scheduler's startup while this completes.
The async method allows us to simply mock out the _init_instance_info()
method in tests.
"""
def _async_init_instance_info():
context = context_module.get_admin_context()
LOG.debug("START:_async_init_instance_info")
self._instance_info = {}
compute_nodes = objects.ComputeNodeList.get_all(context).objects
LOG.debug("Total number of compute nodes: %s", len(compute_nodes))
# Break the queries into batches of 10 to reduce the total number
# of calls to the DB.
batch_size = 10
start_node = 0
end_node = batch_size
while start_node <= len(compute_nodes):
curr_nodes = compute_nodes[start_node:end_node]
start_node += batch_size
end_node += batch_size
filters = {"host": [curr_node.host
for curr_node in curr_nodes]}
result = objects.InstanceList.get_by_filters(context,
filters)
instances = result.objects
LOG.debug("Adding %s instances for hosts %s-%s",
len(instances), start_node, end_node)
for instance in instances:
host = instance.host
if host not in self._instance_info:
self._instance_info[host] = {"instances": {},
"updated": False}
inst_dict = self._instance_info[host]
inst_dict["instances"][instance.uuid] = instance
# Call sleep() to cooperatively yield
time.sleep(0)
LOG.debug("END:_async_init_instance_info")
# Run this async so that we don't block the scheduler start-up
utils.spawn_n(_async_init_instance_info)
def _choose_host_filters(self, filter_cls_names):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
if not isinstance(filter_cls_names, (list, tuple)):
filter_cls_names = [filter_cls_names]
good_filters = []
bad_filters = []
for filter_name in filter_cls_names:
if filter_name not in self.filter_obj_map:
if filter_name not in self.filter_cls_map:
bad_filters.append(filter_name)
continue
filter_cls = self.filter_cls_map[filter_name]
self.filter_obj_map[filter_name] = filter_cls()
good_filters.append(self.filter_obj_map[filter_name])
if bad_filters:
msg = ", ".join(bad_filters)
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
def get_filtered_hosts(self, hosts, filter_properties,
filter_class_names=None, index=0):
"""Filter hosts and return only ones passing all filters."""
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
for host in hosts_to_ignore:
for (hostname, nodename) in list(host_map.keys()):
if host == hostname:
del host_map[(hostname, nodename)]
ignored_hosts.append(host)
ignored_hosts_str = ', '.join(ignored_hosts)
LOG.info(_LI('Host filter ignoring hosts: %s'), ignored_hosts_str)
def _match_forced_hosts(host_map, hosts_to_force):
forced_hosts = []
for (hostname, nodename) in list(host_map.keys()):
if hostname not in hosts_to_force:
del host_map[(hostname, nodename)]
else:
forced_hosts.append(hostname)
if host_map:
forced_hosts_str = ', '.join(forced_hosts)
msg = _LI('Host filter forcing available hosts to %s')
else:
forced_hosts_str = ', '.join(hosts_to_force)
msg = _LI("No hosts matched due to not matching "
"'force_hosts' value of '%s'")
LOG.info(msg % forced_hosts_str)
def _match_forced_nodes(host_map, nodes_to_force):
forced_nodes = []
for (hostname, nodename) in list(host_map.keys()):
if nodename not in nodes_to_force:
del host_map[(hostname, nodename)]
else:
forced_nodes.append(nodename)
if host_map:
forced_nodes_str = ', '.join(forced_nodes)
msg = _LI('Host filter forcing available nodes to %s')
else:
forced_nodes_str = ', '.join(nodes_to_force)
msg = _LI("No nodes matched due to not matching "
"'force_nodes' value of '%s'")
LOG.info(msg % forced_nodes_str)
if filter_class_names is None:
filters = self.default_filters
else:
filters = self._choose_host_filters(filter_class_names)
ignore_hosts = filter_properties.get('ignore_hosts', [])
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
if ignore_hosts or force_hosts or force_nodes:
# NOTE(deva): we can't assume "host" is unique because
# one host may have many nodes.
name_to_cls_map = {(x.host, x.nodename): x for x in hosts}
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
if not name_to_cls_map:
return []
# NOTE(deva): allow force_hosts and force_nodes independently
if force_hosts:
_match_forced_hosts(name_to_cls_map, force_hosts)
if force_nodes:
_match_forced_nodes(name_to_cls_map, force_nodes)
if force_hosts or force_nodes:
# NOTE(deva): Skip filters when forcing host or node
if name_to_cls_map:
return name_to_cls_map.values()
hosts = six.itervalues(name_to_cls_map)
return self.filter_handler.get_filtered_objects(filters,
hosts, filter_properties, index)
def get_weighed_hosts(self, hosts, weight_properties):
"""Weigh the hosts."""
return self.weight_handler.get_weighed_objects(self.weighers,
hosts, weight_properties)
def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts
the HostManager knows about. Also, each of the consumable resources
in HostState are pre-populated and adjusted based on data in the db.
"""
service_refs = {service.host: service
for service in objects.ServiceList.get_by_binary(
context, 'nova-compute')}
# Get resource usage across the available compute nodes:
compute_nodes = objects.ComputeNodeList.get_all(context)
seen_nodes = set()
for compute in compute_nodes:
service = service_refs.get(compute.host)
if not service:
LOG.warning(_LW(
"No compute service record found for host %(host)s"),
{'host': compute.host})
continue
host = compute.host
node = compute.hypervisor_hostname
state_key = (host, node)
host_state = self.host_state_map.get(state_key)
if host_state:
host_state.update_from_compute_node(compute)
else:
host_state = self.host_state_cls(host, node, compute=compute)
self.host_state_map[state_key] = host_state
# We force an update of the aggregates info each time a new request
# comes in, because the aggregates could have changed after this
# field was first set
host_state.aggregates = [self.aggs_by_id[agg_id] for agg_id in
self.host_aggregates_map[
host_state.host]]
host_state.update_service(dict(service))
self._add_instance_info(context, compute, host_state)
seen_nodes.add(state_key)
# remove compute nodes from host_state_map if they are not active
dead_nodes = set(self.host_state_map.keys()) - seen_nodes
for state_key in dead_nodes:
host, node = state_key
LOG.info(_LI("Removing dead compute node %(host)s:%(node)s "
"from scheduler"), {'host': host, 'node': node})
del self.host_state_map[state_key]
return six.itervalues(self.host_state_map)
def _add_instance_info(self, context, compute, host_state):
"""Adds the host instance info to the host_state object.
Some older compute nodes may not be sending instance change updates to
the Scheduler; other sites may disable this feature for performance
reasons. In either of these cases, there will either be no information
for the host, or the 'updated' value for that host dict will be False.
In those cases, we need to grab the current InstanceList instead of
relying on the version in _instance_info.
"""
host_name = compute.host
host_info = self._instance_info.get(host_name)
if host_info and host_info.get("updated"):
inst_dict = host_info["instances"]
else:
# Host is running old version, or updates aren't flowing.
inst_list = objects.InstanceList.get_by_host(context, host_name)
inst_dict = {instance.uuid: instance
for instance in inst_list.objects}
host_state.instances = inst_dict
def _recreate_instance_info(self, context, host_name):
"""Get the InstanceList for the specified host, and store it in the
_instance_info dict.
"""
instances = objects.InstanceList.get_by_host(context, host_name)
inst_dict = {instance.uuid: instance for instance in instances}
host_info = self._instance_info[host_name] = {}
host_info["instances"] = inst_dict
host_info["updated"] = False
@utils.synchronized(HOST_INSTANCE_SEMAPHORE)
def update_instance_info(self, context, host_name, instance_info):
"""Receives an InstanceList object from a compute node.
This method receives information from a compute node when it starts up,
or when its instances have changed, and updates its view of hosts and
instances with it.
"""
host_info = self._instance_info.get(host_name)
if host_info:
inst_dict = host_info.get("instances")
for instance in instance_info.objects:
# Overwrite the entry (if any) with the new info.
inst_dict[instance.uuid] = instance
host_info["updated"] = True
else:
instances = instance_info.objects
if len(instances) > 1:
# This is a host sending its full instance list, so use it.
host_info = self._instance_info[host_name] = {}
host_info["instances"] = {instance.uuid: instance
for instance in instances}
host_info["updated"] = True
else:
self._recreate_instance_info(context, host_name)
LOG.info(_LI("Received an update from an unknown host '%s'. "
"Re-created its InstanceList."), host_name)
@utils.synchronized(HOST_INSTANCE_SEMAPHORE)
def delete_instance_info(self, context, host_name, instance_uuid):
"""Receives the UUID from a compute node when one of its instances is
terminated.
The instance in the local view of the host's instances is removed.
"""
host_info = self._instance_info.get(host_name)
if host_info:
inst_dict = host_info["instances"]
# Remove the existing Instance object, if any
inst_dict.pop(instance_uuid, None)
host_info["updated"] = True
else:
self._recreate_instance_info(context, host_name)
LOG.info(_LI("Received a delete update from an unknown host '%s'. "
"Re-created its InstanceList."), host_name)
@utils.synchronized(HOST_INSTANCE_SEMAPHORE)
def sync_instance_info(self, context, host_name, instance_uuids):
"""Receives the uuids of the instances on a host.
This method is periodically called by the compute nodes, which send a
list of all the UUID values for the instances on that node. This is
used by the scheduler's HostManager to detect when its view of the
compute node's instances is out of sync.
"""
host_info = self._instance_info.get(host_name)
if host_info:
local_set = set(host_info["instances"].keys())
compute_set = set(instance_uuids)
if not local_set == compute_set:
self._recreate_instance_info(context, host_name)
LOG.info(_LI("The instance sync for host '%s' did not match. "
"Re-created its InstanceList."), host_name)
return
host_info["updated"] = True
LOG.info(_LI("Successfully synced instances from host '%s'."),
host_name)
else:
self._recreate_instance_info(context, host_name)
LOG.info(_LI("Received a sync request from an unknown host '%s'. "
"Re-created its InstanceList."), host_name)
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
PEM formatted data is used frequently in conjunction with X509 PKI as
a data exchange mechanism for binary data. The acronym PEM stands for
Privacy Enhanced Mail as defined in RFC-1421. Contrary to expectation
the PEM format in common use has little to do with RFC-1421. Instead
what we know as PEM format grew out of the need for a data exchange
mechanism, largely through the influence of OpenSSL. Other X509
implementations have adopted it.
Unfortunately PEM format has never been officially standardized. Its
basic format is as follows:
1) A header consisting of 5 hyphens followed by the word BEGIN and a
single space, then an upper-case string describing the contents of the
PEM block, followed by 5 hyphens and a newline.
2) Binary data (typically in DER ASN.1 format) encoded in base64. The
base64 text is line wrapped so that each line of base64 is 64
characters long and terminated with a newline. The last line of base64
text may be less than 64 characters. The content and format of the
binary data is entirely dependent upon the type of data announced in
the header and footer.
3) A footer in the exact same format as the header, except the word BEGIN is
replaced by END. The content name in both the header and footer should
exactly match.
The above is called a PEM block. It is permissible for multiple PEM
blocks to appear in a single file or block of text. This is often used
when specifying multiple X509 certificates.
An example PEM block for a certificate is:
-----BEGIN CERTIFICATE-----
MIIC0TCCAjqgAwIBAgIJANsHKV73HYOwMA0GCSqGSIb3DQEBBQUAMIGeMQowCAYD
VQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55
dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMG
CSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2Vs
ZiBTaWduZWQwIBcNMTIxMTA1MTgxODI0WhgPMjA3MTA0MzAxODE4MjRaMIGeMQow
CAYDVQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1
bm55dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTEl
MCMGCSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxML
U2VsZiBTaWduZWQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALzI17ExCaqd
r7xY2Q5CBZ1bW1lsrXxS8eNJRdQtskDuQVAluY03/OGZd8HQYiiY/ci2tYy7BNIC
bh5GaO95eqTDykJR3liOYE/tHbY6puQlj2ZivmhlSd2d5d7lF0/H28RQsLu9VktM
uw6q9DpDm35jfrr8LgSeA3MdVqcS/4OhAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB
Af8wDQYJKoZIhvcNAQEFBQADgYEAjSQND7i1dNZtLKpWgX+JqMr3BdVlM15mFeVr
C26ZspZjZVY5okdozO9gU3xcwRe4Cg30sKFOe6EBQKpkTZucFOXwBtD3h6dWJrdD
c+m/CL/rs0GatDavbaIT2vv405SQUQooCdVh72LYel+4/a6xmRd7fQx3iEXN9QYj
vmHJUcA=
-----END CERTIFICATE-----
PEM format is safe for transmission in 7-bit ASCII systems
(i.e. standard email). Since 7-bit ASCII is a proper subset of UTF-8
and Latin-1 it is not affected by transcoding between those
charsets. Nor is PEM format affected by the choice of line
endings. This makes PEM format particularly attractive for transport
and storage of binary data.
This module provides a number of utilities supporting the generation
and consumption of PEM formatted data including:
* parse text and find all PEM blocks contained in the
text. Information on the location of the block in the text, the
type of PEM block, and its base64 and binary data contents.
* parse text assumed to contain PEM data and return the binary
data.
* test if a block of text is a PEM block
* convert base64 text into a formatted PEM block
* convert binary data into a formatted PEM block
* access to the valid PEM types and their headers
"""
import base64
import re
import six
from keystone.common import base64utils
from keystone.openstack.common.gettextutils import _
PEM_TYPE_TO_HEADER = {
u'cms': u'CMS',
u'dsa-private': u'DSA PRIVATE KEY',
u'dsa-public': u'DSA PUBLIC KEY',
u'ecdsa-public': u'ECDSA PUBLIC KEY',
u'ec-private': u'EC PRIVATE KEY',
u'pkcs7': u'PKCS7',
u'pkcs7-signed': u'PKCS',
u'pkcs8': u'ENCRYPTED PRIVATE KEY',
u'private-key': u'PRIVATE KEY',
u'public-key': u'PUBLIC KEY',
u'rsa-private': u'RSA PRIVATE KEY',
u'rsa-public': u'RSA PUBLIC KEY',
u'cert': u'CERTIFICATE',
u'crl': u'X509 CRL',
u'cert-pair': u'CERTIFICATE PAIR',
u'csr': u'CERTIFICATE REQUEST',
}
# This is not a 1-to-1 reverse map of PEM_TYPE_TO_HEADER
# because it includes deprecated headers that map to 1 pem_type.
PEM_HEADER_TO_TYPE = {
u'CMS': u'cms',
u'DSA PRIVATE KEY': u'dsa-private',
u'DSA PUBLIC KEY': u'dsa-public',
u'ECDSA PUBLIC KEY': u'ecdsa-public',
u'EC PRIVATE KEY': u'ec-private',
u'PKCS7': u'pkcs7',
u'PKCS': u'pkcs7-signed',
u'ENCRYPTED PRIVATE KEY': u'pkcs8',
u'PRIVATE KEY': u'private-key',
u'PUBLIC KEY': u'public-key',
u'RSA PRIVATE KEY': u'rsa-private',
u'RSA PUBLIC KEY': u'rsa-public',
u'CERTIFICATE': u'cert',
u'X509 CERTIFICATE': u'cert',
u'CERTIFICATE PAIR': u'cert-pair',
u'X509 CRL': u'crl',
u'CERTIFICATE REQUEST': u'csr',
u'NEW CERTIFICATE REQUEST': u'csr',
}
# List of valid pem_types
pem_types = sorted(PEM_TYPE_TO_HEADER.keys())
# List of valid pem_headers
pem_headers = sorted(PEM_TYPE_TO_HEADER.values())
_pem_begin_re = re.compile(r'^-{5}BEGIN\s+([^-]+)-{5}\s*$', re.MULTILINE)
_pem_end_re = re.compile(r'^-{5}END\s+([^-]+)-{5}\s*$', re.MULTILINE)
class PEMParseResult(object):
"""Information returned when a PEM block is found in text.
PEMParseResult contains information about a PEM block discovered
while parsing text. The following properties are defined:
pem_type
A shorthand name for the type of the PEM data, e.g. cert,
csr, crl, cms, key. Valid pem_types are listed in pem_types.
When the pem_type is set the pem_header is updated to match it.
pem_header
The text following '-----BEGIN ' in the PEM header.
Common examples are:
-----BEGIN CERTIFICATE-----
-----BEGIN CMS-----
Thus the pem_header would be CERTIFICATE and CMS respectively.
When the pem_header is set the pem_type is updated to match it.
pem_start, pem_end
The beginning and ending positions of the PEM block
including the PEM header and footer.
base64_start, base64_end
The beginning and ending positions of the base64 data
contained inside the PEM header and footer. Includes trailing
new line
binary_data
The decoded base64 data. None if not decoded.
"""
def __init__(self, pem_type=None, pem_header=None,
pem_start=None, pem_end=None,
base64_start=None, base64_end=None,
binary_data=None):
self._pem_type = None
self._pem_header = None
if pem_type is not None:
self.pem_type = pem_type
if pem_header is not None:
self.pem_header = pem_header
self.pem_start = pem_start
self.pem_end = pem_end
self.base64_start = base64_start
self.base64_end = base64_end
self.binary_data = binary_data
@property
def pem_type(self):
return self._pem_type
@pem_type.setter
def pem_type(self, pem_type):
if pem_type is None:
self._pem_type = None
self._pem_header = None
else:
pem_header = PEM_TYPE_TO_HEADER.get(pem_type)
if pem_header is None:
raise ValueError(_('unknown pem_type "%(pem_type)s", '
'valid types are: %(valid_pem_types)s') %
{'pem_type': pem_type,
'valid_pem_types': ', '.join(pem_types)})
self._pem_type = pem_type
self._pem_header = pem_header
@property
def pem_header(self):
return self._pem_header
@pem_header.setter
def pem_header(self, pem_header):
if pem_header is None:
self._pem_type = None
self._pem_header = None
else:
pem_type = PEM_HEADER_TO_TYPE.get(pem_header)
if pem_type is None:
raise ValueError(_('unknown pem header "%(pem_header)s", '
'valid headers are: '
'%(valid_pem_headers)s') %
{'pem_header': pem_header,
'valid_pem_headers':
', '.join("'%s'" % x
for x in pem_headers)})
self._pem_type = pem_type
self._pem_header = pem_header
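# Illustrative examples of the paired setters above (values taken from the
# maps defined earlier in this module):
#   PEMParseResult(pem_type=u'cert').pem_header == u'CERTIFICATE'
#   PEMParseResult(pem_header=u'X509 CERTIFICATE').pem_type == u'cert'
# i.e. deprecated headers still resolve to a canonical pem_type.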
#------------------------------------------------------------------------------
def pem_search(text, start=0):
"""Search for a block of PEM formatted data
Search for a PEM block in a text string. The search begins at
start. If a PEM block is found a PEMParseResult object is
returned, otherwise if no PEM block is found None is returned.
If the pem_type is not the same in both the header and footer
a ValueError is raised.
The start and end positions are suitable for use as slices into
the text. To search for multiple PEM blocks pass pem_end as the
start position for the next iteration. Terminate the iteration
when None is returned. Example::
start = 0
while True:
block = pem_search(text, start)
if block is None:
break
base64_data = text[block.base64_start : block.base64_end]
start = block.pem_end
:param text: the text to search for PEM blocks
:type text: string
:param start: the position in text to start searching from (default: 0)
:type start: int
:returns: PEMParseResult or None if not found
:raises: ValueError
"""
match = _pem_begin_re.search(text, pos=start)
if match:
pem_start = match.start()
begin_text = match.group(0)
base64_start = min(len(text), match.end() + 1)
begin_pem_header = match.group(1).strip()
match = _pem_end_re.search(text, pos=base64_start)
if match:
pem_end = min(len(text), match.end() + 1)
base64_end = match.start()
end_pem_header = match.group(1).strip()
else:
raise ValueError(_('failed to find end matching "%s"') %
begin_text)
if begin_pem_header != end_pem_header:
raise ValueError(_('beginning & end PEM headers do not match '
'(%(begin_pem_header)s'
'!= '
'%(end_pem_header)s)') %
{'begin_pem_header': begin_pem_header,
'end_pem_header': end_pem_header})
else:
return None
result = PEMParseResult(pem_header=begin_pem_header,
pem_start=pem_start, pem_end=pem_end,
base64_start=base64_start, base64_end=base64_end)
return result
def parse_pem(text, pem_type=None, max_items=None):
"""Scan text for PEM data, return list of PEM items
The input text is scanned for PEM blocks, for each one found a
PEMParseResult is constructed and added to the return list.
pem_type operates as a filter on the type of PEM desired. If
pem_type is specified only those PEM blocks which match will be
included. The pem_type is a logical name, not the actual text in
the pem header (e.g. 'cert'). If the pem_type is None all PEM
blocks are returned.
If max_items is specified the result is limited to that number of
items.
The return value is a list of PEMParseResult objects. The
PEMParseResult provides complete information about the PEM block
including the decoded binary data for the PEM block. The list is
ordered in the same order as found in the text.
Examples::
# Get all certs
certs = parse_pem(text, 'cert')
# Get the first cert
try:
binary_cert = parse_pem(text, 'cert', 1)[0].binary_data
except IndexError:
raise ValueError('no cert found')
:param text: The text to search for PEM blocks
:type text: string
:param pem_type: Only return data for this pem_type.
Valid types are: csr, cert, crl, cms, key.
If pem_type is None no filtering is performed.
(default: None)
:type pem_type: string or None
:param max_items: Limit the number of blocks returned. (default: None)
:type max_items: int or None
:return: List of PEMParseResult, one for each PEM block found
:raises: ValueError, InvalidBase64Error
"""
pem_blocks = []
start = 0
while True:
block = pem_search(text, start)
if block is None:
break
start = block.pem_end
if pem_type is None:
pem_blocks.append(block)
else:
try:
if block.pem_type == pem_type:
pem_blocks.append(block)
except KeyError:
raise ValueError(_('unknown pem_type: "%s"') % (pem_type))
if max_items is not None and len(pem_blocks) >= max_items:
break
for block in pem_blocks:
base64_data = text[block.base64_start:block.base64_end]
try:
binary_data = base64.b64decode(base64_data)
except Exception as e:
block.binary_data = None
raise base64utils.InvalidBase64Error(
_('failed to base64 decode %(pem_type)s PEM at position '
'%(position)d: %(err_msg)s') %
{'pem_type': block.pem_type,
'position': block.pem_start,
'err_msg': six.text_type(e)})
else:
block.binary_data = binary_data
return pem_blocks
def get_pem_data(text, pem_type='cert'):
"""Scan text for PEM data, return binary contents
The input text is scanned for a PEM block which matches the pem_type.
If found the binary data contained in the PEM block is returned.
If no PEM block is found or it does not match the specified pem type
None is returned.
:param text: The text to search for the PEM block
:type text: string
:param pem_type: Only return data for this pem_type.
Valid types are: csr, cert, crl, cms, key.
(default: 'cert')
:type pem_type: string
:return: binary data or None if not found.
"""
blocks = parse_pem(text, pem_type, 1)
if not blocks:
return None
return blocks[0].binary_data
def is_pem(text, pem_type='cert'):
"""Does this text contain a PEM block.
Check for the existence of a PEM formatted block in the
text, if one is found verify it's contents can be base64
decoded, if so return True. Return False otherwise.
:param text: The text to search for PEM blocks
:type text: string
:param pem_type: Only return data for this pem_type.
Valid types are: csr, cert, crl, cms, key.
(default: 'cert')
:type pem_type: string
:returns: bool -- True if text contains PEM matching the pem_type,
False otherwise.
"""
try:
pem_blocks = parse_pem(text, pem_type, max_items=1)
except base64utils.InvalidBase64Error:
return False
if pem_blocks:
return True
else:
return False
def base64_to_pem(base64_text, pem_type='cert'):
"""Format string of base64 text into PEM format
Input is assumed to consist only of members of the base64 alphabet
(i.e. no whitespace). Use one of the filter functions from
base64utils to assure the input is clean
(i.e. strip_whitespace()).
:param base64_text: text containing ONLY base64 alphabet
characters to be inserted into PEM output.
:type base64_text: string
:param pem_type: Produce a PEM block for this type.
Valid types are: csr, cert, crl, cms, key.
(default: 'cert')
:type pem_type: string
:returns: string -- PEM formatted text
"""
pem_header = PEM_TYPE_TO_HEADER[pem_type]
buf = six.StringIO()
buf.write(u'-----BEGIN %s-----' % pem_header)
buf.write(u'\n')
for line in base64utils.base64_wrap_iter(base64_text, width=64):
buf.write(line)
buf.write(u'\n')
buf.write(u'-----END %s-----' % pem_header)
buf.write(u'\n')
text = buf.getvalue()
buf.close()
return text
def binary_to_pem(binary_data, pem_type='cert'):
"""Format binary data into PEM format
Example:
# get the certificate binary data in DER format
der_data = certificate.der
# convert the DER binary data into a PEM
pem = binary_to_pem(der_data, 'cert')
:param binary_data: binary data to encapsulate into PEM
:type binary_data: buffer
:param pem_type: Produce a PEM block for this type.
Valid types are: csr, cert, crl, cms, key.
(default: 'cert')
:type pem_type: string
:returns: string -- PEM formatted text
"""
base64_text = base64.b64encode(binary_data)
return base64_to_pem(base64_text, pem_type)
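# A minimal round-trip sketch (not part of the original module, and assuming
# the Python 2 string semantics this module targets): encode hypothetical
# binary data into a PEM block and read it back with the helpers above.
if __name__ == '__main__':
    sample = b'hello pem'
    pem_text = binary_to_pem(sample, 'cert')
    assert is_pem(pem_text, 'cert')
    assert get_pem_data(pem_text, 'cert') == sample
    print(pem_text)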
|
|
#!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import sys, operator, argparse, os
from collections import defaultdict
# This script reads and writes the 'ctm-edits' file that is
# produced by get_ctm_edits.py.
# It modifies the ctm-edits so that non-scored words
# are not counted as errors: for instance, if there are things like
# [COUGH] and [NOISE] in the transcript, deletions, insertions and
# substitutions involving them are allowed, and we modify the reference
# to correspond to the hypothesis.
#
# If you supply the <lang> directory (the one that corresponds to
# how you decoded the data) to this script, it assumes that the <lang>
# directory contains phones/align_lexicon.int, and it uses this to work
# out a reasonable guess of the non-scored phones, based on which have
# a single-word pronunciation that maps to a silence phone.
# It then uses the words.txt to work out the written form of those words.
#
# Alternatively, you may specify a file containing the non-scored words one
# per line, with the --non-scored-words option.
#
# Non-scored words that were deleted (i.e. they were in the ref but not the
# hyp) are simply removed from the ctm. For non-scored words that
# were inserted or substituted, we change the reference word to match the
# hyp word, but instead of marking the operation as 'cor' (correct), we
# mark it as 'fix' (fixed), so that it will not be positively counted as a correct
# word for purposes of finding the optimal segment boundaries.
#
# e.g.
# <file-id> <channel> <start-time> <duration> <hyp-word> <conf> <ref-word> <edit-type>
# [note: the <channel> will always be 1].
# AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
# AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
# AJJacobs_2007P-0001605-0003029 1 0.24 0.25 thought 1.0 thought cor
# AJJacobs_2007P-0001605-0003029 1 0.49 0.14 i'd 1.0 i'd cor
# AJJacobs_2007P-0001605-0003029 1 0.63 0.22 tell 1.0 tell cor
# AJJacobs_2007P-0001605-0003029 1 0.85 0.11 you 1.0 you cor
# AJJacobs_2007P-0001605-0003029 1 0.96 0.05 a 1.0 a cor
# AJJacobs_2007P-0001605-0003029 1 1.01 0.24 little 1.0 little cor
# AJJacobs_2007P-0001605-0003029 1 1.25 0.5 about 1.0 about cor
# AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] cor
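# As a hypothetical illustration of the fix-up described above (assuming
# '[UH]' appears in the non-scored-words file): an inserted non-scored hyp
# word has its reference rewritten and is re-labelled 'fix',
#   before:  AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 <eps> ins
#   after:   AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] fix
# while a deletion of a non-scored word (hyp '<eps>', ref '[UH]', edit 'del')
# is simply dropped from the output.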
parser = argparse.ArgumentParser(
description = "This program modifies the reference in the ctm-edits which "
"is output by steps/cleanup/get_ctm_edits.py, to allow insertions, deletions and "
"substitutions of non-scored words, and [if --allow-repetitions=true], "
"duplications of single words or pairs of scored words (to account for dysfluencies "
"that were not transcribed). Note: deletions and substitutions of non-scored words "
"after the reference is corrected, will be marked as operation 'fix' rather than "
"'cor' (correct) so that the downstream processing knows that this was not in "
"the original reference. Also by defaults tags non-scored words as such when "
"they are correct; see the --tag-non-scored option.")
parser.add_argument("--verbose", type = int, default = 1,
choices=[0,1,2,3],
help = "Verbose level, higher = more verbose output")
parser.add_argument("--allow-repetitions", type = str, default = 'true',
choices=['true','false'],
help = "If true, allow repetitions in the transcript of one or "
"two-word sequences: for instance if the ref says 'i' but "
"the hyp says 'i i', or the ref says 'but then' and the hyp says "
"'but then but then', fix the reference accordingly. Intervening "
"non-scored words are allowed between the repetitions. These "
"fixes will be marked as 'cor', not as 'fix', since there is "
"generally no way to tell which repetition was the 'real' one "
"(and since we're generally confident that such things were "
"actually uttered).")
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
help="Filename of file containing a list of non-scored words, "
"one per line. See steps/cleanup/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
help = "Filename of input ctm-edits file. "
"Use /dev/stdin for standard input.")
parser.add_argument("ctm_edits_out", metavar = "<ctm-edits-out>",
help = "Filename of output ctm-edits file. "
"Use /dev/stdout for standard output.")
args = parser.parse_args()
def ReadNonScoredWords(non_scored_words_file):
global non_scored_words
try:
f = open(non_scored_words_file)
except:
sys.exit("modify_ctm_edits.py: error opening file: "
"--non-scored-words=" + non_scored_words_file)
for line in f.readlines():
a = line.split()
if len(a) != 1:
sys.exit("modify_ctm_edits.py: bad line in non-scored-words "
"file {0}: {1}".format(non_scored_words_file, line))
non_scored_words.add(a[0])
f.close()
# The ctm-edits file format is as follows [note: file-id is really utterance-id
# in this context].
# <file-id> <channel> <start-time> <duration> <hyp-word> <conf> <ref-word> <edit>
# e.g.:
# AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
# AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
# ...
# This function processes a single line of ctm-edits input for fixing
# "non-scored" words. The input 'a' is the split line as an array of fields.
# It modifies the object 'a'. This function returns the modified array,
# and please note that it is destructive of its input 'a'.
# If it returns the empty array then the line is to be deleted.
def ProcessLineForNonScoredWords(a):
global num_lines, num_correct_lines, ref_change_stats
try:
assert len(a) == 8
num_lines += 1
# we could do:
# [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a
duration = a[3]
hyp_word = a[4]
ref_word = a[6]
edit_type = a[7]
if edit_type == 'ins':
assert ref_word == '<eps>'
if hyp_word in non_scored_words:
# insert this non-scored word into the reference.
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
ref_word = hyp_word
edit_type = 'fix'
elif edit_type == 'del':
assert hyp_word == '<eps>' and float(duration) == 0.0
if ref_word in non_scored_words:
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
return []
elif edit_type == 'sub':
if hyp_word in non_scored_words and ref_word in non_scored_words:
# we also allow replacing one non-scored word with another.
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
ref_word = hyp_word
edit_type = 'fix'
else:
assert edit_type == 'cor' or edit_type == 'sil'
num_correct_lines += 1
a[4] = hyp_word
a[6] = ref_word
a[7] = edit_type
return a
except Exception as e:
print("modify_ctm_edits.py: bad line in ctm-edits input: " + ' '.join(a),
file = sys.stderr)
print("modify_ctm_edits.py: exception was: " + str(e),
file = sys.stderr)
sys.exit(1)
# This function processes the split lines of one utterance (as a
# list of lists of fields), to allow repetitions of words, so if the
# reference says 'i' but the hyp says 'i i', or the ref says
# 'you know' and the hyp says 'you know you know', we change the
# ref to match.
# It returns the modified list-of-lists [but note that the input
# is actually modified].
def ProcessUtteranceForRepetitions(split_lines_of_utt):
global non_scored_words, repetition_stats
# The array 'selected_line_indexes' will contain the indexes of selected
# elements of 'split_lines_of_utt'. Consider split_line =
# split_lines_of_utt[i]. If the hyp and ref words in split_line are both
# either '<eps>' or non-scored words, we discard the index.
# Otherwise we put it into selected_line_indexes.
selected_line_indexes = []
# selected_edits will contain, for each element of selected_line_indexes, the
# corresponding edit_type from the original utterance previous to
# this function call ('cor', 'ins', etc.).
#
# As a special case, if there was a substitution ('sub') where the
# reference word was a non-scored word and the hyp word was a real word,
# we mark it in this array as 'ins', because for purposes of this algorithm
# it behaves the same as an insertion.
#
# Whenever we do any operation that will change the reference, we change
# all the selected_edits in the array to None so that they won't match
# any further operations.
selected_edits = []
# selected_hyp_words will contain, for each element of selected_line_indexes, the
# corresponding hyp_word.
selected_hyp_words = []
for i in range(len(split_lines_of_utt)):
split_line = split_lines_of_utt[i]
hyp_word = split_line[4]
ref_word = split_line[6]
# keep_this_line will be True if we are going to keep this line in the
# 'selected lines' for further processing of repetitions. We only
# eliminate lines involving non-scored words or epsilon in both hyp
# and reference position
# [note: epsilon in hyp position for non-empty segments indicates
# optional-silence, and it does make sense to make this 'invisible',
# just like non-scored words, for the purposes of this code.]
keep_this_line = True
if (hyp_word == '<eps>' or hyp_word in non_scored_words) and \
(ref_word == '<eps>' or ref_word in non_scored_words):
keep_this_line = False
if keep_this_line:
selected_line_indexes.append(i)
edit_type = split_line[7]
if edit_type == 'sub' and ref_word in non_scored_words:
assert hyp_word not in non_scored_words
# For purposes of this algorithm, substitution of, say,
# '[COUGH]' by 'hello' behaves like an insertion of 'hello',
# since we're willing to remove the '[COUGH]' from the
# transcript.
edit_type = 'ins'
selected_edits.append(edit_type)
selected_hyp_words.append(hyp_word)
# indexes_to_fix will be a list of indexes into 'selected_indexes' where we
# plan to fix the ref to match the hyp.
indexes_to_fix = []
# This loop scans for, and fixes, two-word insertions that follow,
# or precede, the corresponding correct words.
for i in range(0, len(selected_line_indexes) - 3):
this_indexes = selected_line_indexes[i:i+4]
this_hyp_words = selected_hyp_words[i:i+4]
if this_hyp_words[0] == this_hyp_words[2] and \
this_hyp_words[1] == this_hyp_words[3] and \
this_hyp_words[0] != this_hyp_words[1]:
# if the hyp words were of the form [ 'a', 'b', 'a', 'b' ]...
this_edits = selected_edits[i:i+4]
if this_edits == [ 'cor', 'cor', 'ins', 'ins' ] or \
this_edits == [ 'ins', 'ins', 'cor', 'cor' ]:
if this_edits[0] == 'cor':
indexes_to_fix += [ i+2, i+3 ]
else:
indexes_to_fix += [ i, i+1 ]
# the next line prevents this region of the text being used
# in any further edits.
selected_edits[i:i+4] = [ None, None, None, None ]
word_pair = this_hyp_words[0] + ' ' + this_hyp_words[1]
# e.g. word_pair = 'hi there'
# add 2 because these stats are of words.
repetition_stats[word_pair] += 2
# the next line prevents this region of the text being used
# in any further edits.
selected_edits[i:i+4] = [ None, None, None, None ]
# This loop scans for, and fixes, one-word insertions that follow,
# or precede, the corresponding correct words.
for i in range(0, len(selected_line_indexes) - 1):
this_indexes = selected_line_indexes[i:i+2]
this_hyp_words = selected_hyp_words[i:i+2]
if this_hyp_words[0] == this_hyp_words[1]:
# if the hyp words were of the form [ 'a', 'a' ]...
this_edits = selected_edits[i:i+2]
if this_edits == [ 'cor', 'ins' ] or this_edits == [ 'ins', 'cor' ]:
if this_edits[0] == 'cor':
indexes_to_fix.append(i+1)
else:
indexes_to_fix.append(i)
repetition_stats[this_hyp_words[0]] += 1
# the next line prevents this region of the text being used
# in any further edits.
selected_edits[i:i+2] = [ None, None ]
for i in indexes_to_fix:
j = selected_line_indexes[i]
split_line = split_lines_of_utt[j]
ref_word = split_line[6]
hyp_word = split_line[4]
assert ref_word == '<eps>' or ref_word in non_scored_words
# we replace reference with the decoded word, which will be a
# repetition.
split_line[6] = hyp_word
split_line[7] = 'cor'
return split_lines_of_utt
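# A hypothetical worked example of the repetition handling above: if the
# selected hyp words of an utterance are [ 'you', 'know', 'you', 'know' ]
# with edits [ 'cor', 'cor', 'ins', 'ins' ], the two inserted lines are put
# on indexes_to_fix, their reference fields are overwritten with 'you' and
# 'know', and their edit type becomes 'cor' (not 'fix'), since such
# repetitions were presumably actually uttered.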
# note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
# Returns the same format of data after processing.
def ProcessUtterance(split_lines_of_utt):
new_split_lines_of_utt = []
for split_line in split_lines_of_utt:
new_split_line = ProcessLineForNonScoredWords(split_line)
if new_split_line != []:
new_split_lines_of_utt.append(new_split_line)
if args.allow_repetitions == 'true':
new_split_lines_of_utt = ProcessUtteranceForRepetitions(new_split_lines_of_utt)
return new_split_lines_of_utt
def ProcessData():
try:
f_in = open(args.ctm_edits_in)
except:
sys.exit("modify_ctm_edits.py: error opening ctm-edits input "
"file {0}".format(args.ctm_edits_in))
try:
f_out = open(args.ctm_edits_out, 'w')
except:
sys.exit("modify_ctm_edits.py: error opening ctm-edits output "
"file {0}".format(args.ctm_edits_out))
num_lines_processed = 0
# Most of what we're doing in the lines below is splitting the input lines
# and grouping them per utterance, before giving them to ProcessUtterance()
# and then printing the modified lines.
first_line = f_in.readline()
if first_line == '':
sys.exit("modify_ctm_edits.py: empty input")
split_pending_line = first_line.split()
if len(split_pending_line) == 0:
sys.exit("modify_ctm_edits.py: bad input line " + first_line)
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance = []
while True:
if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
split_lines_of_cur_utterance = ProcessUtterance(split_lines_of_cur_utterance)
for split_line in split_lines_of_cur_utterance:
print(' '.join(split_line), file = f_out)
split_lines_of_cur_utterance = []
if len(split_pending_line) == 0:
break
else:
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance.append(split_pending_line)
next_line = f_in.readline()
split_pending_line = next_line.split()
if len(split_pending_line) == 0:
if next_line != '':
sys.exit("modify_ctm_edits.py: got an empty or whitespace input line")
try:
f_out.close()
except:
sys.exit("modify_ctm_edits.py: error closing ctm-edits output "
"(broken pipe or full disk?)")
def PrintNonScoredStats():
if args.verbose < 1:
return
if num_lines == 0:
    print("modify_ctm_edits.py: processed no input.", file = sys.stderr)
    return
num_lines_modified = sum(ref_change_stats.values())
num_incorrect_lines = num_lines - num_correct_lines
percent_lines_incorrect = '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines)
percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 / num_incorrect_lines)
print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
"of which {2} were changed fixing the reference for non-scored words "
"({3}% of lines, or {4}% of incorrect lines)".format(
num_lines, percent_lines_incorrect, num_lines_modified,
percent_modified, percent_of_incorrect_modified),
file = sys.stderr)
keys = sorted(ref_change_stats.keys(), reverse=True,
key = lambda x: ref_change_stats[x])
num_keys_to_print = 40 if args.verbose >= 2 else 10
print("modify_ctm_edits.py: most common edits (as percentages "
"of all such edits) are:\n" +
('\n'.join([ '%s [%.2f%%]' % (k, ref_change_stats[k]*100.0/num_lines_modified)
for k in keys[0:num_keys_to_print]]))
+ ('\n...' if num_keys_to_print < len(keys) else ''),
file = sys.stderr)
def PrintRepetitionStats():
if args.verbose < 1 or sum(repetition_stats.values()) == 0:
return
num_lines_modified = sum(repetition_stats.values())
num_incorrect_lines = num_lines - num_correct_lines
percent_lines_incorrect = '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines)
percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 / num_incorrect_lines)
print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
"of which {2} were changed fixing the reference for repetitions ({3}% of "
"lines, or {4}% of incorrect lines)".format(
num_lines, percent_lines_incorrect, num_lines_modified,
percent_modified, percent_of_incorrect_modified),
file = sys.stderr)
keys = sorted(repetition_stats.keys(), reverse=True,
key = lambda x: repetition_stats[x])
num_keys_to_print = 40 if args.verbose >= 2 else 10
print("modify_ctm_edits.py: most common repetitions inserted into reference (as percentages "
"of all words fixed in this way) are:\n" +
('\n'.join([ '%s [%.2f%%]' % (k, repetition_stats[k]*100.0/num_lines_modified)
for k in keys[0:num_keys_to_print]]))
+ ('\n...' if num_keys_to_print < len(keys) else ''),
file = sys.stderr)
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
num_lines = 0
num_correct_lines = 0
# ref_change_stats will be a map from a string like
# 'foo -> bar' to an integer count; it keeps track of how much we changed
# the reference.
ref_change_stats = defaultdict(int)
# repetition_stats will be a map from strings like
# 'a', or 'a b' (the repeated strings), to an integer count; like
# ref_change_stats, it keeps track of how many changes we made
# in allowing repetitions.
repetition_stats = defaultdict(int)
ProcessData()
PrintNonScoredStats()
PrintRepetitionStats()
|
|
import bpy
import bmesh
import shutil
import os
import json
from os import listdir
from os.path import isfile, join
# image = bpy.data.images['mergeatlas']
# width = image.size[0]
# height = image.size[1]
# pixels = image.pixels[:] # create a copy
# # Use the tuple objects, which is way faster than direct access to Image.pixels
# # Write back to image.
# # Slice notation here means to replace in-place, not sure if it's faster...
# image.pixels[:] = pixels
# # Should probably update image
# image.update()
def get_textures(objs):
tex_set = set()
for obj in objs:
bm = bmesh.new()
bm.from_mesh(obj.data)
uvmap_layer = bm.faces.layers.tex.get("UVMap")
for face in bm.faces:
im = face[uvmap_layer].image
if im is not None:
abs_path = bpy.path.abspath(im.filepath, library=im.library)
tex_set.add(abs_path)
return tex_set
def make_atlas():
objs = bpy.context.selected_objects
textures = get_textures(objs)
for tex in textures:
dstpath = "D:/Blender/troensimulator/Berlin3ds/Berlin3ds/seperatepack"
dst_name = "_".join(tex.split("\\")[-2:])
shutil.copyfile(tex, os.path.join(dstpath, dst_name))
str_tex = " ".join(textures)
packer_config = """D:/Programme/CodeAndWeb/TexturePacker/bin/TexturePacker.exe --format json --size-constraints POT --data D:/Blender/troensimulator/Berlin3ds/Berlin3ds/seperatepack/{n}.json --multipack --pack-mode Good --max-size 4096 --texture-format tga --verbose --sheet D:/Blender/troensimulator/Berlin3ds/Berlin3ds/seperatepack/{n}.tga D:/Blender/troensimulator/Berlin3ds/Berlin3ds/seperatepack/"""
print(packer_config)
with open("D:\\Blender\\troensimulator\\Berlin3ds\\atlasgen.bat","w") as fd:
fd.write(packer_config)
def gen_iterable(value_dict):
if type(value_dict["frames"]) == list:
iterable = value_dict["frames"]
isdict = False
elif type(value_dict["frames"]) == dict:
iterable = value_dict["frames"].keys()
isdict = True
for _iter in iterable:
if not isdict:
frame = _iter
mat_name = frame["filename"].rsplit(".",1)[0] #remove .jpg
else:
frame = value_dict["frames"][_iter] #iter is a key
mat_name = _iter.rsplit(".",1)[0]
yield frame, mat_name
def get_or_make_material(obj, name_trimmed, image):
textlas_material = bpy.data.materials.get(name_trimmed+"-mat")
if textlas_material is None:
slot_texatlas_mat = len(obj.data.materials)
textlas_material = bpy.data.materials.new(name_trimmed+"-mat")
obj.data.materials.append(textlas_material)
cTex = bpy.data.textures.new(name_trimmed, type = 'IMAGE')
cTex.image = image
# Add texture slot for color texture
mtex = textlas_material.texture_slots.add()
mtex.texture = cTex
mtex.texture_coords = 'UV'
mtex.use_map_color_diffuse = True
mtex.mapping = 'FLAT'
mtex.uv_layer = 'mergeatlas'
else:
for i, slot in enumerate(obj.material_slots):
if slot.material is None:
obj.data.materials.append(textlas_material)
slot_texatlas_mat = i
break
if trimmed(slot.material.name) == textlas_material.name:
slot_texatlas_mat = i
break
else:
slot_texatlas_mat = 0
obj.data.materials.append(textlas_material)
return slot_texatlas_mat
def trimmed(name):
return name.split(".")[0]
def clear_materials(obj):
#obj.data.materials.clear() #works since 2.69
while obj.data.materials:
obj.data.materials.pop(0,update_data=True)
def process_part(name, obj):
atlas_path = "D:/Blender/troensimulator/Berlin3ds/Berlin3ds/seperatepack/"
with open(atlas_path+name) as fp:
value_dict = json.load(fp)
print("processing:" + name)
image_width = int(value_dict["meta"]["size"]["w"])
image_height = int(value_dict["meta"]["size"]["h"])
filename = value_dict["meta"]["image"].split("/")[-1]
image = bpy.data.images.get(filename)
if image is None:
image = bpy.data.images.load(atlas_path+filename)
slot_texatlas_mat = None
bm = bmesh.new()
bm.from_mesh(obj.data)
standard_uv_tex = bm.loops.layers.uv["UVMap"]
standard_layer = bm.faces.layers.tex.get("UVMap")
atlas_uv_tex = bm.loops.layers.uv["mergeatlas"]#obj.data.uv_textures["texatlas"]
layer = bm.faces.layers.tex.get("mergeatlas")
for face in bm.faces:
if face[standard_layer].image is None:
continue
im = face[standard_layer].image
abs_path = bpy.path.abspath(im.filepath, library=im.library)
im_name = ("_".join(abs_path.split("\\")[-2:]))
im_name = im_name.replace(".tga", ".jpg")
frames = value_dict["frames"]
if im_name not in frames:
continue
print(abs_path)
frame = frames[im_name]
frame_x = frame["frame"]["x"]
frame_top = frame["frame"]["y"]
tile_w = frame["sourceSize"]["w"]
tile_h = frame["sourceSize"]["h"]
for loop in face.loops:
uv = loop[standard_uv_tex].uv
if not frame["rotated"]:
x_co = frame_x + tile_w*uv.x
y_co = image_height - frame_top - tile_h + tile_h*uv.y
else :
#rotate clockwise 90 degrees =(y,-x)
x_co = frame_x + tile_h*uv.y
y_co = image_height - frame_top - tile_w*uv.x
loop[atlas_uv_tex].uv.x = x_co / float(image_width)
loop[atlas_uv_tex].uv.y = y_co / float(image_height)
face[layer].image = image
face.material_index = get_or_make_material(obj, trimmed(filename), image)
bm.to_mesh(obj.data)
bm.free()
obj.data.update()
if __name__ == "__main__":
atlas_path = "D:/Blender/troensimulator/Berlin3ds/Berlin3ds/seperatepack/"
objs = bpy.context.selected_objects
jsons = [ f for f in listdir(atlas_path) if isfile(join(atlas_path,f)) and f.split(".")[1] == "json" ]
for obj in objs:
clear_materials(obj)
if "mergeatlas" not in obj.data.uv_textures:
obj.data.uv_textures.new("mergeatlas")
for name in jsons:
process_part(name, obj)
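# A minimal standalone sketch (not part of the original script) of the UV
# remapping performed in process_part() for the non-rotated case: a face's
# local UV is scaled into the packed frame, with y measured from the bottom
# of the atlas because the packer reports the frame's top-left corner.
def uv_to_atlas(u, v, frame_x, frame_top, tile_w, tile_h, image_w, image_h):
    x_co = frame_x + tile_w * u
    y_co = image_h - frame_top - tile_h + tile_h * v
    return x_co / float(image_w), y_co / float(image_h)

# e.g. a 256x256 tile packed at (512, 0) in a 4096x4096 atlas:
# uv_to_atlas(0.0, 0.0, 512, 0, 256, 256, 4096, 4096) -> (0.125, 0.9375)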
|
|
from django.db import models
from django.contrib.auth.models import AbstractUser, User
import hashlib
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from IESPV.settings import EMAIL_HOST_USER
from datetime import datetime
from six import with_metaclass
from core.models import Observer, Observable, Email
class Employee(models.Model):
class Meta:
abstract = True
phone_number = models.CharField(max_length = 12)
user = models.OneToOneField(User,on_delete=models.CASCADE)
def register_donor(self, name, phone_number, address, address_reference, observations, email,donation_date):
user = self.generate_user(name, phone_number, email, '')
donor = Donor (user=user,
name=name,
email = email,
phone_number=phone_number,
address = address,
address_reference = address_reference,
observations = observations,
donation_date=donation_date)
donor.save()
def confirm_scheduling(self):
pass
def edit_donor(self):
pass
def update_donation_date(self, newDonationDate, donor):
donor.donation_date = newDonationDate
donor.save()
def __str__(self):
return self.user.username
class Administrator(Employee, Observer):
is_superuser = True
def register_employee(self,employee_type, name, phone_number, email, password):
if employee_type == 'secretary':
self.create_secretary(name, phone_number, email, password)
else:
self.create_administrator(name, phone_number, email, password)
def remove_employee(self):
pass
def release_login(self, id_Secretary):
try:
secretary = Secretary.objects.get(id=id_Secretary)
except ObjectDoesNotExist:
secretary = None
if secretary is not None:
if secretary.activate == False:
secretary.activate = True
secretary.release_activate_at = datetime.now()
secretary.save()
release = True
else:
release = False
else:
release = False
return release
def block_login(self, id_Secretary):
try:
secretary = Secretary.objects.get(id=id_Secretary)
except ObjectDoesNotExist:
secretary = None
if secretary is not None:
if secretary.activate == True:
secretary.activate = False
secretary.release_activate_at = None
secretary.save()
release = True
else:
release = False
else:
release = False
return release
def generate_superuser(self, name, phone_number, email, password):
user = User(first_name=name,username=email,email=email)
user.set_password(password)
user.is_superuser = True
user.save()
return user
def generate_user(self, name, phone_number, email, password):
user = User(first_name=name,username=email,email=email)
user.set_password(password)
user.save()
return user
def create_secretary(self, name, phone_number, email, password):
user = self.generate_user(name, phone_number, email, password)
secretary = Secretary (user=user,
phone_number=phone_number
)
secretary.save()
return secretary
def create_administrator(self, name, phone_number, email, password):
user = self.generate_superuser(name, phone_number, email, password)
admin = Administrator (user=user,
phone_number=phone_number
)
admin.save()
return admin
def update(self, input):
subject = "Login suspeito"
message = "O Secretario dolo do email: " + input + " realizou um login agora, este email eh informativo"
email_destination = self.user.email
email = Email()
email.send_email(subject, message, email_destination)
print(email_destination)
class Secretary (Employee, Observable):
is_superuser = False
activate = models.BooleanField(default=False)
release_activate_at = models.DateTimeField(null=True, blank=True)
observers_in_secretary = []
def listAllSecretaries(self):
secretaries = Secretary.objects.all()
return secretaries
def add_observers(self):
self.observers_in_secretary = Administrator.objects.all()
def remove_observers(self, input):
return
def notify_observers(self, input):
for observer in self.observers_in_secretary:
observer.update(input)
def date_time_release(self):
date_now = datetime.now()
if self.release_activate_at.date() == date_now.date():
if self.release_activate_at.hour - date_now.hour >= 6:
self.add_observers()
self.notify_observers(self.user.email)
else:
self.add_observers()
self.notify_observers(self.user.email)
class RecoveryPassword(models.Model):
usuario = models.OneToOneField(User, primary_key=True,blank=True)
token_hash = models.TextField(max_length = 60,blank=True)
date_expired = models.DateField(auto_now=True)
token_used = models.BooleanField(default=False)
def search_email_user(self, email):
self.usuario = User.objects.get(email=email)
def generate_hash(self):
plain_text = str(self.usuario.email) + str(self.usuario.password) + str(self.date_expired)
self.token_hash = hashlib.sha256(plain_text.encode('utf-8')).hexdigest()
def make_url(self):
return 'localhost:8000/users/recuperar_senha/' + str(self.token_hash)
def send_email_url(self, email):
self.search_email_user(email)
self.generate_hash()
self.search_token_user()
self.make_url()
send_mail(
'Troca de senha',
'Entre nesse link para mudar sua senha ' + self.make_url(),
EMAIL_HOST_USER,
[self.usuario.email],
fail_silently=False,
)
def search_token_user(self):
try:
recovery_password = RecoveryPassword.objects.get(usuario=self.usuario)
except ObjectDoesNotExist:
recovery_password = None
if recovery_password is None:
super(RecoveryPassword,self).save()
else:
recovery_password.token_hash = self.token_hash
recovery_password.token_used = False
recovery_password.save()
class Donor (models.Model):
name = models.CharField(max_length = 50, blank = False)
phone_number = models.CharField(max_length = 12)
email = models.CharField(max_length = 30, blank = True)
address = models.CharField(max_length = 200)
address_reference = models.CharField(max_length = 200, blank = True)
observations = models.TextField(blank = True)
donation_date = models.DateField()
user = models.OneToOneField(User,on_delete=models.CASCADE)
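# A minimal standalone sketch (not part of the models above) of the
# password-recovery token derivation used by RecoveryPassword.generate_hash()
# and make_url(): the token is the SHA-256 hex digest of the user's email,
# stored password hash and expiry date, and the recovery URL embeds it.
# The argument values in the guarded demo are hypothetical.
def _recovery_url_sketch(email, password_hash, date_expired):
    plain_text = str(email) + str(password_hash) + str(date_expired)
    token_hash = hashlib.sha256(plain_text.encode('utf-8')).hexdigest()
    return 'localhost:8000/users/recuperar_senha/' + token_hash

if __name__ == "__main__":
    print(_recovery_url_sketch('user@example.com', 'pbkdf2_sha256$...', '2018-01-01'))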
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import exception as db_exc
import oslo_messaging
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from sqlalchemy import sql
from neutron.common import constants
from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
L3_AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('router_scheduler_driver',
default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
help=_('Driver to use for scheduling '
'router to a default L3 agent')),
cfg.BoolOpt('router_auto_schedule', default=True,
help=_('Allow auto scheduling of routers to L3 agent.')),
cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
help=_('Automatically reschedule routers from offline L3 '
'agents to online L3 agents.')),
]
cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)
class RouterL3AgentBinding(model_base.BASEV2):
"""Represents binding between neutron routers and L3 agents."""
router_id = sa.Column(sa.String(36),
sa.ForeignKey("routers.id", ondelete='CASCADE'),
primary_key=True)
l3_agent = orm.relation(agents_db.Agent)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id", ondelete='CASCADE'),
primary_key=True)
class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
agentschedulers_db.AgentSchedulerDbMixin):
"""Mixin class to add l3 agent scheduler extension to plugins
using the l3 agent for routing.
"""
router_scheduler = None
def start_periodic_l3_agent_status_check(self):
if not cfg.CONF.allow_automatic_l3agent_failover:
LOG.info(_LI("Skipping period L3 agent status check because "
"automatic router rescheduling is disabled."))
return
self.setup_agent_status_check(
self.reschedule_routers_from_down_agents)
def reschedule_routers_from_down_agents(self):
"""Reschedule routers from down l3 agents if admin state is up."""
agent_dead_limit = self.agent_dead_limit_seconds()
self.wait_down_agents('L3', agent_dead_limit)
cutoff = self.get_cutoff_time(agent_dead_limit)
context = n_ctx.get_admin_context()
down_bindings = (
context.session.query(RouterL3AgentBinding).
join(agents_db.Agent).
filter(agents_db.Agent.heartbeat_timestamp < cutoff,
agents_db.Agent.admin_state_up).
outerjoin(l3_attrs_db.RouterExtraAttributes,
l3_attrs_db.RouterExtraAttributes.router_id ==
RouterL3AgentBinding.router_id).
filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
try:
for binding in down_bindings:
LOG.warn(_LW(
"Rescheduling router %(router)s from agent %(agent)s "
"because the agent did not report to the server in "
"the last %(dead_time)s seconds."),
{'router': binding.router_id,
'agent': binding.l3_agent_id,
'dead_time': agent_dead_limit})
try:
self.reschedule_router(context, binding.router_id)
except (l3agentscheduler.RouterReschedulingFailed,
oslo_messaging.RemoteError):
# Catch individual router rescheduling errors here
# so one broken one doesn't stop the iteration.
LOG.exception(_LE("Failed to reschedule router %s"),
binding.router_id)
except db_exc.DBError:
# Catch DB errors here so a transient DB connectivity issue
# doesn't stop the loopingcall.
LOG.exception(_LE("Exception encountered during router "
"rescheduling."))
def validate_agent_router_combination(self, context, agent, router):
"""Validate if the router can be correctly assigned to the agent.
:raises: RouterL3AgentMismatch if attempting to assign DVR router
to legacy agent, or centralized router to compute's L3 agents.
:raises: InvalidL3Agent if attempting to assign router to an
unsuitable agent (disabled, type != L3, incompatible configuration)
:raises: DVRL3CannotAssignToDvrAgent if attempting to assign DVR
router from one DVR Agent to another.
"""
is_distributed = router.get('distributed')
agent_conf = self.get_configuration_dict(agent)
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY)
router_type = (
'distributed' if is_distributed else
'centralized')
is_agent_router_types_incompatible = (
agent_mode == constants.L3_AGENT_MODE_DVR and not is_distributed
or agent_mode == constants.L3_AGENT_MODE_LEGACY and is_distributed
)
if is_agent_router_types_incompatible:
raise l3agentscheduler.RouterL3AgentMismatch(
router_type=router_type, router_id=router['id'],
agent_mode=agent_mode, agent_id=agent['id'])
if agent_mode == constants.L3_AGENT_MODE_DVR and is_distributed:
raise l3agentscheduler.DVRL3CannotAssignToDvrAgent(
router_type=router_type, router_id=router['id'],
agent_id=agent['id'])
is_wrong_type_or_unsuitable_agent = (
agent['agent_type'] != constants.AGENT_TYPE_L3 or
not agent['admin_state_up'] or
not self.get_l3_agent_candidates(context, router, [agent])
)
if is_wrong_type_or_unsuitable_agent:
raise l3agentscheduler.InvalidL3Agent(id=agent['id'])
def check_agent_router_scheduling_needed(self, context, agent, router):
"""Check if the router scheduling is needed.
:raises: RouterHostedByL3Agent if router is already assigned
to a different agent.
:returns: True if scheduling is needed, otherwise False
"""
router_id = router['id']
agent_id = agent['id']
query = context.session.query(RouterL3AgentBinding)
bindings = query.filter_by(router_id=router_id).all()
if not bindings:
return True
for binding in bindings:
if binding.l3_agent_id == agent_id:
# router already bound to the agent we need
return False
if router.get('distributed'):
return False
# non-dvr case: centralized router is already bound to some agent
raise l3agentscheduler.RouterHostedByL3Agent(
router_id=router_id,
agent_id=bindings[0].l3_agent_id)
def create_router_to_agent_binding(self, context, agent, router):
"""Create router to agent binding."""
router_id = router['id']
agent_id = agent['id']
if self.router_scheduler:
try:
self.router_scheduler.bind_router(context, router_id, agent)
except db_exc.DBError:
raise l3agentscheduler.RouterSchedulingFailed(
router_id=router_id, agent_id=agent_id)
def add_router_to_l3_agent(self, context, agent_id, router_id):
"""Add a l3 agent to host a router."""
with context.session.begin(subtransactions=True):
router = self.get_router(context, router_id)
agent = self._get_agent(context, agent_id)
self.validate_agent_router_combination(context, agent, router)
if self.check_agent_router_scheduling_needed(
context, agent, router):
self.create_router_to_agent_binding(context, agent, router)
else:
return
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_added_to_agent(
context, [router_id], agent.host)
def remove_router_from_l3_agent(self, context, agent_id, router_id):
"""Remove the router from l3 agent.
After removal, the router will be non-hosted until there is an update
which leads to rescheduling, or it is added to another agent manually.
"""
agent = self._get_agent(context, agent_id)
self._unbind_router(context, router_id, agent_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_removed_from_agent(
context, router_id, agent.host)
def _unbind_router(self, context, router_id, agent_id):
with context.session.begin(subtransactions=True):
query = context.session.query(RouterL3AgentBinding)
query = query.filter(
RouterL3AgentBinding.router_id == router_id,
RouterL3AgentBinding.l3_agent_id == agent_id)
query.delete()
def reschedule_router(self, context, router_id, candidates=None):
"""Reschedule router to a new l3 agent
Remove the router from the agent(s) currently hosting it and
schedule it again
"""
cur_agents = self.list_l3_agents_hosting_router(
context, router_id)['agents']
with context.session.begin(subtransactions=True):
for agent in cur_agents:
self._unbind_router(context, router_id, agent['id'])
new_agent = self.schedule_router(context, router_id,
candidates=candidates)
if not new_agent:
raise l3agentscheduler.RouterReschedulingFailed(
router_id=router_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
for agent in cur_agents:
l3_notifier.router_removed_from_agent(
context, router_id, agent['host'])
l3_notifier.router_added_to_agent(
context, [router_id], new_agent.host)
def list_routers_on_l3_agent(self, context, agent_id):
# Exception thrown if the requested agent does not exist.
self._get_agent(context, agent_id)
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)
router_ids = [item[0] for item in query]
if router_ids:
return {'routers':
self.get_routers(context, filters={'id': router_ids})}
else:
return {'routers': []}
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
router_ids):
if n_utils.is_extension_supported(self,
constants.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host,
router_ids=router_ids,
active=True)
return self.get_sync_data(context, router_ids=router_ids, active=True)
def list_active_sync_routers_on_active_l3_agent(
self, context, host, router_ids):
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(
RouterL3AgentBinding.l3_agent_id == agent.id)
if router_ids:
query = query.filter(
RouterL3AgentBinding.router_id.in_(router_ids))
router_ids = [item[0] for item in query]
if router_ids:
return self._get_active_l3_agent_routers_sync_data(context, host,
agent,
router_ids)
return []
def get_l3_agents_hosting_routers(self, context, router_ids,
admin_state_up=None,
active=None):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
if len(router_ids) > 1:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
else:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id == router_ids[0])
if admin_state_up is not None:
query = (query.filter(agents_db.Agent.admin_state_up ==
admin_state_up))
l3_agents = [binding.l3_agent for binding in query]
if active is not None:
l3_agents = [l3_agent for l3_agent in
l3_agents if not
agents_db.AgentDbMixin.is_agent_down(
l3_agent['heartbeat_timestamp'])]
return l3_agents
def _get_l3_bindings_hosting_routers(self, context, router_ids):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
if len(router_ids) > 1:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
else:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id == router_ids[0])
return query.all()
def list_l3_agents_hosting_router(self, context, router_id):
with context.session.begin(subtransactions=True):
bindings = self._get_l3_bindings_hosting_routers(
context, [router_id])
results = []
for binding in bindings:
l3_agent_dict = self._make_agent_dict(binding.l3_agent)
results.append(l3_agent_dict)
if results:
return {'agents': results}
else:
return {'agents': []}
def get_l3_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter(
agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
if filters:
for key, value in filters.iteritems():
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
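            # There is no 'agent_modes' column on Agent; below it is matched
            # by substring-searching the JSON-encoded 'configurations' field
            # for '"agent_mode": "<mode>"'.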
agent_modes = filters.get('agent_modes', [])
if agent_modes:
agent_mode_key = '\"agent_mode\": \"'
configuration_filter = (
[agents_db.Agent.configurations.contains('%s%s\"' %
(agent_mode_key, agent_mode))
for agent_mode in agent_modes])
query = query.filter(or_(*configuration_filter))
return [l3_agent
for l3_agent in query
if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
active, l3_agent)]
def check_ports_exist_on_l3agent(self, context, l3_agent, router_id):
"""
This function checks for existence of dvr serviceable
ports on the host, running the input l3agent.
"""
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
core_plugin = manager.NeutronManager.get_plugin()
filter = {'fixed_ips': {'subnet_id': subnet_ids}}
ports = core_plugin.get_ports(context, filters=filter)
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner']) and
l3_agent['host'] == port['binding:host_id']):
return True
return False
def get_snat_candidates(self, sync_router, l3_agents):
"""Get the valid snat enabled l3 agents for the distributed router."""
candidates = []
is_router_distributed = sync_router.get('distributed', False)
if not is_router_distributed:
return candidates
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY)
if agent_mode != constants.L3_AGENT_MODE_DVR_SNAT:
continue
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
if not use_namespaces and router_id != sync_router['id']:
continue
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
candidates.append(l3_agent)
return candidates
def get_l3_agent_candidates(self, context, sync_router, l3_agents):
"""Get the valid l3 agents for the router from a list of l3_agents."""
candidates = []
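        # An agent qualifies when it is admin-up, its namespace/gateway
        # configuration is compatible with the router, and its agent_mode
        # matches the router type: legacy/dvr_snat agents for centralized
        # routers, dvr* agents (with DVR-serviceable ports on their host)
        # for distributed routers.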
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY)
if not use_namespaces and router_id != sync_router['id']:
continue
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
is_router_distributed = sync_router.get('distributed', False)
if agent_mode in (
constants.L3_AGENT_MODE_LEGACY,
constants.L3_AGENT_MODE_DVR_SNAT) and (
not is_router_distributed):
candidates.append(l3_agent)
elif is_router_distributed and agent_mode.startswith(
constants.L3_AGENT_MODE_DVR) and (
self.check_ports_exist_on_l3agent(
context, l3_agent, sync_router['id'])):
candidates.append(l3_agent)
return candidates
def auto_schedule_routers(self, context, host, router_ids):
if self.router_scheduler:
return self.router_scheduler.auto_schedule_routers(
self, context, host, router_ids)
def schedule_router(self, context, router, candidates=None):
if self.router_scheduler:
return self.router_scheduler.schedule(
self, context, router, candidates=candidates)
def schedule_routers(self, context, routers):
"""Schedule the routers to l3 agents."""
for router in routers:
self.schedule_router(context, router, candidates=None)
def get_l3_agent_with_min_routers(self, context, agent_ids):
"""Return l3 agent with the least number of routers."""
query = context.session.query(
agents_db.Agent,
func.count(
RouterL3AgentBinding.router_id
).label('count')).outerjoin(RouterL3AgentBinding).group_by(
RouterL3AgentBinding.l3_agent_id).order_by('count')
res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
return res[0]
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Metrics Plugin."""
import argparse
import collections
import os.path
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.data import provider
from tensorboard.plugins import base_plugin
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.metrics import metrics_plugin
tf1.enable_eager_execution()
class MetricsPluginTest(tf.test.TestCase):
def setUp(self):
super(MetricsPluginTest, self).setUp()
self._logdir = self.get_temp_dir()
self._multiplexer = event_multiplexer.EventMultiplexer()
flags = argparse.Namespace(generic_data="true")
provider = data_provider.MultiplexerDataProvider(
self._multiplexer, self._logdir
)
ctx = base_plugin.TBContext(
flags=flags,
logdir=self._logdir,
multiplexer=self._multiplexer,
data_provider=provider,
)
self._plugin = metrics_plugin.MetricsPlugin(ctx)
### Writing utilities.
def _write_scalar(self, run, tag, description=None):
subdir = os.path.join(self._logdir, run)
writer = tf.summary.create_file_writer(subdir)
with writer.as_default():
tf.summary.scalar(tag, 42, step=0, description=description)
writer.flush()
self._multiplexer.AddRunsFromDirectory(self._logdir)
def _write_scalar_data(self, run, tag, data=[]):
"""Writes scalar data, starting at step 0.
Args:
run: string run name.
tag: string tag name.
data: list of scalar values to write at each step.
"""
subdir = os.path.join(self._logdir, run)
writer = tf.summary.create_file_writer(subdir)
with writer.as_default():
step = 0
for datum in data:
tf.summary.scalar(tag, datum, step=step)
step += 1
writer.flush()
self._multiplexer.AddRunsFromDirectory(self._logdir)
def _write_histogram(self, run, tag, description=None):
subdir = os.path.join(self._logdir, run)
writer = tf.summary.create_file_writer(subdir)
with writer.as_default():
data = tf.random.normal(shape=[3])
tf.summary.histogram(tag, data, step=0, description=description)
writer.flush()
self._multiplexer.AddRunsFromDirectory(self._logdir)
def _write_histogram_data(self, run, tag, data=[]):
"""Writes histogram data, starting at step 0.
Args:
run: string run name.
tag: string tag name.
data: list of histogram values to write at each step.
"""
subdir = os.path.join(self._logdir, run)
writer = tf.summary.create_file_writer(subdir)
with writer.as_default():
step = 0
for datum in data:
tf.summary.histogram(tag, datum, step=step)
step += 1
writer.flush()
self._multiplexer.AddRunsFromDirectory(self._logdir)
def _write_image(self, run, tag, samples=2, description=None):
subdir = os.path.join(self._logdir, run)
writer = tf.summary.create_file_writer(subdir)
with writer.as_default():
data = tf.random.normal(shape=[samples, 8, 8, 1])
tf.summary.image(
tag, data, step=0, max_outputs=samples, description=description
)
writer.flush()
self._multiplexer.AddRunsFromDirectory(self._logdir)
### Misc utilities.
def _clean_time_series_responses(self, responses):
"""Cleans non-deterministic data from a TimeSeriesResponse, in
place."""
for response in responses:
run_to_series = response.get("runToSeries", {})
for (run, series) in run_to_series.items():
for datum in series:
if "wallTime" in datum:
datum["wallTime"] = "<wall_time>"
# Clean images.
run_to_image_series = response.get("runToSeries", {})
for (run, series) in run_to_image_series.items():
for datum in series:
if "wallTime" in datum:
datum["wallTime"] = "<wall_time>"
if "imageId" in datum:
datum["imageId"] = "<image_id>"
return responses
def _get_image_blob_key(self, run, tag, step=0, sample=0):
"""Returns a single image's blob_key after it has been written."""
mapping = self._plugin._data_provider.read_blob_sequences(
context.RequestContext(),
experiment_id="expid",
plugin_name=image_metadata.PLUGIN_NAME,
downsample=10,
run_tag_filter=provider.RunTagFilter(tags=[tag]),
)
blob_sequence_datum = mapping[run][tag][step]
        # For images, the first two datum values hold the image dimensions,
        # so the sampled image data starts at index 2.
return blob_sequence_datum.values[2 + sample].blob_key
### Actual tests.
def test_routes_provided(self):
"""Tests that the plugin offers the correct routes."""
routes = self._plugin.get_plugin_apps()
self.assertIsInstance(routes["/tags"], collections.Callable)
def test_tags_empty(self):
response = self._plugin._tags_impl(context.RequestContext(), "eid")
expected_tags = {
"runTagInfo": {},
"tagDescriptions": {},
}
self.assertEqual(expected_tags, response["scalars"])
self.assertEqual(expected_tags, response["histograms"])
self.assertEqual(
{
"tagDescriptions": {},
"tagRunSampledInfo": {},
},
response["images"],
)
def test_tags(self):
self._write_scalar("run1", "scalars/tagA", None)
self._write_scalar("run1", "scalars/tagA", None)
self._write_scalar("run1", "scalars/tagB", None)
self._write_scalar("run2", "scalars/tagB", None)
self._write_histogram("run1", "histograms/tagA", None)
self._write_histogram("run1", "histograms/tagA", None)
self._write_histogram("run1", "histograms/tagB", None)
self._write_histogram("run2", "histograms/tagB", None)
self._write_image("run1", "images/tagA", 1, None)
self._write_image("run1", "images/tagA", 2, None)
self._write_image("run1", "images/tagB", 3, None)
self._write_image("run2", "images/tagB", 4, None)
self._multiplexer.Reload()
response = self._plugin._tags_impl(context.RequestContext(), "eid")
self.assertEqual(
{
"runTagInfo": {
"run1": ["scalars/tagA", "scalars/tagB"],
"run2": ["scalars/tagB"],
},
"tagDescriptions": {},
},
response["scalars"],
)
self.assertEqual(
{
"runTagInfo": {
"run1": ["histograms/tagA", "histograms/tagB"],
"run2": ["histograms/tagB"],
},
"tagDescriptions": {},
},
response["histograms"],
)
self.assertEqual(
{
"tagDescriptions": {},
"tagRunSampledInfo": {
"images/tagA": {"run1": {"maxSamplesPerStep": 2}},
"images/tagB": {
"run1": {"maxSamplesPerStep": 3},
"run2": {"maxSamplesPerStep": 4},
},
},
},
response["images"],
)
def test_tags_with_descriptions(self):
self._write_scalar("run1", "scalars/tagA", "Describing tagA")
self._write_scalar("run1", "scalars/tagB", "Describing tagB")
self._write_scalar("run2", "scalars/tagB", "Describing tagB")
self._write_histogram("run1", "histograms/tagA", "Describing tagA")
self._write_histogram("run1", "histograms/tagB", "Describing tagB")
self._write_histogram("run2", "histograms/tagB", "Describing tagB")
self._write_image("run1", "images/tagA", 1, "Describing tagA")
self._write_image("run1", "images/tagB", 2, "Describing tagB")
self._write_image("run2", "images/tagB", 3, "Describing tagB")
self._multiplexer.Reload()
response = self._plugin._tags_impl(context.RequestContext(), "eid")
self.assertEqual(
{
"runTagInfo": {
"run1": ["scalars/tagA", "scalars/tagB"],
"run2": ["scalars/tagB"],
},
"tagDescriptions": {
"scalars/tagA": "<p>Describing tagA</p>",
"scalars/tagB": "<p>Describing tagB</p>",
},
},
response["scalars"],
)
self.assertEqual(
{
"runTagInfo": {
"run1": ["histograms/tagA", "histograms/tagB"],
"run2": ["histograms/tagB"],
},
"tagDescriptions": {
"histograms/tagA": "<p>Describing tagA</p>",
"histograms/tagB": "<p>Describing tagB</p>",
},
},
response["histograms"],
)
self.assertEqual(
{
"tagDescriptions": {
"images/tagA": "<p>Describing tagA</p>",
"images/tagB": "<p>Describing tagB</p>",
},
"tagRunSampledInfo": {
"images/tagA": {"run1": {"maxSamplesPerStep": 1}},
"images/tagB": {
"run1": {"maxSamplesPerStep": 2},
"run2": {"maxSamplesPerStep": 3},
},
},
},
response["images"],
)
def test_tags_conflicting_description(self):
self._write_scalar("run1", "scalars/tagA", None)
self._write_scalar("run2", "scalars/tagA", "tagA is hot")
self._write_scalar("run3", "scalars/tagA", "tagA is cold")
self._write_scalar("run4", "scalars/tagA", "tagA is cold")
self._write_histogram("run1", "histograms/tagA", None)
self._write_histogram("run2", "histograms/tagA", "tagA is hot")
self._write_histogram("run3", "histograms/tagA", "tagA is cold")
self._write_histogram("run4", "histograms/tagA", "tagA is cold")
self._multiplexer.Reload()
response = self._plugin._tags_impl(context.RequestContext(), "eid")
expected_composite_description = (
"<h1>Multiple descriptions</h1>\n"
"<h2>For runs: run3, run4</h2>\n"
"<p>tagA is cold</p>\n"
"<h2>For run: run2</h2>\n"
"<p>tagA is hot</p>"
)
self.assertEqual(
{"scalars/tagA": expected_composite_description},
response["scalars"]["tagDescriptions"],
)
self.assertEqual(
{"histograms/tagA": expected_composite_description},
response["histograms"]["tagDescriptions"],
)
def test_tags_unsafe_description(self):
self._write_scalar("<&#run>", "scalars/<&#tag>", "<&#description>")
self._write_histogram(
"<&#run>", "histograms/<&#tag>", "<&#description>"
)
self._multiplexer.Reload()
response = self._plugin._tags_impl(context.RequestContext(), "eid")
self.assertEqual(
{"scalars/<&#tag>": "<p><&#description></p>"},
response["scalars"]["tagDescriptions"],
)
self.assertEqual(
{"histograms/<&#tag>": "<p><&#description></p>"},
response["histograms"]["tagDescriptions"],
)
def test_tags_unsafe_conflicting_description(self):
self._write_scalar("<&#run1>", "scalars/<&#tag>", None)
self._write_scalar("<&#run2>", "scalars/<&#tag>", "<&# is hot>")
self._write_scalar("<&#run3>", "scalars/<&#tag>", "<&# is cold>")
self._write_scalar("<&#run4>", "scalars/<&#tag>", "<&# is cold>")
self._write_histogram("<&#run1>", "histograms/<&#tag>", None)
self._write_histogram("<&#run2>", "histograms/<&#tag>", "<&# is hot>")
self._write_histogram("<&#run3>", "histograms/<&#tag>", "<&# is cold>")
self._write_histogram("<&#run4>", "histograms/<&#tag>", "<&# is cold>")
self._multiplexer.Reload()
response = self._plugin._tags_impl(context.RequestContext(), "eid")
expected_composite_description = (
"<h1>Multiple descriptions</h1>\n"
"<h2>For runs: <&#run3>, <&#run4></h2>\n"
"<p><&# is cold></p>\n"
"<h2>For run: <&#run2></h2>\n"
"<p><&# is hot></p>"
)
self.assertEqual(
{"scalars/<&#tag>": expected_composite_description},
response["scalars"]["tagDescriptions"],
)
self.assertEqual(
{"histograms/<&#tag>": expected_composite_description},
response["histograms"]["tagDescriptions"],
)
def test_time_series_scalar(self):
self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200])
self._multiplexer.Reload()
requests = [{"plugin": "scalars", "tag": "scalars/tagA"}]
response = self._plugin._time_series_impl(
context.RequestContext(), "", requests
)
clean_response = self._clean_time_series_responses(response)
self.assertEqual(
[
{
"plugin": "scalars",
"tag": "scalars/tagA",
"runToSeries": {
"run1": [
{
"wallTime": "<wall_time>",
"step": 0,
"value": 0.0,
},
{
"wallTime": "<wall_time>",
"step": 1,
"value": 100.0,
},
{
"wallTime": "<wall_time>",
"step": 2,
"value": -200.0,
},
]
},
}
],
clean_response,
)
def test_time_series_histogram(self):
self._write_histogram_data("run1", "histograms/tagA", [0, 10])
self._multiplexer.Reload()
requests = [
{"plugin": "histograms", "tag": "histograms/tagA", "run": "run1"}
]
response = self._plugin._time_series_impl(
context.RequestContext(), "", requests
)
clean_response = self._clean_time_series_responses(response)
# By default 30 bins will be generated.
bins_zero = [{"min": 0, "max": 0, "count": 0}] * 29 + [
{"min": 0, "max": 0, "count": 1.0}
]
bins_ten = [{"min": 10, "max": 10, "count": 0}] * 29 + [
{"min": 10, "max": 10, "count": 1.0}
]
self.assertEqual(
[
{
"plugin": "histograms",
"tag": "histograms/tagA",
"run": "run1",
"runToSeries": {
"run1": [
{
"wallTime": "<wall_time>",
"step": 0,
"bins": bins_zero,
},
{
"wallTime": "<wall_time>",
"step": 1,
"bins": bins_ten,
},
]
},
}
],
clean_response,
)
def test_time_series_unmatching_request(self):
self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200])
self._multiplexer.Reload()
requests = [{"plugin": "scalars", "tag": "nothing-matches"}]
response = self._plugin._time_series_impl(
context.RequestContext(), "", requests
)
clean_response = self._clean_time_series_responses(response)
self.assertEqual(
[
{
"plugin": "scalars",
"runToSeries": {},
"tag": "nothing-matches",
}
],
clean_response,
)
def test_time_series_multiple_runs(self):
self._write_scalar_data("run1", "scalars/tagA", [0])
self._write_scalar_data("run2", "scalars/tagA", [1])
self._write_scalar_data("run2", "scalars/tagB", [2])
self._multiplexer.Reload()
requests = [{"plugin": "scalars", "tag": "scalars/tagA"}]
response = self._plugin._time_series_impl(
context.RequestContext(), "", requests
)
clean_response = self._clean_time_series_responses(response)
self.assertEqual(
[
{
"plugin": "scalars",
"runToSeries": {
"run1": [
{
"step": 0,
"value": 0.0,
"wallTime": "<wall_time>",
},
],
"run2": [
{
"step": 0,
"value": 1.0,
"wallTime": "<wall_time>",
},
],
},
"tag": "scalars/tagA",
}
],
clean_response,
)
def test_time_series_multiple_requests(self):
self._write_scalar_data("run1", "scalars/tagA", [0])
self._write_scalar_data("run2", "scalars/tagB", [1])
self._multiplexer.Reload()
requests = [
{"plugin": "scalars", "tag": "scalars/tagA"},
{"plugin": "scalars", "tag": "scalars/tagB"},
{"plugin": "scalars", "tag": "scalars/tagB"},
]
response = self._plugin._time_series_impl(
context.RequestContext(), "", requests
)
clean_response = self._clean_time_series_responses(response)
self.assertEqual(
[
{
"plugin": "scalars",
"runToSeries": {
"run1": [
{
"step": 0,
"value": 0.0,
"wallTime": "<wall_time>",
},
],
},
"tag": "scalars/tagA",
},
{
"plugin": "scalars",
"runToSeries": {
"run2": [
{
"step": 0,
"value": 1.0,
"wallTime": "<wall_time>",
},
],
},
"tag": "scalars/tagB",
},
{
"plugin": "scalars",
"runToSeries": {
"run2": [
{
"step": 0,
"value": 1.0,
"wallTime": "<wall_time>",
},
],
},
"tag": "scalars/tagB",
},
],
clean_response,
)
def test_time_series_single_request_specific_run(self):
self._write_scalar_data("run1", "scalars/tagA", [0])
self._write_scalar_data("run2", "scalars/tagA", [1])
self._multiplexer.Reload()
requests = [{"plugin": "scalars", "tag": "scalars/tagA", "run": "run2"}]
response = self._plugin._time_series_impl(
context.RequestContext(), "", requests
)
clean_response = self._clean_time_series_responses(response)
self.assertEqual(
[
{
"plugin": "scalars",
"runToSeries": {
"run2": [
{
"step": 0,
"value": 1.0,
"wallTime": "<wall_time>",
},
],
},
"tag": "scalars/tagA",
"run": "run2",
}
],
clean_response,
)
def test_image_data(self):
self._write_image("run1", "images/tagA", 1, None)
self._multiplexer.Reload()
# Get the blob_key manually.
image_id = self._get_image_blob_key(
"run1", "images/tagA", step=0, sample=0
)
(data, content_type) = self._plugin._image_data_impl(
context.RequestContext(), image_id
)
self.assertIsInstance(data, bytes)
self.assertEqual(content_type, "image/png")
self.assertGreater(len(data), 0)
def test_time_series_bad_arguments(self):
requests = [
{"plugin": "images"},
{"plugin": "unknown_plugin", "tag": "tagA"},
]
response = self._plugin._time_series_impl(
context.RequestContext(), "expid", requests
)
errors = [
series_response.get("error", "") for series_response in response
]
self.assertEqual(errors, ["Missing tag", "Invalid plugin"])
def test_image_data_from_time_series_query(self):
self._write_image("run1", "images/tagA", samples=3)
self._multiplexer.Reload()
requests = [
{
"plugin": "images",
"tag": "images/tagA",
"run": "run1",
"sample": 2,
}
]
original_response = self._plugin._time_series_impl(
context.RequestContext(), "expid", requests
)
response = self._plugin._time_series_impl(
context.RequestContext(), "expid", requests
)
clean_response = self._clean_time_series_responses(response)
self.assertEqual(
[
{
"plugin": "images",
"tag": "images/tagA",
"run": "run1",
"sample": 2,
"runToSeries": {
"run1": [
{
"wallTime": "<wall_time>",
"step": 0,
"imageId": "<image_id>",
}
]
},
}
],
clean_response,
)
image_id = original_response[0]["runToSeries"]["run1"][0]["imageId"]
(data, content_type) = self._plugin._image_data_impl(
context.RequestContext(), image_id
)
self.assertIsInstance(data, bytes)
self.assertGreater(len(data), 0)
def test_image_bad_request(self):
self._write_image("run1", "images/tagA", 1, None)
self._multiplexer.Reload()
invalid_sample = 999
requests = [
{
"plugin": "images",
"tag": "images/tagA",
"sample": invalid_sample,
"run": "run1",
},
{"plugin": "images", "tag": "images/tagA", "run": "run1"},
{
"plugin": "images",
"tag": "images/tagA",
},
]
response = self._plugin._time_series_impl(
context.RequestContext(), "expid", requests
)
errors = [
series_response.get("error", "") for series_response in response
]
self.assertEqual(errors, ["", "Missing sample", "Missing run"])
if __name__ == "__main__":
tf.test.main()
|
|
from unittest import mock
from zerver.lib.actions import check_add_realm_emoji, do_create_realm, do_create_user
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_test_image_file
from zerver.models import Realm, RealmEmoji, UserProfile, get_realm
class RealmEmojiTest(ZulipTestCase):
def create_test_emoji(self, name: str, author: UserProfile) -> RealmEmoji:
with get_test_image_file('img.png') as img_file:
realm_emoji = check_add_realm_emoji(realm=author.realm,
name=name,
author=author,
image_file=img_file)
if realm_emoji is None:
raise Exception("Error creating test emoji.") # nocoverage
return realm_emoji
def create_test_emoji_with_no_author(self, name: str, realm: Realm) -> RealmEmoji:
realm_emoji = RealmEmoji.objects.create(realm=realm, name=name)
return realm_emoji
def test_list(self) -> None:
emoji_author = self.example_user('iago')
self.login_user(emoji_author)
self.create_test_emoji('my_emoji', emoji_author)
result = self.client_get("/json/realm/emoji")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
self.assertEqual(len(result.json()["emoji"]), 2)
def test_list_no_author(self) -> None:
self.login('iago')
realm = get_realm('zulip')
realm_emoji = self.create_test_emoji_with_no_author('my_emoji', realm)
result = self.client_get("/json/realm/emoji")
self.assert_json_success(result)
content = result.json()
self.assertEqual(len(content["emoji"]), 2)
test_emoji = content["emoji"][str(realm_emoji.id)]
self.assertIsNone(test_emoji['author_id'])
def test_list_admins_only(self) -> None:
        # Test that the realm emoji list is public and that realm emoji
        # with no author are also included in the list.
self.login('othello')
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = True
realm.save()
realm_emoji = self.create_test_emoji_with_no_author('my_emoji', realm)
result = self.client_get("/json/realm/emoji")
self.assert_json_success(result)
content = result.json()
self.assertEqual(len(content["emoji"]), 2)
test_emoji = content["emoji"][str(realm_emoji.id)]
self.assertIsNone(test_emoji['author_id'])
def test_upload(self) -> None:
user = self.example_user('iago')
email = user.email
self.login_user(user)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
realm_emoji = RealmEmoji.objects.get(name="my_emoji")
self.assertEqual(realm_emoji.author.email, email)
result = self.client_get("/json/realm/emoji")
content = result.json()
self.assert_json_success(result)
self.assertEqual(len(content["emoji"]), 2)
test_emoji = content["emoji"][str(realm_emoji.id)]
self.assertIn('author_id', test_emoji)
author = UserProfile.objects.get(id = test_emoji['author_id'])
self.assertEqual(author.email, email)
def test_realm_emoji_repr(self) -> None:
realm_emoji = RealmEmoji.objects.get(name='green_tick')
file_name = str(realm_emoji.id) + '.png'
self.assertEqual(
str(realm_emoji),
f'<RealmEmoji(zulip): {realm_emoji.id} green_tick False {file_name}>',
)
def test_upload_exception(self) -> None:
self.login('iago')
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_em*oji', info=emoji_data)
self.assert_json_error(result, 'Invalid characters in emoji name')
def test_upload_uppercase_exception(self) -> None:
self.login('iago')
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_EMoji', info=emoji_data)
self.assert_json_error(result, 'Invalid characters in emoji name')
def test_missing_name_exception(self) -> None:
self.login('iago')
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/', info=emoji_data)
self.assert_json_error(result, 'Emoji name is missing')
def test_upload_admins_only(self) -> None:
self.login('othello')
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = True
realm.save()
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_error(result, 'Must be an organization administrator')
def test_upload_anyone(self) -> None:
self.login('othello')
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = False
realm.save()
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
def test_emoji_upload_by_guest_user(self) -> None:
self.login('polonius')
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_error(result, 'Not allowed for guest users')
def test_delete(self) -> None:
emoji_author = self.example_user('iago')
self.login_user(emoji_author)
realm_emoji = self.create_test_emoji('my_emoji', emoji_author)
result = self.client_delete('/json/realm/emoji/my_emoji')
self.assert_json_success(result)
result = self.client_get("/json/realm/emoji")
emojis = result.json()["emoji"]
self.assert_json_success(result)
# We only mark an emoji as deactivated instead of
# removing it from the database.
self.assertEqual(len(emojis), 2)
test_emoji = emojis[str(realm_emoji.id)]
self.assertEqual(test_emoji["deactivated"], True)
def test_delete_no_author(self) -> None:
self.login('iago')
realm = get_realm('zulip')
self.create_test_emoji_with_no_author('my_emoji', realm)
result = self.client_delete('/json/realm/emoji/my_emoji')
self.assert_json_success(result)
def test_delete_admins_only(self) -> None:
emoji_author = self.example_user('othello')
self.login_user(emoji_author)
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = True
realm.save()
self.create_test_emoji_with_no_author("my_emoji", realm)
result = self.client_delete("/json/realm/emoji/my_emoji")
self.assert_json_error(result, 'Must be an organization administrator')
def test_delete_admin_or_author(self) -> None:
# If any user in a realm can upload the emoji then the user who
# uploaded it as well as the admin should be able to delete it.
emoji_author = self.example_user('othello')
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = False
realm.save()
self.create_test_emoji('my_emoji_1', emoji_author)
self.login_user(emoji_author)
result = self.client_delete("/json/realm/emoji/my_emoji_1")
self.assert_json_success(result)
self.logout()
self.create_test_emoji('my_emoji_2', emoji_author)
self.login('iago')
result = self.client_delete("/json/realm/emoji/my_emoji_2")
self.assert_json_success(result)
self.logout()
self.create_test_emoji('my_emoji_3', emoji_author)
self.login('cordelia')
result = self.client_delete("/json/realm/emoji/my_emoji_3")
self.assert_json_error(result, 'Must be an organization administrator or emoji author')
def test_delete_exception(self) -> None:
self.login('iago')
result = self.client_delete("/json/realm/emoji/invalid_emoji")
self.assert_json_error(result, "Emoji 'invalid_emoji' does not exist")
def test_multiple_upload(self) -> None:
self.login('iago')
with get_test_image_file('img.png') as fp1, get_test_image_file('img.png') as fp2:
result = self.client_post('/json/realm/emoji/my_emoji', {'f1': fp1, 'f2': fp2})
self.assert_json_error(result, 'You must upload exactly one file.')
def test_emoji_upload_file_size_error(self) -> None:
self.login('iago')
with get_test_image_file('img.png') as fp:
with self.settings(MAX_EMOJI_FILE_SIZE=0):
result = self.client_post('/json/realm/emoji/my_emoji', {'file': fp})
self.assert_json_error(result, 'Uploaded file is larger than the allowed limit of 0 MiB')
def test_upload_already_existed_emoji(self) -> None:
self.login('iago')
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/green_tick', info=emoji_data)
self.assert_json_error(result, 'A custom emoji with this name already exists.')
def test_reupload(self) -> None:
        # A user should be able to re-upload an emoji with the same name.
self.login('iago')
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
result = self.client_delete("/json/realm/emoji/my_emoji")
self.assert_json_success(result)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
result = self.client_get("/json/realm/emoji")
emojis = result.json()["emoji"]
self.assert_json_success(result)
self.assertEqual(len(emojis), 3)
def test_failed_file_upload(self) -> None:
self.login('iago')
with mock.patch('zerver.lib.upload.write_local_file', side_effect=Exception()):
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_error(result, "Image file upload failed.")
def test_check_admin_realm_emoji(self) -> None:
        # Test that a user A is able to remove a realm emoji uploaded by them
        # that has the same name as a deactivated realm emoji uploaded by
        # some other user B.
emoji_author_1 = self.example_user('cordelia')
self.create_test_emoji('test_emoji', emoji_author_1)
self.login_user(emoji_author_1)
result = self.client_delete('/json/realm/emoji/test_emoji')
self.assert_json_success(result)
emoji_author_2 = self.example_user('othello')
self.create_test_emoji('test_emoji', emoji_author_2)
self.login_user(emoji_author_2)
result = self.client_delete('/json/realm/emoji/test_emoji')
self.assert_json_success(result)
def test_check_admin_different_realm_emoji(self) -> None:
# Test that two different realm emojis in two different realms but
# having same name can be administered independently.
realm_1 = do_create_realm('test_realm', 'test_realm')
emoji_author_1 = do_create_user('abc@example.com',
password='abc',
realm=realm_1,
full_name='abc',
short_name='abc')
self.create_test_emoji('test_emoji', emoji_author_1)
emoji_author_2 = self.example_user('othello')
self.create_test_emoji('test_emoji', emoji_author_2)
self.login_user(emoji_author_2)
result = self.client_delete('/json/realm/emoji/test_emoji')
self.assert_json_success(result)
|
|
"""Support for Synology NAS Sensors."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_PORT, CONF_SSL,
ATTR_ATTRIBUTION, TEMP_CELSIUS, CONF_MONITORED_CONDITIONS,
EVENT_HOMEASSISTANT_START, CONF_DISKS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = 'Data provided by Synology'
CONF_VOLUMES = 'volumes'
DEFAULT_NAME = 'Synology DSM'
DEFAULT_PORT = 5001
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
_UTILISATION_MON_COND = {
'cpu_other_load': ['CPU Load (Other)', '%', 'mdi:chip'],
'cpu_user_load': ['CPU Load (User)', '%', 'mdi:chip'],
'cpu_system_load': ['CPU Load (System)', '%', 'mdi:chip'],
'cpu_total_load': ['CPU Load (Total)', '%', 'mdi:chip'],
'cpu_1min_load': ['CPU Load (1 min)', '%', 'mdi:chip'],
'cpu_5min_load': ['CPU Load (5 min)', '%', 'mdi:chip'],
'cpu_15min_load': ['CPU Load (15 min)', '%', 'mdi:chip'],
'memory_real_usage': ['Memory Usage (Real)', '%', 'mdi:memory'],
'memory_size': ['Memory Size', 'Mb', 'mdi:memory'],
'memory_cached': ['Memory Cached', 'Mb', 'mdi:memory'],
'memory_available_swap': ['Memory Available (Swap)', 'Mb', 'mdi:memory'],
'memory_available_real': ['Memory Available (Real)', 'Mb', 'mdi:memory'],
'memory_total_swap': ['Memory Total (Swap)', 'Mb', 'mdi:memory'],
'memory_total_real': ['Memory Total (Real)', 'Mb', 'mdi:memory'],
'network_up': ['Network Up', 'Kbps', 'mdi:upload'],
'network_down': ['Network Down', 'Kbps', 'mdi:download'],
}
_STORAGE_VOL_MON_COND = {
'volume_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'volume_device_type': ['Type', None, 'mdi:harddisk'],
'volume_size_total': ['Total Size', None, 'mdi:chart-pie'],
'volume_size_used': ['Used Space', None, 'mdi:chart-pie'],
'volume_percentage_used': ['Volume Used', '%', 'mdi:chart-pie'],
'volume_disk_temp_avg': ['Average Disk Temp', None, 'mdi:thermometer'],
'volume_disk_temp_max': ['Maximum Disk Temp', None, 'mdi:thermometer'],
}
_STORAGE_DSK_MON_COND = {
'disk_name': ['Name', None, 'mdi:harddisk'],
'disk_device': ['Device', None, 'mdi:dots-horizontal'],
'disk_smart_status': ['Status (Smart)', None,
'mdi:checkbox-marked-circle-outline'],
'disk_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'disk_exceed_bad_sector_thr': ['Exceeded Max Bad Sectors', None,
'mdi:test-tube'],
'disk_below_remain_life_thr': ['Below Min Remaining Life', None,
'mdi:test-tube'],
'disk_temp': ['Temperature', None, 'mdi:thermometer'],
}
_MONITORED_CONDITIONS = list(_UTILISATION_MON_COND.keys()) + \
list(_STORAGE_VOL_MON_COND.keys()) + \
list(_STORAGE_DSK_MON_COND.keys())
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=True): cv.boolean,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]),
vol.Optional(CONF_DISKS): cv.ensure_list,
vol.Optional(CONF_VOLUMES): cv.ensure_list,
})
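# A minimal sketch of the kind of config dict this schema accepts; the host
# and credentials below are placeholders, and omitted optional keys fall back
# to the schema defaults:
#
#   {
#       'host': '192.168.1.2',
#       'username': 'monitor',
#       'password': 'secret',
#       'monitored_conditions': ['cpu_total_load', 'volume_percentage_used',
#                                'disk_temp'],
#   }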
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Synology NAS Sensor."""
def run_setup(event):
"""Wait until Home Assistant is fully initialized before creating.
Delay the setup until Home Assistant is fully initialized.
This allows any entities to be created already
"""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_ssl = config.get(CONF_SSL)
unit = hass.config.units.temperature_unit
monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
api = SynoApi(host, port, username, password, unit, use_ssl)
sensors = [SynoNasUtilSensor(
api, variable, _UTILISATION_MON_COND[variable])
for variable in monitored_conditions
if variable in _UTILISATION_MON_COND]
# Handle all volumes
for volume in config.get(CONF_VOLUMES, api.storage.volumes):
sensors += [SynoNasStorageSensor(
api, variable, _STORAGE_VOL_MON_COND[variable], volume)
for variable in monitored_conditions
if variable in _STORAGE_VOL_MON_COND]
# Handle all disks
for disk in config.get(CONF_DISKS, api.storage.disks):
sensors += [SynoNasStorageSensor(
api, variable, _STORAGE_DSK_MON_COND[variable], disk)
for variable in monitored_conditions
if variable in _STORAGE_DSK_MON_COND]
add_entities(sensors, True)
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SynoApi:
"""Class to interface with Synology DSM API."""
def __init__(self, host, port, username, password, temp_unit, use_ssl):
"""Initialize the API wrapper class."""
from SynologyDSM import SynologyDSM
self.temp_unit = temp_unit
try:
self._api = SynologyDSM(host, port, username, password,
use_https=use_ssl)
except: # noqa: E722 pylint: disable=bare-except
_LOGGER.error("Error setting up Synology DSM")
# Will be updated when update() gets called.
self.utilisation = self._api.utilisation
self.storage = self._api.storage
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update function for updating api information."""
self._api.update()
class SynoNasSensor(Entity):
"""Representation of a Synology NAS Sensor."""
def __init__(self, api, variable, variable_info, monitor_device=None):
"""Initialize the sensor."""
self.var_id = variable
self.var_name = variable_info[0]
self.var_units = variable_info[1]
self.var_icon = variable_info[2]
self.monitor_device = monitor_device
self._api = api
@property
def name(self):
"""Return the name of the sensor, if any."""
if self.monitor_device is not None:
return "{} ({})".format(self.var_name, self.monitor_device)
return self.var_name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self.var_id in ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']:
return self._api.temp_unit
return self.var_units
def update(self):
"""Get the latest data for the states."""
if self._api is not None:
self._api.update()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
}
class SynoNasUtilSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
network_sensors = ['network_up', 'network_down']
memory_sensors = ['memory_size', 'memory_cached',
'memory_available_swap', 'memory_available_real',
'memory_total_swap', 'memory_total_real']
if self.var_id in network_sensors or self.var_id in memory_sensors:
attr = getattr(self._api.utilisation, self.var_id)(False)
if self.var_id in network_sensors:
return round(attr / 1024.0, 1)
if self.var_id in memory_sensors:
return round(attr / 1024.0 / 1024.0, 1)
else:
return getattr(self._api.utilisation, self.var_id)
class SynoNasStorageSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
temp_sensors = ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']
if self.monitor_device is not None:
if self.var_id in temp_sensors:
attr = getattr(
self._api.storage, self.var_id)(self.monitor_device)
if attr is None:
return None
if self._api.temp_unit == TEMP_CELSIUS:
return attr
return round(attr * 1.8 + 32.0, 1)
return getattr(self._api.storage, self.var_id)(self.monitor_device)
|
|
#!/usr/bin/env python
# =================================================================================================
# check_CloudEndure_replication.py
#
# Version 2016-02-02
#
# By Stefan Wuensch, Jan. 2016
#
# This script is a Nagios plugin which will query the CloudEndure API for the
# replication / sync status of a host. (CloudEndure is a server-replication
# provider, allowing migration and/or DR.) https://www.cloudendure.com/
# Disclaimer: I have no affiliation with CloudEndure; my employer is a customer of CloudEndure.
#
#
# usage: check_CloudEndure_replication.py [-h] [-v] -u USERNAME -p PASSWORD
# [-n HOSTNAME]
#
# Nagios check of the sync status of CloudEndure replication. Exit status 0 ==
# OK, 1 == Warning, 2 == Critical, 3 == Unknown.
#
# optional arguments:
# -h, --help show this help message and exit
# -v, --verbose increase output verbosity
# -u USERNAME, --username USERNAME
# user name for the CloudEndure account - required
# -p PASSWORD, --password PASSWORD
# password for the CloudEndure account - required
# -n HOSTNAME, --hostname HOSTNAME
# hostname of instance to check, or "all" (defaults to
# "all" if not specified)
#
#
#
# Required inputs: CloudEndure username and password.
# Optional inputs: A host name (expected to be FQDN, but not mandatory) to check
#
# Outputs: One line of text containing the explanation of the replication status. Note that
# this will be one line no matter how many hosts are found (in the case of "all")
#
# Exit status: 0, 1, 2, 3 as standard Nagios status codes. See EXIT_STATUS_DICT for mapping.
#
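# Example invocation (hostname and credentials are placeholders):
#
#   ./check_CloudEndure_replication.py -u nagios@example.com -p 's3cret' -n web01.example.com
#
# On success this prints a single line such as
#   web01.example.com last update 2016-01-31 10:15:42, 5 minutes, 12 seconds ago
# and exits 0; a stale or broken replica produces a Warning/Critical message
# and the corresponding exit status.
#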
# =================================================================================================
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wuensch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =================================================================================================
#
# To Do:
# - turn the Warning and Critical constants into optional arguments
# - make the Location an optional argument, instead of hard-coded "originalLocation".
# (The two Locations we might want to watch are "originalLocation" and "mirrorLocation".)
#
# =================================================================================================
import httplib, json, re, sys, argparse, time, calendar
from datetime import datetime
# Dictionary for exit status codes
EXIT_STATUS_DICT = {
"OK": 0,
"WARNING": 1,
"CRITICAL": 2,
"UNKNOWN": 3
}
# Dictionary for looking up the status string from the value
EXIT_STATUS_DICT_REVERSE = {
0: "OK",
1: "WARNING",
2: "CRITICAL",
3: "UNKNOWN"
}
# To do: make these optional args
WARNING_SYNC_DELAY = 1800 # Number of seconds over which it's a Warning - we will forgive any sync delay up to 30 min.
CRITICAL_SYNC_DELAY = 3600 # Number of seconds (equals 1 hour) beyond which it's Critical
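# For example, with the values above a replica whose last consistency point is
# 2500 seconds old is a WARNING, one 4000 seconds old is a CRITICAL, and
# anything 1800 seconds old or newer is OK.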
CLOUDENDURE_API_HOST = "api.cloudendure.com"
###################################################################################################
def exit_with_message( message = "Something not defined", exitCode = EXIT_STATUS_DICT[ 'UNKNOWN' ] ):
# Output a message and exit
#
# Usage: exit_with_message( string, int )
# 'string' is printed to STDOUT
# 'int' is used for the exit status
#
# Returns: nothing - will always exit
#
# Note the default values.
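	# Example (message text is illustrative):
	#   exit_with_message( "host01.example.com is \"Stalled\" not \"Replicated\" !!", EXIT_STATUS_DICT[ 'CRITICAL' ] )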
prefix = ""
if exitCode == EXIT_STATUS_DICT[ 'UNKNOWN' ]: prefix = "Error: " # Add additional info at beginning
print "{0}{1}".format( prefix, message )
# Try and do a proper logout (because they want that) but NOT if we got here because of
# an 'Unknown' state! If we tried to do a 'logout' call on 'Unknown' we'd be risking an
# endless loop of send_request() fail bringing us back here again. Ugly.
# (Nagios would eventually time out this script, but let's not even risk it.)
if exitCode != EXIT_STATUS_DICT[ 'UNKNOWN' ]:
try:
response, connection = send_request( 'logout', {}, { 'Cookie': session_cookie } ) # Here we don't care what is the response.
connection.close()
if args.verbose: print "Connection closed"
except Exception:
sys.exit( exitCode ) # If we get an error trying to log out, just bail.
sys.exit( exitCode )
###################################################################################################
def last_sync_time_test( instance ):
# This function is the heart of the health check logic.
#
# Usage: last_sync_time_test( dictionary )
# 'dictionary' is from JSON, containing details of one specific host
#
# Returns: tuple of ( string, int ) where 'string' is a status message and 'int' is a status code
if args.verbose: print "replicationState:", instance[ 'replicationState' ]
if args.verbose: print "lastConsistencyTime ISO-8601:", instance[ 'lastConsistencyTime' ]
# First thing to check is the text string of the state
if instance[ 'replicationState' ] != "Replicated":
message = instance[ 'name' ] + " (" + instance[ 'id' ] + ") in account \"" + args.username + "\" is \"" + instance[ 'replicationState' ] + "\" not \"Replicated\" !!"
return ( message, EXIT_STATUS_DICT[ 'CRITICAL' ] )
# Dummy check the timestamp, because if the host isn't replicating the timestamp will be null
# This shouldn't be a real indication of replication failure, because the 'replicationState' being
# checked above should catch it.
if instance[ 'lastConsistencyTime' ] is None:
message = instance[ 'name' ] + " lastConsistencyTime is empty! There should be something there if it is replicating properly!"
return ( message, EXIT_STATUS_DICT[ 'UNKNOWN' ] )
# Convert ISO-8601 format to UNIX epoch (integer seconds since Jan 1 1970) since that makes the math easy :-)
# We will try several different ISO-8601 formats before giving up.
# https://en.wikipedia.org/wiki/ISO_8601
# See format codes at https://docs.python.org/2/library/datetime.html
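	# For example, '2016-01-01T22:08:15.803212+00:00' matches the
	# '%Y-%m-%dT%H:%M:%S.%f+00:00' pattern and converts to 1451686095.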
originalTimeValue = instance[ 'lastConsistencyTime' ] # Save it for later. We will be trying to replace it with the integer value.
for format in ( '%Y-%m-%dT%H:%M:%S.%f%z', '%Y-%m-%dT%H:%M:%S%z', '%Y-%m-%dT%H:%M:%S.%f+00:00', '%Y-%m-%dT%H:%M:%S+00:00', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ', '%Y%m%dT%H%M%SZ' ):
if args.verbose: print "Trying ISO-8601 format ", format
try:
instance[ 'lastConsistencyTime' ] = calendar.timegm( datetime.strptime( instance[ 'lastConsistencyTime' ], format ).timetuple() )
if isinstance( instance[ 'lastConsistencyTime' ], ( int, long ) ):
break # If we managed to get a numeric value, we're done.
except ValueError:
continue # Try again with the next format if this one didn't work.
# If we still have the same time value & format as before, we failed to find a matching ISO-8601 pattern.
if instance[ 'lastConsistencyTime' ] == originalTimeValue:
message = instance[ 'name' ] + " lastConsistencyTime " + str( instance[ 'lastConsistencyTime' ] ) + " doesn't appear to be a date / time in a recognized ISO-8601 format!"
return ( message, EXIT_STATUS_DICT[ 'UNKNOWN' ] )
# Now for the ultimate in being careful, make sure it really is an integer!
if not isinstance( instance[ 'lastConsistencyTime' ], ( int, long ) ):
message = instance[ 'name' ] + " lastConsistencyTime is not an integer!"
return ( message, EXIT_STATUS_DICT[ 'UNKNOWN' ] )
if args.verbose: print "lastConsistencyTime UNIX epoch seconds:", instance[ 'lastConsistencyTime' ]
# Make a string that's human-readable for printing in output
lastSyncTimeStr = time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime( instance[ 'lastConsistencyTime' ] ) )
# Finally calculate how far back was the last sync
if args.verbose: print "Time now", int( time.time() )
timeDelta = int( time.time() ) - instance[ 'lastConsistencyTime' ]
if args.verbose: print "lastConsistencyTime seconds ago:", timeDelta
if ( timeDelta > CRITICAL_SYNC_DELAY ): # This is the first test, because the longest delay value is Critical
message = instance[ 'name' ] + " has not had an update since " + lastSyncTimeStr + ", " + str( seconds_to_time_text( timeDelta ) )
return ( message, EXIT_STATUS_DICT[ 'CRITICAL' ] )
if ( timeDelta > WARNING_SYNC_DELAY ):
message = instance[ 'name' ] + " has not had an update since " + lastSyncTimeStr + ", " + str( seconds_to_time_text( timeDelta ) )
return ( message, EXIT_STATUS_DICT[ 'WARNING' ] )
if ( timeDelta <= WARNING_SYNC_DELAY ): # If the delay since last sync is less than our tolerance for Warning, it's good!!
message = instance[ 'name' ] + " last update " + lastSyncTimeStr + ", " + str( seconds_to_time_text( timeDelta ) )
return ( message, EXIT_STATUS_DICT[ 'OK' ] )
message = "Could not analyze the sync state for " + instance[ 'name' ]
return ( message, EXIT_STATUS_DICT[ 'UNKNOWN' ] ) # If we get to this point something went wrong!
###################################################################################################
def send_request( function, params, headers ):
# This function makes the HTTPS call out to the CloudEndure API and makes sure we get a '200' HTTP status
# before returning the JSON
#
# Usage: send_request( string, dict1, dict2 )
# 'string' is the API function call
# 'dict1' is a dictionary of parameters for the API call
# 'dict2' is a dictionary of HTTP headers - currently only used for the session auth cookie
#
# Returns: tuple of HTTPSConnection.getresponse(), and the connection object itself (to allow closing outside this function)
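	# Example (mirrors the 'listMachines' call made later in this script):
	#   response, connection = send_request( 'listMachines', { 'location': location }, { 'Cookie': session_cookie } )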
connection = httplib.HTTPSConnection( CLOUDENDURE_API_HOST, 443 )
try:
connection.connect()
	except httplib.HTTPException:
exit_with_message( "Problem setting up the HTTPS connection to \"" + CLOUDENDURE_API_HOST + "\" !!", EXIT_STATUS_DICT[ 'UNKNOWN' ] )
headers.update( { 'Content-Type': 'application/json' } )
# For debugging it's helpful to include the 'params' in verbose output, but
# that exposes the password when calling the 'login' API function - so it's not
# a great idea. Instead just show the function name and headers. That's safe.
## if args.verbose: print "\nCalling {0} with {1} and {2}".format( function, params, headers )
if args.verbose: print "\nCalling {0} with {1}".format( function, headers )
connection.request( 'POST', '/latest/' + function, json.dumps( params ), headers )
connectionResponse = connection.getresponse()
if connectionResponse.status != 200:
exit_with_message( "{0} call returned HTTP code {1} {2}".format( function, connectionResponse.status, connectionResponse.reason ), EXIT_STATUS_DICT[ 'UNKNOWN' ] )
return connectionResponse, connection
###################################################################################################
def seconds_to_time_text( inputSeconds ):
# This function converts a number of seconds into a human-readable string of
# seconds / minutes / hours / days.
#
# Usage: seconds_to_time_text( seconds )
# 'seconds' is an int, or string representation of an int
#
# Returns: string of the time in words, such as "4 hours, 1 minute, 22 seconds"
# or "1 day, 18 minutes"
# or "12 days, 1 hour, 59 seconds"
#
# Note: Due to variations in clock synchronization, it's possible for the CloudEndure
# last sync time to come back as a timestamp in the future relative to where this
# script is running. We will handle that gracefully.
try:
inputSeconds = int( inputSeconds ) # In case it's a string
except:
return "{} does not appear to be a whole number of seconds!".format( inputSeconds )
if inputSeconds == 0: return "0 seconds ago (just now)"
if inputSeconds < 0:
trailingText = " in the future!"
else:
trailingText = " ago"
inputSeconds = abs( inputSeconds ) # In case it's negative, meaning in the future
results = []
periods = (
( "days", 86400 ),
( "hours", 3600 ),
( "minutes", 60 ),
( "seconds", 1 )
)
for interval, number in periods:
timePart = inputSeconds // number # Modulus / floor divide
if timePart:
inputSeconds -= timePart * number # Take away the part so far
if timePart == 1: interval = interval.rstrip( "s" ) # Handle singular case
results.append( "{0} {1}".format( timePart, interval ) )
output = ", ".join( results )
return output + trailingText
###################################################################################################
# Set up our inputs from the command line. This also handles the "-h" and error usage output for free!
parser = argparse.ArgumentParser( description = "Nagios check of the sync status of CloudEndure replication. Exit status 0 == OK, 1 == Warning, 2 == Critical, 3 == Unknown.",
epilog = "https://github.com/stefan-wuensch/Nagios-Checks" )
parser.add_argument( "-v", "--verbose", help = "increase output verbosity", action = "store_true" )
parser.add_argument( "-u", "--username", help = "user name for the CloudEndure account - required", required = True )
parser.add_argument( "-p", "--password", help = "password for the CloudEndure account - required", required = True )
parser.add_argument( "-n", "--hostname", help = "hostname of instance to check, or \"all\" (defaults to \"all\" if not specified)", default = "all" )
args = parser.parse_args()
if args.verbose:
print "Time now", int( time.time() )
print "username", args.username
# print "password", args.password # Echoing the password is probably not a good idea, but it comes in on the command line anyway.
print "hostname", args.hostname
# Do the login
try:
response, connection = send_request( 'login', { 'username': args.username, 'password': args.password }, {} )
except Exception:
exit_with_message( "Could not get a response on the login transaction!", EXIT_STATUS_DICT[ 'UNKNOWN' ] )
# Extract the session cookie from the login
try:
session_cookie = [ header[ 1 ] for header in response.getheaders() if header[ 0 ] == 'set-cookie' ][ 0 ]
connection.close()
if args.verbose: print "Connection closed"
except Exception:
session_cookie = "" # Set it to null in case we get all the way to the 'logout' call - we at least need it initialized.
exit_with_message( "Could not get a session cookie from the login transaction!", EXIT_STATUS_DICT[ 'UNKNOWN' ] )
cookies = re.split( '; |, ', session_cookie )
session_cookie = [ cookie for cookie in cookies if cookie.startswith( 'session' ) ][ 0 ].strip()
# Get the replica location from the user info
response, connection = send_request( 'getUserDetails', {}, { 'Cookie': session_cookie } )
try:
result = json.loads( response.read() )[ 'result' ]
connection.close()
if args.verbose: print "Connection closed"
except Exception:
exit_with_message( "Could not get a \"result\" object from the \"getUserDetails\" transaction!", EXIT_STATUS_DICT[ 'UNKNOWN' ] )
if args.verbose: print "\ngetUserDetails:", json.dumps( result, sort_keys = True, indent = 4 )
try:
location = result[ 'originalLocation' ]
except Exception:
exit_with_message( "Could not get a value for \"originalLocation\" from the \"getUserDetails\" transaction!", EXIT_STATUS_DICT[ 'UNKNOWN' ] )
# This is from some sample code I incorporated into this script. Since the 'for' loop
# looks useful for future things, I'm including it here for reference. This builds and prints
# a one-line comma-separated list of machine IDs. This is not needed in this script.
# response, connection = send_request( 'listMachines', { 'location': location }, { 'Cookie': session_cookie } )
# machineIds = [ machine[ 'id' ] for machine in json.loads( response.read() )[ 'result' ] ]
# print ', '.join(machineIds)
# Now that we have the location, we list all machines. This gets us all info about everything!
response, connection = send_request( 'listMachines', { 'location': location }, { 'Cookie': session_cookie } )
try:
instances = json.loads( response.read() )[ 'result' ]
connection.close()
if args.verbose: print "Connection closed"
except Exception:
exit_with_message( "Could not get a \"result\" object from the \"listMachines\" transaction!", EXIT_STATUS_DICT[ 'UNKNOWN' ] )
if args.verbose: print "\nlistMachines:", json.dumps( instances, sort_keys = True, indent = 4 )
################################################################
# Special overrides for testing / debugging / development.
# This manipulates the timestamp and status text for evaluating
# the logic in last_sync_time_test()
#
# for x in instances:
# timetest = "2016-01-01T22:08:15.803212+00:00"
# print "\n*** Setting lastConsistencyTime to " + timetest + " for testing"
# x[ 'lastConsistencyTime' ] = timetest
# print "\n*** Setting replicationState to \"foo\" for testing"
# x[ 'replicationState' ] = "foo"
################################################################
if args.hostname == "all": # "all" means we're going to check all of them (duh)
summaryMessage = "" # Init to null because we are going to be appending text
highestError = 0 # Track the worst status for the final return code
statusDict = {} # Init a dictionary to track all the instances' status for later use
for severity in ( EXIT_STATUS_DICT[ 'OK' ], EXIT_STATUS_DICT[ 'WARNING' ], EXIT_STATUS_DICT[ 'CRITICAL' ], EXIT_STATUS_DICT[ 'UNKNOWN' ] ):
statusDict[ severity ] = [] # Initialize the structure - each severity level will hold names of instances
for instance in instances:
if args.verbose: print "\nname:", instance[ 'name' ]
message, exitCode = last_sync_time_test( instance ) # This is the heart of the analysis of health.
statusDict[ instance[ 'name' ] ] = {} # Init the structure for each host
statusDict[ instance[ 'name' ] ][ 'message' ] = message # Store the message for each host
statusDict[ instance[ 'name' ] ][ 'exitCode' ] = exitCode # Store the status code for each host
statusDict[ exitCode ].append( instance[ 'name' ] ) # Push the name of this instance into the array for its severity
        statusDict[ exitCode ].sort()                           # Keep the list of names for this severity sorted
if args.verbose: print "\nstatusDict:", json.dumps( statusDict, sort_keys = True, indent = 4 )
if exitCode > highestError: highestError = exitCode # Capture the "worst" error state
# Now we build up the 'summaryMessage' by iterating across all the different statuses. (or stati? My Latin sucks.)
# For each level of severity we'll build a comma-separated list of hostnames with that status.
# If a severity level doesn't have any hosts in that state, we'll output '0' (zero).
# Each of the severity levels will be slash-separated.
# Example:
# OK: server12.blah.com / WARNING: 0 / CRITICAL: server1.blah.com, server8.blah.com / UNKNOWN: 0
for severity in ( EXIT_STATUS_DICT[ 'OK' ], EXIT_STATUS_DICT[ 'WARNING' ], EXIT_STATUS_DICT[ 'CRITICAL' ], EXIT_STATUS_DICT[ 'UNKNOWN' ] ):
wasPreviousCountZero = True # Track what the previous number was, so we know when to use a slash vs. comma
if len( statusDict[ severity ] ) > 0: # Is there one or more host(s) with this severity level?
isFirstHostName = True
for name in statusDict[ severity ]: # If there are hosts this time, add each one to the summary message by iterating over the list
if len( summaryMessage ) > 0: # Only add punctuation if we're not starting off for the very first time
if wasPreviousCountZero == True:
summaryMessage += " / "
else:
summaryMessage += ", "
if isFirstHostName: # Only add the name of the severity level if it's the first host with this level
summaryMessage += EXIT_STATUS_DICT_REVERSE[ severity ] + ": "
isFirstHostName = False
summaryMessage += name
wasPreviousCountZero = False
else: # If there wasn't any host in this severity, show zero
if len( summaryMessage ) > 0: # Don't add a comma if we're just starting off for the first round
summaryMessage += " / "
summaryMessage += EXIT_STATUS_DICT_REVERSE[ severity ] + ": 0"
wasPreviousCountZero = True
summaryMessage = "Status of all hosts in account \"" + args.username + "\": " + summaryMessage
exit_with_message( summaryMessage, highestError )
else: # This means we were given a specific host name to check
foundTheHostname = False
for instance in instances: # Here we are looking for one in particular out of all of them, so iterate
if instance[ 'name' ] == args.hostname:
foundTheHostname = True
if args.verbose: print "\nI found %s" % args.hostname
message, exitCode = last_sync_time_test( instance )
exit_with_message( message, exitCode )
# Not finding the host name that was specified is a big problem!!!
if foundTheHostname == False: exit_with_message( "Could not find the specified hostname \"" + args.hostname
+ "\" in account \"" + args.username + "\" !!", EXIT_STATUS_DICT[ 'CRITICAL' ] )
# Bail out fail-safe (but in this case "safe" is to notify us of the problem!)
exit_with_message( "Something went wrong - this should not happen.", EXIT_STATUS_DICT[ 'UNKNOWN' ] )
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.api import create_page
from cms.menu import CMSMenu, get_visible_pages
from cms.models import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.test_utils.fixtures.menus import (MenusFixture, SubMenusFixture,
SoftrootFixture)
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import (SettingsOverride,
LanguageOverride)
from cms.test_utils.util.mock import AttributeObject
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User, Permission, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.template import Template, TemplateSyntaxError
from menus.base import NavigationNode
from menus.menu_pool import menu_pool, _build_nodes_inner_for_one_menu
from menus.utils import mark_descendants, find_selected, cut_levels
class BaseMenuTest(SettingsOverrideTestCase):
settings_overrides = {
'CMS_MODERATOR': False,
}
def _get_nodes(self, path='/'):
node1 = NavigationNode('1', '/1/', 1)
node2 = NavigationNode('2', '/2/', 2, 1)
node3 = NavigationNode('3', '/3/', 3, 2)
node4 = NavigationNode('4', '/4/', 4, 2)
node5 = NavigationNode('5', '/5/', 5)
nodes = [node1, node2, node3, node4, node5]
tree = _build_nodes_inner_for_one_menu([n for n in nodes], "test")
request = self.get_request(path)
menu_pool.apply_modifiers(tree, request)
return tree, nodes
def setUp(self):
super(BaseMenuTest, self).setUp()
if not menu_pool.discovered:
menu_pool.discover_menus()
self.old_menu = menu_pool.menus
menu_pool.menus = {'CMSMenu': self.old_menu['CMSMenu']}
menu_pool.clear(settings.SITE_ID)
def tearDown(self):
menu_pool.menus = self.old_menu
super(BaseMenuTest, self).tearDown()
class FixturesMenuTests(MenusFixture, BaseMenuTest):
"""
Tree from fixture:
+ P1
| + P2
| + P3
+ P4
| + P5
+ P6 (not in menu)
+ P7
+ P8
"""
def get_page(self, num):
return Page.objects.get(title_set__title='P%s' % num)
def get_level(self, num):
return Page.objects.filter(level=num)
def get_all_pages(self):
return Page.objects.all()
def test_menu_failfast_on_invalid_usage(self):
context = self.get_context()
context['child'] = self.get_page(1)
# test standard show_menu
with SettingsOverride(DEBUG=True, TEMPLATE_DEBUG=True):
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
self.assertRaises(TemplateSyntaxError, tpl.render, context)
def test_basic_cms_menu(self):
self.assertEqual(len(menu_pool.menus), 1)
response = self.client.get(self.get_pages_root()) # path = '/'
        self.assertEqual(response.status_code, 200)
request = self.get_request()
# test the cms menu class
menu = CMSMenu()
nodes = menu.get_nodes(request)
self.assertEqual(len(nodes), len(self.get_all_pages()))
def test_show_menu(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].selected, True)
self.assertEqual(nodes[0].sibling, False)
self.assertEqual(nodes[0].descendant, False)
self.assertEqual(nodes[0].children[0].descendant, True)
self.assertEqual(nodes[0].children[0].children[0].descendant, True)
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(nodes[1].get_absolute_url(), self.get_page(4).get_absolute_url())
self.assertEqual(nodes[1].sibling, True)
self.assertEqual(nodes[1].selected, False)
def test_show_menu_num_queries(self):
context = self.get_context()
# test standard show_menu
with self.assertNumQueries(4):
"""
The 4 queries should be:
get all pages
get all page permissions
get all titles
set the menu cache key
"""
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
def test_only_active_tree(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 0)
self.assertEqual(len(nodes[0].children), 1)
self.assertEqual(len(nodes[0].children[0].children), 1)
context = self.get_context(path=self.get_page(4).get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 1)
self.assertEqual(len(nodes[0].children), 0)
def test_only_one_active_level(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 1 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 0)
self.assertEqual(len(nodes[0].children), 1)
self.assertEqual(len(nodes[0].children[0].children), 0)
def test_only_level_zero(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 %}")
tpl.render(context)
nodes = context['children']
for node in nodes:
self.assertEqual(len(node.children), 0)
def test_only_level_one(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 1 1 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), len(self.get_level(1)))
for node in nodes:
self.assertEqual(len(node.children), 0)
def test_only_level_one_active(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 1 1 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].descendant, True)
self.assertEqual(len(nodes[0].children), 0)
def test_level_zero_and_one(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 1 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
for node in nodes:
self.assertEqual(len(node.children), 1)
def test_show_submenu(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(nodes[0].descendant, True)
self.assertEqual(len(nodes), 1)
self.assertEqual(len(nodes[0].children), 1)
tpl = Template("{% load menu_tags %}{% show_sub_menu 1 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(len(nodes[0].children), 0)
def test_show_breadcrumb(self):
context = self.get_context(path=self.get_page(3).get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 3)
tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 2)
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 1)
tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 0)
page1 = Page.objects.get(pk=self.get_page(1).pk)
page1.in_navigation = False
page1.save()
page2 = self.get_page(2)
context = self.get_context(path=page2.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(isinstance(nodes[0], NavigationNode), True)
self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url())
def test_language_chooser(self):
# test simple language chooser with default args
context = self.get_context(path=self.get_page(3).get_absolute_url())
tpl = Template("{% load menu_tags %}{% language_chooser %}")
tpl.render(context)
self.assertEqual(len(context['languages']), len(settings.CMS_SITE_LANGUAGES[settings.SITE_ID]))
# try a different template and some different args
tpl = Template("{% load menu_tags %}{% language_chooser 'menu/test_language_chooser.html' %}")
tpl.render(context)
self.assertEqual(context['template'], 'menu/test_language_chooser.html')
tpl = Template("{% load menu_tags %}{% language_chooser 'short' 'menu/test_language_chooser.html' %}")
tpl.render(context)
self.assertEqual(context['template'], 'menu/test_language_chooser.html')
for lang in context['languages']:
self.assertEqual(*lang)
def test_page_language_url(self):
path = self.get_page(3).get_absolute_url()
context = self.get_context(path=path)
tpl = Template("{%% load menu_tags %%}{%% page_language_url '%s' %%}" % settings.LANGUAGES[0][0])
url = tpl.render(context)
self.assertEqual(url, "/%s%s" % (settings.LANGUAGES[0][0], path))
def test_show_menu_below_id(self):
page2 = Page.objects.get(pk=self.get_page(2).pk)
page2.reverse_id = "hello"
page2.save()
page2 = self.reload(page2)
self.assertEqual(page2.reverse_id, "hello")
page5 = self.get_page(5)
context = self.get_context(path=page5.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
page3_url = self.get_page(3).get_absolute_url()
self.assertEqual(nodes[0].get_absolute_url(), page3_url)
page2.in_navigation = False
page2.save()
context = self.get_context(path=page5.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].get_absolute_url(), page3_url)
def test_unpublished(self):
page2 = Page.objects.get(pk=self.get_page(2).pk)
page2.published = False
page2.save()
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(len(nodes[0].children), 0)
def test_home_not_in_menu(self):
page1 = Page.objects.get(pk=self.get_page(1).pk)
page1.in_navigation = False
page1.save()
page4 = Page.objects.get(pk=self.get_page(4).pk)
page4.in_navigation = False
page4.save()
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].get_absolute_url(), self.get_page(2).get_absolute_url())
self.assertEqual(nodes[0].children[0].get_absolute_url(), self.get_page(3).get_absolute_url())
page4 = Page.objects.get(pk=self.get_page(4).pk)
page4.in_navigation = True
page4.save()
menu_pool.clear(settings.SITE_ID)
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
def test_softroot(self):
"""
What is a soft root?
If a page is a soft root, it becomes the root page in the menu if
we are currently on or under that page.
If we are above that page, the children of this page are not shown.
Tree from fixture:
+ P1
| + P2 <- SOFTROOT
| + P3
+ P4
| + P5
+ P6 (not in menu)
+ P7
+ P8
"""
page2 = Page.objects.get(pk=self.get_page(2).pk)
page2.soft_root = True
page2.save()
# current page: P2
context = self.get_context(path=page2.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
"""
Assert that the top level contains only ONE page (P2), not 2: P1 and P4!
"""
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].get_absolute_url(), page2.get_absolute_url())
# current page: P3
page3 = Page.objects.get(pk=self.get_page(3).pk)
context = self.get_context(path=page3.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
"""
Assert that the top level contains only ONE page (P2), not 2: P1 and P4!
"""
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].get_absolute_url(), page2.get_absolute_url())
# current page: P1
page1 = Page.objects.get(pk=self.get_page(1).pk)
context = self.get_context(path=page1.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
"""
Assert that we have two pages in root level: P1 and P4, because the
softroot is below this level.
"""
self.assertEqual(len(nodes), 2)
# check that the first page is P1
self.assertEqual(nodes[0].get_absolute_url(), page1.get_absolute_url())
# check that we don't show the children of P2, which is a soft root!
self.assertEqual(len(nodes[0].children[0].children), 0)
# current page: NO PAGE
context = self.get_context(path="/no/real/path/")
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
"""
Check behavior is the same as on P1
"""
nodes = context['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].get_absolute_url(), page1.get_absolute_url())
self.assertEqual(len(nodes[0].children[0].children), 0)
# current page: P5
page5 = Page.objects.get(pk=self.get_page(5).pk)
context = self.get_context(path=page5.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
"""
Again, check the behavior is the same as on P1, because we're not under
a soft root!
"""
nodes = context['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].get_absolute_url(), page1.get_absolute_url())
self.assertEqual(len(nodes[0].children[0].children), 0)
def test_show_submenu_from_non_menu_page(self):
"""
Here's the structure bit we're interested in:
+ P6 (not in menu)
+ P7
+ P8
When we render P6, there should be a menu entry for P7 and P8 if the
tag parameters are "1 XXX XXX XXX"
"""
page6 = Page.objects.get(pk=self.get_page(6).pk)
context = self.get_context(page6.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
tpl.render(context)
nodes = context['children']
number_of_p6_children = len(page6.children.filter(in_navigation=True))
self.assertEqual(len(nodes), number_of_p6_children)
page7 = Page.objects.get(pk=self.get_page(7).pk)
context = self.get_context(page7.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), number_of_p6_children)
tpl = Template("{% load menu_tags %}{% show_menu 2 100 0 1 %}")
tpl.render(context)
nodes = context['children']
number_of_p7_children = len(page7.children.filter(in_navigation=True))
self.assertEqual(len(nodes), number_of_p7_children)
def test_show_breadcrumb_invisible(self):
invisible_page = create_page("invisible", "nav_playground.html", "en",
parent=self.get_page(3), published=True, in_navigation=False)
context = self.get_context(path=invisible_page.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 3)
tpl = Template("{% load menu_tags %}{% show_breadcrumb 'menu/breadcrumb.html' 1 %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 3)
tpl = Template("{% load menu_tags %}{% show_breadcrumb 'menu/breadcrumb.html' 0 %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 4)
class MenuTests(BaseMenuTest):
def test_build_nodes_inner_for_worst_case_menu(self):
'''
Tests the worst case scenario
node5
node4
node3
node2
node1
'''
node1 = NavigationNode('Test1', '/test1/', 1, 2)
node2 = NavigationNode('Test2', '/test2/', 2, 3)
node3 = NavigationNode('Test3', '/test3/', 3, 4)
node4 = NavigationNode('Test4', '/test4/', 4, 5)
node5 = NavigationNode('Test5', '/test5/', 5, None)
menu_class_name = 'Test'
nodes = [node1,node2,node3,node4,node5,]
len_nodes = len(nodes)
final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
self.assertEqual(len(final_list), len_nodes)
self.assertEqual(node1.parent, node2)
self.assertEqual(node2.parent, node3)
self.assertEqual(node3.parent, node4)
self.assertEqual(node4.parent, node5)
self.assertEqual(node5.parent, None)
self.assertEqual(node1.children, [])
self.assertEqual(node2.children, [node1])
self.assertEqual(node3.children, [node2])
self.assertEqual(node4.children, [node3])
self.assertEqual(node5.children, [node4])
def test_build_nodes_inner_for_circular_menu(self):
'''
TODO:
To properly handle this test we need to have a circular dependency
detection system.
Go nuts implementing it :)
'''
pass
def test_build_nodes_inner_for_broken_menu(self):
'''
Tests a broken menu tree (non-existing parent)
node5
node4
node3
        <non-existent>
node2
node1
'''
node1 = NavigationNode('Test1', '/test1/', 1, 2)
node2 = NavigationNode('Test2', '/test2/', 2, 12)
node3 = NavigationNode('Test3', '/test3/', 3, 4)
node4 = NavigationNode('Test4', '/test4/', 4, 5)
node5 = NavigationNode('Test5', '/test5/', 5, None)
menu_class_name = 'Test'
nodes = [node1,node2,node3,node4,node5,]
final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
self.assertEqual(len(final_list), 3)
self.assertFalse(node1 in final_list)
self.assertFalse(node2 in final_list)
self.assertEqual(node1.parent, None)
self.assertEqual(node2.parent, None)
self.assertEqual(node3.parent, node4)
self.assertEqual(node4.parent, node5)
self.assertEqual(node5.parent, None)
self.assertEqual(node1.children, [])
self.assertEqual(node2.children, [])
self.assertEqual(node3.children, [])
self.assertEqual(node4.children, [node3])
self.assertEqual(node5.children, [node4])
def test_utils_mark_descendants(self):
tree_nodes, flat_nodes = self._get_nodes()
mark_descendants(tree_nodes)
for node in flat_nodes:
self.assertTrue(node.descendant, node)
def test_utils_find_selected(self):
tree_nodes, flat_nodes = self._get_nodes()
node = flat_nodes[0]
selected = find_selected(tree_nodes)
self.assertEqual(selected, node)
selected = find_selected([])
self.assertEqual(selected, None)
def test_utils_cut_levels(self):
tree_nodes, flat_nodes = self._get_nodes()
self.assertEqual(cut_levels(tree_nodes, 1), [flat_nodes[1]])
def test_empty_menu(self):
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 0)
class AdvancedSoftrootTests(SoftrootFixture, SettingsOverrideTestCase):
"""
Tree in fixture (as taken from issue 662):
top
root
aaa
111
ccc
ddd
222
bbb
333
444
In the fixture, all pages are "in_navigation", "published" and
NOT-"soft_root".
What is a soft root?
If a page is a soft root, it becomes the root page in the menu if
we are currently on or under that page.
If we are above that page, the children of this page are not shown.
"""
settings_overrides = {
'CMS_MODERATOR': False,
'CMS_PERMISSION': False
}
def tearDown(self):
Page.objects.all().delete()
def get_page(self, name):
return Page.objects.get(title_set__slug=name)
def assertTreeQuality(self, a, b, *attrs):
"""
Checks that the node-lists a and b are the same for attrs.
This is recursive over the tree
"""
msg = '%r != %r with %r, %r' % (len(a), len(b), a, b)
self.assertEqual(len(a), len(b), msg)
for n1, n2 in zip(a,b):
for attr in attrs:
a1 = getattr(n1, attr)
a2 = getattr(n2, attr)
msg = '%r != %r with %r, %r (%s)' % (a1, a2, n1, n2, attr)
self.assertEqual(a1, a2, msg)
self.assertTreeQuality(n1.children, n2.children)
def test_top_not_in_nav(self):
"""
top: not in navigation
tag: show_menu 0 100 0 100
context shared: current page is aaa
context 1: root is NOT a softroot
context 2: root IS a softroot
expected result: the two node-trees should be equal
"""
top = self.get_page('top')
top.in_navigation = False
top.save()
aaa = self.get_page('aaa')
# root is NOT a soft root
context = self.get_context(aaa.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
hard_root = context['children']
# root IS a soft root
root = self.get_page('root')
root.soft_root = True
root.save()
aaa = self.get_page('aaa')
context = self.get_context(aaa.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
soft_root = context['children']
# assert the two trees are equal in terms of 'level' and 'title'
self.assertTreeQuality(hard_root, soft_root, 'level', 'title')
def test_top_in_nav(self):
"""
top: in navigation
tag: show_menu 0 100 0 100
context shared: current page is aaa
context 1: root is NOT a softroot
context 2: root IS a softroot
expected result 1:
0:top
1:root
2:aaa
3:111
4:ccc
5:ddd
3:222
2:bbb
expected result 2:
0:root
1:aaa
2:111
3:ccc
4:ddd
2:222
1:bbb
"""
aaa = self.get_page('aaa')
# root is NOT a soft root
context = self.get_context(aaa.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
hard_root = context['children']
mock_tree = [
AttributeObject(title='top', level=0, children=[
AttributeObject(title='root', level=1, children=[
AttributeObject(title='aaa', level=2, children=[
AttributeObject(title='111', level=3, children=[
AttributeObject(title='ccc', level=4, children=[
AttributeObject(title='ddd', level=5, children=[])
])
]),
AttributeObject(title='222', level=3, children=[])
]),
AttributeObject(title='bbb', level=2, children=[])
])
])
]
self.assertTreeQuality(hard_root, mock_tree)
# root IS a soft root
root = self.get_page('root')
root.soft_root = True
root.save()
aaa = self.get_page('aaa')
context = self.get_context(aaa.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
soft_root = context['children']
mock_tree = [
AttributeObject(title='root', level=0, children=[
AttributeObject(title='aaa', level=1, children=[
AttributeObject(title='111', level=2, children=[
AttributeObject(title='ccc', level=3, children=[
AttributeObject(title='ddd', level=4, children=[])
])
]),
AttributeObject(title='222', level=2, children=[])
]),
AttributeObject(title='bbb', level=1, children=[])
])
]
self.assertTreeQuality(soft_root, mock_tree, 'title', 'level')
class ShowSubMenuCheck(SubMenusFixture, BaseMenuTest):
"""
Tree from fixture:
+ P1
| + P2
| + P3
+ P4
| + P5
+ P6
+ P7 (not in menu)
+ P8
"""
def test_show_submenu(self):
page = Page.objects.get(title_set__title='P6')
context = self.get_context(page.get_absolute_url())
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].id, 8)
def test_show_submenu_num_queries(self):
page = Page.objects.get(title_set__title='P6')
context = self.get_context(page.get_absolute_url())
# test standard show_menu
with self.assertNumQueries(4):
"""
The 4 queries should be:
get all pages
get all page permissions
get all titles
set the menu cache key
"""
tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
tpl.render(context)
class ShowMenuBelowIdTests(BaseMenuTest):
def test_not_in_navigation(self):
"""
Test for issue 521
Build the following tree:
A
|-B
|-C
\-D (not in nav)
"""
a = create_page('A', 'nav_playground.html', 'en', published=True,
in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
published=True, in_navigation=True)
c = create_page('C', 'nav_playground.html', 'en', parent=b,
published=True, in_navigation=True)
create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
published=True, in_navigation=False)
context = self.get_context(a.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1, nodes)
node = nodes[0]
self.assertEqual(node.id, b.id)
children = node.children
self.assertEqual(len(children), 1, repr(children))
child = children[0]
self.assertEqual(child.id, c.id)
def test_not_in_navigation_num_queries(self):
"""
Test for issue 521
Build the following tree:
A
|-B
|-C
\-D (not in nav)
"""
a = create_page('A', 'nav_playground.html', 'en', published=True,
in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
published=True, in_navigation=True)
c = create_page('C', 'nav_playground.html', 'en', parent=b,
published=True, in_navigation=True)
create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
published=True, in_navigation=False)
with LanguageOverride('en'):
context = self.get_context(a.get_absolute_url())
with self.assertNumQueries(4):
"""
The 4 queries should be:
get all pages
get all page permissions
get all titles
set the menu cache key
"""
# Actually seems to run:
tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
tpl.render(context)
class ViewPermissionMenuTests(SettingsOverrideTestCase):
settings_overrides = {
'CMS_MODERATOR': False,
'CMS_PERMISSION': True,
'CMS_PUBLIC_FOR': 'all',
}
def get_request(self, user=None):
attrs = {
'user': user or AnonymousUser(),
'REQUEST': {},
'session': {},
}
return type('Request', (object,), attrs)
def test_public_for_all_staff(self):
request = self.get_request()
request.user.is_staff = True
page = Page()
page.pk = 1
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [1])
def test_public_for_all_staff_assert_num_queries(self):
request = self.get_request()
request.user.is_staff = True
page = Page()
page.pk = 1
pages = [page]
with self.assertNumQueries(0):
get_visible_pages(request, pages)
def test_public_for_all(self):
user = User.objects.create_user('user', 'user@domain.com', 'user')
request = self.get_request(user)
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [1])
def test_public_for_all_num_queries(self):
user = User.objects.create_user('user', 'user@domain.com', 'user')
request = self.get_request(user)
site = Site()
site.pk = 1
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
with self.assertNumQueries(2):
"""
The queries are:
PagePermission query for affected pages
GlobalpagePermission query for user
"""
get_visible_pages(request, pages, site)
def test_unauthed(self):
request = self.get_request()
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [1])
def test_unauthed_num_queries(self):
request = self.get_request()
site = Site()
site.pk = 1
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
with self.assertNumQueries(1):
"""
The query is:
PagePermission query for affected pages
global is not executed because it's lazy
"""
get_visible_pages(request, pages, site)
def test_authed_basic_perm(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
user.user_permissions.add(Permission.objects.get(codename='view_page'))
request = self.get_request(user)
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [1])
def test_authed_basic_perm_num_queries(self):
site = Site()
site.pk = 1
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
user.user_permissions.add(Permission.objects.get(codename='view_page'))
request = self.get_request(user)
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
with self.assertNumQueries(4):
"""
The queries are:
PagePermission query for affected pages
GlobalpagePermission query for user
Generic django permission lookup
content type lookup by permission lookup
"""
get_visible_pages(request, pages, site)
def test_authed_no_access(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
request = self.get_request(user)
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [])
def test_authed_no_access_num_queries(self):
site = Site()
site.pk = 1
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
request = self.get_request(user)
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
with self.assertNumQueries(4):
"""
The queries are:
PagePermission query for affected pages
GlobalpagePermission query for user
Generic django permission lookup
content type lookup by permission lookup
"""
get_visible_pages(request, pages, site)
def test_unauthed_no_access(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
request = self.get_request()
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [])
def test_unauthed_no_access_num_queries(self):
site = Site()
site.pk = 1
request = self.get_request()
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
with self.assertNumQueries(1):
get_visible_pages(request, pages, site)
def test_page_permissions(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
request = self.get_request(user)
page = create_page('A', 'nav_playground.html', 'en')
PagePermission.objects.create(can_view=True, user=user, page=page)
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [1])
def test_page_permissions_num_queries(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
request = self.get_request(user)
page = create_page('A', 'nav_playground.html', 'en')
PagePermission.objects.create(can_view=True, user=user, page=page)
pages = [page]
with self.assertNumQueries(2):
"""
The queries are:
PagePermission query for affected pages
GlobalpagePermission query for user
"""
get_visible_pages(request, pages)
def test_page_permissions_view_groups(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
group = Group.objects.create(name='testgroup')
group.user_set.add(user)
request = self.get_request(user)
page = create_page('A', 'nav_playground.html', 'en')
PagePermission.objects.create(can_view=True, group=group, page=page)
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [1])
def test_page_permissions_view_groups_num_queries(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
group = Group.objects.create(name='testgroup')
group.user_set.add(user)
request = self.get_request(user)
page = create_page('A', 'nav_playground.html', 'en')
PagePermission.objects.create(can_view=True, group=group, page=page)
pages = [page]
with self.assertNumQueries(3):
"""
The queries are:
PagePermission query for affected pages
GlobalpagePermission query for user
Group query via PagePermission
"""
get_visible_pages(request, pages)
def test_global_permission(self):
with SettingsOverride(CMS_PUBLIC_FOR='staff'):
user = User.objects.create_user('user', 'user@domain.com', 'user')
GlobalPagePermission.objects.create(can_view=True, user=user)
request = self.get_request(user)
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
result = get_visible_pages(request, pages)
self.assertEqual(result, [1])
def test_global_permission_num_queries(self):
site = Site()
site.pk = 1
user = User.objects.create_user('user', 'user@domain.com', 'user')
GlobalPagePermission.objects.create(can_view=True, user=user)
request = self.get_request(user)
site = Site()
site.pk = 1
page = Page()
page.pk = 1
page.level = 0
page.tree_id = 1
pages = [page]
with self.assertNumQueries(2):
"""
The queries are:
PagePermission query for affected pages
GlobalpagePermission query for user
"""
get_visible_pages(request, pages, site)
|
|
import numpy as np
import scipy.linalg as slg
from pyscf.lib import logger
def get_tdm_uhf(pymol, C0, C1, s1e=None, r_aos=None, orth=0, thr_zero=1.E-9):
    """ C0 and C1 should each be a list of two numpy arrays, [Caocc, Cbocc]. For RHF, simply use the occupied C matrix twice, i.e., np.asarray([Cocc, Cocc]).
orth:
0 do not orthogonalize C0 and C1
1 orthogonalize C1 to C0
-1 orthogonalize C0 to C1
2 symmetric orthogonalization
"""
nao = pymol.nao_nr()
assert(len(C0) == len(C1) == 2)
no = [C0[s].shape[1] for s in [0,1]]
for s in [0,1]: assert(no[s] == C1[s].shape[1]) # no. of e^-'s must match
if s1e is None: s1e = pymol.intor_symmetric("int1e_ovlp")
for s in [0,1]: # orthonormality
assert(np.allclose(C0[s].T@s1e@C0[s], np.eye(no[s])))
assert(np.allclose(C1[s].T@s1e@C1[s], np.eye(no[s])))
if r_aos is None: r_aos = pymol.intor_symmetric("int1e_r")
compute_00 = compute_11 = compute_01 = True
if orth == 0:
compute_00 = compute_11 = False
elif orth == 1:
compute_11 = False
elif orth == -1:
compute_00 = False
else:
pass # symm orth needs all
if compute_00:
d00s = np.asarray(
[sum([np.trace(C0[s].T@r_aos[i]@C0[s]) for s in [0,1]])
for i in range(3)])
if compute_11:
d11s = np.asarray(
[sum([np.trace(C1[s].T@r_aos[i]@C1[s]) for s in [0,1]])
for i in range(3)])
S01 = [C0[s].T @ s1e @ C1[s] for s in [0,1]]
if compute_01:
u = [None] * 2
l = [None] * 2
v = [None] * 2
for s in [0,1]:
u[s], l[s], vt = np.linalg.svd(S01[s])
v[s] = vt.T
nz = [sum(l[s] < thr_zero) for s in [0,1]]
nztot = sum(nz)
if nztot == 0:
D = np.prod([np.prod(l[s]) for s in [0,1]])
C0p = [C0[s] @ u[s] for s in [0,1]]
C1p = [C1[s] @ v[s] for s in [0,1]]
d01s = D * np.asarray([sum(
[np.sum(np.diag(C0p[s].T@r_aos[i]@C1p[s])/l[s]) for s in [0,1]])
for i in range(3)])
elif nztot == 1:
s = 0 if nz[0] == 1 else 1
D0 = np.prod(l[s][:-1]) * np.prod(l[1-s])
C0p0 = C0[s] @ u[s][:,-1]
C1p0 = C1[s] @ v[s][:,-1]
d01s = D0 * np.asarray([C0p0@r_aos[i]@C1p0 for i in range(3)])
else:
d01s = np.zeros(3)
S = np.prod([np.prod(l[s]) for s in [0,1]])
else:
S = np.prod([np.linalg.det(S01[s]) for s in [0,1]])
logger.note(pymol, "Overlap: % .10g", S)
if orth == 0:
ds = d01s
elif orth == 1:
ds = (d01s - S*d00s) / (1-S**2.)**0.5
elif orth == -1:
ds = (d01s - S*d11s) / (1-S**2.)**0.5
else:
ds = (d01s - (d00s+d11s)*0.5*S) / (1-S**2.)
return ds
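# Minimal usage sketch (illustrative; `mol`, `Ca0_occ`, etc. are assumed to come
# from the caller's converged SCF calculations, not defined here):
#
#   C0 = [Ca0_occ, Cb0_occ]                    # occupied MO blocks of state 0
#   C1 = [Ca1_occ, Cb1_occ]                    # occupied MO blocks of state 1
#   tdm = get_tdm_uhf(mol, C0, C1, orth=1)     # orthogonalize state 1 against state 0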
def get_tdm_unoci(pymol, C0, C1s, cs, s1e=None, r_aos=None, orth=0,
                  thr_zero=1.E-9):
    """ C0 is a UHF occupied MO coefficient pair [Caocc, Cbocc]; C1s = [C1a, C1b, ...] is a list of such pairs. cs are the corresponding NOCI coefficients.
"""
nao = pymol.nao_nr()
no = [C0[s].shape[1] for s in [0,1]]
if s1e is None: s1e = pymol.intor_symmetric("int1e_ovlp")
for C1 in C1s:
assert(len(C0) == len(C1) == 2)
for s in [0,1]:
assert(no[s] == C1[s].shape[1]) # no. of e^-'s must match
            # orthonormality
assert(np.allclose(C0[s].T@s1e@C0[s], np.eye(no[s])))
assert(np.allclose(C1[s].T@s1e@C1[s], np.eye(no[s])))
if r_aos is None: r_aos = pymol.intor_symmetric("int1e_r")
compute_00 = compute_11 = compute_01 = True
if orth == 0:
compute_00 = compute_11 = False
elif orth == 1:
compute_11 = False
elif orth == -1:
compute_00 = False
else:
pass # symm orth needs all
def contract_uhf_self(CA):
dAAs = np.asarray(
[sum([np.trace(CA[s].T@r_aos[i]@CA[s]) for s in [0,1]])
for i in range(3)])
return dAAs
def contract_uhf(CA, CB):
SAB = [CA[s].T @ s1e @ CB[s] for s in [0,1]]
us = [None] * 2
ls = [None] * 2
vts = [None] * 2
for s in [0,1]:
us[s], ls[s], vts[s] = np.linalg.svd(SAB[s])
S = np.prod([np.linalg.det(SAB[s]) for s in [0,1]])
nzs = [sum(ls[s] < thr_zero) for s in [0,1]]
nztot = sum(nzs)
if nztot == 0:
dABs = np.zeros(3)
for s in [0,1]:
cA = CA[s] @ us[s]
cB = CB[s] @ vts[s].T
dABs += np.asarray([np.sum(np.diag(cA.T@r_aos[i]@cB)/ls[s])
for i in range(3)])
dABs *= S
elif nztot == 1:
s = 0 if nzs[0] == 1 else 1
cA = CA[s] @ us[s][:,-1]
cB = CB[s] @ vts[s][-1]
D0 = np.prod(ls[s][:-1]) * np.prod(ls[1-s])
dABs = D0 * np.asarray([cA@r_aos[i]@cB for i in range(3)])
else:
dABs = np.zeros(3)
return S, dABs
S01 = 0.
d00s = contract_uhf_self(C0)
d11s = 0.
d01s = 0.
S11 = 0.
S11mat = np.zeros([len(C1s)]*2)
for i,C1 in enumerate(C1s):
S0i, d0is = contract_uhf(C0, C1)
S01 += cs[i] * S0i
d01s += cs[i] * d0is
for j,C2 in enumerate(C1s):
if j < i: continue
if i == j:
Sij = 1.
dijs = contract_uhf_self(C1)
d11s += cs[i] * cs[j] * dijs
S11 += cs[i] * cs[j] * Sij
else:
Sij, dijs = contract_uhf(C1, C2)
d11s += 2. * cs[i] * cs[j] * dijs
S11 += 2. * cs[i] * cs[j] * Sij
S11mat[i,j] = S11mat[j,i] = Sij
assert(np.allclose(S11, 1.))
S = S01
logger.note(pymol, "Overlap: % .10g", S)
if orth == 0:
ds = d01s
elif orth == 1:
ds = (d01s - S*d00s) / (1-S**2.)**0.5
elif orth == -1:
ds = (d01s - S*d11s) / (1-S**2.)**0.5
else:
ds = (d01s - (d00s+d11s)*0.5*S) / (1-S**2.)
return ds
from frankenstein.tools.spscf_utils import get_Rbeta
def get_tdm_sphf(pymol, C0, C1, betas, ws,
                 s1e=None, r_aos=None, orth=0, thr_zero=1.E-9):
    """ C0 and C1 should each be a list of two numpy arrays, [Caocc, Cbocc].
betas and ws are the grid points and weights.
orth:
0 do not orthogonalize C0 and C1
1 orthogonalize C1 to C0
-1 orthogonalize C0 to C1
2 symmetric orthogonalization
"""
nao = pymol.nao_nr()
assert(len(C0) == len(C1) == 2)
no = [C0[s].shape[1] for s in [0,1]]
for s in [0,1]: assert(no[s] == C1[s].shape[1]) # no. of e^-'s must match
if s1e is None: s1e = pymol.intor_symmetric("int1e_ovlp")
for s in [0,1]: # orthonormality
assert(np.allclose(C0[s].T@s1e@C0[s], np.eye(no[s])))
assert(np.allclose(C1[s].T@s1e@C1[s], np.eye(no[s])))
if r_aos is None: r_aos = pymol.intor_symmetric("int1e_r")
C0 = slg.block_diag(*C0)
C1 = slg.block_diag(*C1)
s1e = slg.block_diag(s1e,s1e)
r_aos_ = [None] * 3
for i in range(3): r_aos_[i] = slg.block_diag(r_aos[i],r_aos[i])
r_aos = r_aos_
compute_00 = compute_11 = compute_01 = True
if orth == 0:
compute_00 = compute_11 = False
elif orth == 1:
compute_11 = False
elif orth == -1:
compute_00 = False
else:
pass # symm orth needs all
def contract_ghf(c0, c1, u, l, vt, dlast=None):
nz = sum(l < thr_zero)
if nz == 0:
D = np.prod(l)
c0p = c0 @ u
c1p = c1 @ vt.T
d = D * np.asarray([np.sum(np.diag(c0p.T@r_aos[i]@c1p)/l)
for i in range(3)])
elif nz == 1:
D0 = np.prod(l[:-1])
c0p = c0 @ u[:,-1]
c1p = c1 @ vt[-1]
d = D0 * np.asarray([c0p@r_aos[i]@c1p for i in range(3)])
else:
d = np.zeros(3)
        if dlast is not None:
svec = d*dlast
svec = svec[np.abs(svec) > 1.E-6]
if sum(svec < 0) == len(svec):
d *= -1.
return d
ngrid = len(betas)
d00s = np.zeros([ngrid,3])
d11s = np.zeros([ngrid,3])
d01s = np.zeros([ngrid,3])
D00s = np.zeros([ngrid])
D11s = np.zeros([ngrid])
D01s = np.zeros([ngrid])
for ig,beta,w in zip(range(ngrid),betas,ws):
R = get_Rbeta(nao, beta)
S00 = C0.T @ s1e @ R @ C0
u, l, vt = np.linalg.svd(S00)
D00s[ig] = np.prod(l)
if compute_00:
d00last = None if ig == 0 else d00s[ig-1]
d00s[ig] = contract_ghf(C0, R@C0, u, l, vt, d00last)
S11 = C1.T @ s1e @ R @ C1
u, l, vt = np.linalg.svd(S11)
D11s[ig] = np.prod(l)
if compute_11:
d11last = None if ig == 0 else d11s[ig-1]
d11s[ig] = contract_ghf(C1, R@C1, u, l, vt, d11last)
S01 = C0.T @ s1e @ R @ C1
u, l, vt = np.linalg.svd(S01)
D01s[ig] = np.prod(l)
if compute_01:
d01last = None if ig == 0 else d01s[ig-1]
d01s[ig] = contract_ghf(C0, R@C1, u, l, vt, d01last)
S00 = ws @ D00s
S11 = ws @ D11s
S01 = (ws @ D01s) / (S00*S11)**0.5
d00s = (ws @ d00s) / S00
d11s = (ws @ d11s) / S11
d01s = (ws @ d01s) / (S00*S11)**0.5
S = S01
logger.note(pymol, "Overlap: % .10g", S)
if orth == 0:
ds = d01s
elif orth == 1:
ds = (d01s - S*d00s) / (1-S**2.)**0.5
elif orth == -1:
ds = (d01s - S*d11s) / (1-S**2.)**0.5
else:
ds = (d01s - (d00s+d11s)*0.5*S) / (1-S**2.)
return ds
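# Illustrative call (assumes `betas` and `ws` are the spin-projection grid points
# and quadrature weights prepared by the caller's SPHF setup):
#
#   tdm = get_tdm_sphf(mol, C0, C1, betas, ws, orth=2)   # symmetric orthogonalization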
|
|
import unittest, random, sys, time, math
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm
BINS = 100
def gen_rand_equation(colCount,
INTCPT_VALUE_MIN, INTCPT_VALUE_MAX,
COEFF_VALUE_MIN, COEFF_VALUE_MAX, SEED):
r1 = random.Random(SEED)
coefficients = []
# y = 1/(1 + exp(-(sum(coefficients*x)+intercept))
for j in range(colCount):
rif = r1.uniform(COEFF_VALUE_MIN, COEFF_VALUE_MAX)
        # rif = (j+0.0)/colCount  # gets bigger for each one
coefficients.append(rif)
# FIX! temporary try fixed = col+1
# coefficients.append(j+1)
# coefficients.append(2 + 2*(j%2))
intercept = r1.uniform(INTCPT_VALUE_MIN, INTCPT_VALUE_MAX)
# intercept = 0
print "Expected coefficients:", coefficients
print "Expected intercept:", intercept
return(coefficients, intercept)
# FIX! random noise on coefficients? randomly force 5% to 0?
#y = 1/(1 + math.exp(-(sum(coefficients*x)+intercept))
def yFromEqnAndData(coefficients, intercept, rowData, DATA_DISTS, ALGO):
# FIX! think about using noise on some of the rowData
cx = [a*b for a,b in zip(coefficients, rowData)]
if ALGO=='binomial':
y = 1.0/(1.0 + math.exp(-(sum(cx) + intercept)))
if y<0 or y>1:
            raise Exception("Generated y result should be between 0 and 1: %s" % y)
elif ALGO=='poisson':
y = math.exp(sum(cx) + intercept)
if y<0:
            raise Exception("Generated y result should be >= 0: %s" % y)
elif ALGO=='gamma':
y = 1.0/(sum(cx) + intercept)
if y<0:
            raise Exception("Generated y result should be >= 0: %s" % y)
else:
raise Exception('Unknown ALGO: %s' % ALGO)
return y
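# Worked example (binomial): coefficients=[0.5], intercept=0.0, rowData=[1.0]
#   y = 1/(1 + exp(-(0.5*1.0 + 0.0))) = 1/(1 + exp(-0.5)) ~= 0.6225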
def write_syn_dataset(csvPathname, rowCount, colCount, coefficients, intercept,
DATA_VALUE_MIN, DATA_VALUE_MAX, DATA_DISTS, ALGO, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
# assuming output is always last col
yMin = None
yMax = None
# generate a mode per column that is reused
# this will make every column have a different data distribution
if DATA_DISTS == 'unique_pos_neg':
d = DATA_VALUE_MIN
        fullRange = DATA_VALUE_MAX - DATA_VALUE_MIN
colModes = []
for j in range(colCount):
colModes += [(random.randint(0,1) * -1) * (((float(j)/colCount) * fullRange) + DATA_VALUE_MIN)]
elif DATA_DISTS == 'mean':
colDataMean = (DATA_VALUE_MIN + DATA_VALUE_MAX) / 2
colModes = [colDataMean for j in range(colCount)]
elif DATA_DISTS == 'random':
colModes = [r1.uniform(DATA_VALUE_MIN, DATA_VALUE_MAX) for j in range(colCount)]
else:
        raise Exception('Unknown DATA_DISTS: %s' % DATA_DISTS)
print "\ncolModes:", colModes
if ALGO=='binomial':
        print "gen'ed y is a probability! generate 1/0 data rows with that probability, binned to %d bins" % BINS
print "100 implies 2 places of accuracy in getting the probability."
print "this means we should get 1 place of accuracy in the result coefficients/intercept????"
for i in range(rowCount):
rowData = []
for j in range(colCount):
# ri = r1.uniform(0,1)
ri = r1.triangular(DATA_VALUE_MIN, DATA_VALUE_MAX, colModes[j])
rowData.append(ri)
# Do a walk from 0 to 1 by .1
# writing 0 or 1 depending on whether you are below or above the probability
# coarse approximation to get better coefficient match in GLM
y = yFromEqnAndData(coefficients, intercept, rowData, DATA_DISTS, ALGO)
if yMin is None or y<yMin: yMin = y
if yMax is None or y>yMax: yMax = y
if ALGO=='binomial':
            for b in range(1,BINS+1): # BINS bins; use 'b' to avoid shadowing the row index 'i'
                if y > (b + 0.0)/BINS:
binomial = 1
else:
binomial = 0
rowDataCsv = ",".join(map(str,rowData + [binomial]))
dsf.write(rowDataCsv + "\n")
elif ALGO=='poisson':
rowDataCsv = ",".join(map(str,rowData + [int(y)]))
dsf.write(rowDataCsv + "\n")
elif ALGO=='gamma':
rowDataCsv = ",".join(map(str,rowData + [y]))
dsf.write(rowDataCsv + "\n")
else:
raise Exception('Unknown ALGO: %s' % ALGO)
dsf.close()
print "yMin:", yMin, " yMax:", yMax
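# Note on the binomial output (illustrative): each generated data row is written
# BINS times, once per probability threshold, with the last column set to 1 when
# that row's y exceeds the threshold and 0 otherwise. For example, with BINS=100
# and y ~= 0.62, roughly 62 of the 100 emitted copies get label 1.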
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.beta_features = True
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=12)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
#************************************************************************************
def GLM_syn_eqns_data(self,
ALGO='binomial',
DATA_VALUE_MIN=-1, DATA_VALUE_MAX=1,
COEFF_VALUE_MIN=-1, COEFF_VALUE_MAX=1,
INTCPT_VALUE_MIN=-1, INTCPT_VALUE_MAX=1,
DATA_DISTS='unique_pos_neg'):
SYNDATASETS_DIR = h2o.make_syn_dir()
if ALGO=='poisson':
tryList = [
(50000, 5, 'cD', 300),
]
else:
tryList = [
# (100, 1, 'cA', 300),
# (100, 25, 'cB', 300),
# (1000, 25, 'cC', 300),
# 50 fails, 40 fails
# (10000, 50, 'cD', 300),
# 30 passes
# (10000, 30, 'cD', 300),
# 200 passed
(500, 30, 'cD', 300),
(500, 30, 'cD', 300),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
modeString = \
"_Bins" + str(BINS) + \
"_Dmin" + str(DATA_VALUE_MIN) + \
"_Dmax" + str(DATA_VALUE_MAX) + \
"_Cmin" + str(COEFF_VALUE_MIN) + \
"_Cmax" + str(COEFF_VALUE_MAX) + \
"_Imin" + str(INTCPT_VALUE_MIN) + \
"_Imax" + str(INTCPT_VALUE_MAX) + \
"_Ddist" + str(DATA_DISTS)
print "modeString:", modeString
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + modeString + "_" + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname, \
"using random coefficients and intercept and logit eqn. for output"
(coefficientsGen, interceptGen) = gen_rand_equation(colCount,
INTCPT_VALUE_MIN, INTCPT_VALUE_MAX,
COEFF_VALUE_MIN, COEFF_VALUE_MAX, SEEDPERFILE)
print coefficientsGen, interceptGen
write_syn_dataset(csvPathname, rowCount, colCount, coefficientsGen, interceptGen,
DATA_VALUE_MIN, DATA_VALUE_MAX, DATA_DISTS, ALGO, SEED)
parseResult = h2i.import_parse(path=csvPathname, hex_key=hex_key, schema='put', timeoutSecs=60)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
y = colCount
print "GLM is ignoring the thresholds I give it? deciding what's best?"
kwargs = {
'family': ALGO,
'response': y,
'max_iter': 10,
'lambda': 0,
'alpha': 0,
'n_folds': 0,
'beta_epsilon': 1e-4,
# 'thresholds': 0.5,
}
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
(warnings, coefficients, intercept) = h2o_glm.simpleCheckGLM(self, glm, 'C1', **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
if ALGO=='binomial':
deltaCoeff = 0.1
deltaIntcpt = 0.2
else: # poisson needs more?
deltaCoeff = 0.4
deltaIntcpt = 1.0
for i,c in enumerate(coefficients):
g = coefficientsGen[i] # generated
print "coefficient[%d]: %8.4f, generated: %8.4f, delta: %8.4f" % (i, c, g, abs(g-c))
self.assertAlmostEqual(c, g, delta=deltaCoeff, msg="not close enough. coefficient[%d]: %s, generated %s" % (i, c, g))
c = intercept
g = interceptGen
print "intercept: %8.4f, generated: %8.4f, delta: %8.4f" % (c, g, abs(g-c))
print "need a larger delta compare for intercept?"
self.assertAlmostEqual(c, g, delta=deltaIntcpt, msg="not close enough. intercept: %s, generated %s" % (c, g))
#************************************************************************************
def test_GLM2_syn_eqns_data_A(self):
self.GLM_syn_eqns_data(
ALGO='binomial',
DATA_VALUE_MIN=-1, DATA_VALUE_MAX=1,
COEFF_VALUE_MIN=-1, COEFF_VALUE_MAX=1,
INTCPT_VALUE_MIN=-1, INTCPT_VALUE_MAX=1,
DATA_DISTS='unique_pos_neg')
def test_GLM2_syn_eqns_data_B(self):
self.GLM_syn_eqns_data(
ALGO='binomial',
DATA_VALUE_MIN=-1, DATA_VALUE_MAX=1,
COEFF_VALUE_MIN=-1, COEFF_VALUE_MAX=1,
INTCPT_VALUE_MIN=-1, INTCPT_VALUE_MAX=1,
DATA_DISTS='mean')
def test_GLM2_syn_eqns_data_C(self):
self.GLM_syn_eqns_data(
ALGO='poisson',
DATA_VALUE_MIN=0, DATA_VALUE_MAX=1,
COEFF_VALUE_MIN=0, COEFF_VALUE_MAX=1,
INTCPT_VALUE_MIN=0, INTCPT_VALUE_MAX=1,
DATA_DISTS='mean')
def test_GLM2_syn_eqns_data_D(self):
# data and y have to be 0 to N for poisson
self.GLM_syn_eqns_data(
ALGO='poisson',
DATA_VALUE_MIN=0, DATA_VALUE_MAX=1,
COEFF_VALUE_MIN=0, COEFF_VALUE_MAX=1,
INTCPT_VALUE_MIN=0, INTCPT_VALUE_MAX=1,
DATA_DISTS='unique_pos_neg')
def test_GLM2_syn_eqns_data_E(self):
# data and y have to be 0 to N for poisson
# y seems to be tightly clamped between 0 and 1 if you have coefficient range from -1 to 0
self.GLM_syn_eqns_data(
ALGO='poisson',
DATA_VALUE_MIN=0, DATA_VALUE_MAX=2,
COEFF_VALUE_MIN=-.2, COEFF_VALUE_MAX=2,
INTCPT_VALUE_MIN=-.2, INTCPT_VALUE_MAX=2,
DATA_DISTS='random')
def test_GLM2_syn_eqns_data_F(self):
# data and y have to be 0 to N for poisson
# y seems to be tightly clamped between 0 and 1 if you have coefficient range from -1 to 0
self.GLM_syn_eqns_data(
ALGO='gamma',
DATA_VALUE_MIN=0, DATA_VALUE_MAX=2,
COEFF_VALUE_MIN=-.2, COEFF_VALUE_MAX=2,
INTCPT_VALUE_MIN=-.2, INTCPT_VALUE_MAX=2,
DATA_DISTS='random')
if __name__ == '__main__':
h2o.unit_main()
|
|
import requests
import json
from threading import Thread
def own_thread(func):
"""
Decorator that starts a method or function on its own thread
:param func: function
:return: wrapped function
"""
def wrapped_f(*args, **kwargs):
thread = Thread(target=func, args=args, kwargs=kwargs, daemon=True)
thread.start()
return wrapped_f
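# Illustrative usage (hypothetical function name):
#
#   @own_thread
#   def post_report(payload):
#       ...  # runs on a daemon thread; the caller returns immediately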
def slackresponse_from_message(original_message, delete_buttons=None, footer=None, change_buttons=None):
"""Return a SlackResponse object from an original message dict"""
response = SlackResponse(text=original_message.get('text', ''))
attachments = original_message.get('attachments', list())
if delete_buttons is None:
delete_buttons = list()
for attachment in attachments:
if footer is None:
footer = attachment.get('footer', None)
else:
footer = attachment.get('footer', '') + '\n' + footer
duplicate_attachment = response.add_attachment(title=attachment.get('title', None),
title_link=attachment.get('title_link', None),
fallback=attachment.get('fallback', None),
color=attachment.get('color', None),
footer=footer,
callback_id=attachment.get('callback_id', None),
image_url=attachment.get('image_url', None),
text=attachment.get('text', None),
author_name=attachment.get('author_name', None),
ts=attachment.get('ts', None))
for field in attachment.get('fields', list()):
duplicate_attachment.add_field(title=field.get('title', None), value=field.get('value', None),
short=field.get('short', False))
for button in attachment.get('actions', list()):
if button.get("text") not in delete_buttons:
button_text = button.get('text')
if change_buttons is not None:
if button_text in change_buttons:
button = change_buttons[button_text].button_dict
confirm = button.get('confirm', dict())
duplicate_attachment.add_button(button.get('text'), value=button.get('value', None),
style=button.get('style', 'default'), confirm=confirm.get('text', None),
yes=confirm.get('ok_text', 'Yes'))
return response
class IncomingWebhook:
"""
Utility class that wraps a Slack webhook
"""
def __init__(self, url):
"""
:param url: Slack webhook URL
"""
self.url = url
def send_message(self, message):
"""
Send a Slack message via the webhook
:param message: SlackResponse object
:return: requests.Response object
"""
return requests.post(self.url, data=message.get_json())
class SlackButton:
"""
Class that represents a JSON-encoded Slack button
"""
def __init__(self, text, value=None, style="default", confirm=None, yes='Yes'):
self.button_dict = dict()
self.button_dict['text'] = text
self.button_dict['name'] = text
self.button_dict['style'] = style
if value is None:
self.button_dict['value'] = text
else:
self.button_dict['value'] = value
self.button_dict['type'] = 'button'
if confirm is not None:
confirm_dict = dict()
confirm_dict['title'] = "Are you sure?"
confirm_dict['text'] = confirm
confirm_dict['ok_text'] = yes
confirm_dict['dismiss_text'] = 'Cancel'
self.button_dict['confirm'] = confirm_dict
class SlackField:
"""
Class that represents a JSON-encoded Slack message field
"""
def __init__(self, title, value, short="true"):
self.field_dict = dict()
self.field_dict['title'] = title
self.field_dict['value'] = value
self.field_dict['short'] = short
class SlackAttachment:
"""
Class that represents a JSON-encoded Slack message attachment
"""
def __init__(self, title=None, text=None, fallback=None, callback_id=None, color=None, title_link=None,
image_url=None, footer=None, author_name=None, ts=None):
self.attachment_dict = dict()
if fallback is not None:
self.attachment_dict['fallback'] = fallback
if callback_id is not None:
self.attachment_dict['callback_id'] = callback_id
if color is not None:
self.attachment_dict['color'] = color
if title_link is not None:
self.attachment_dict['title_link'] = title_link
if image_url is not None:
self.attachment_dict['image_url'] = image_url
if title is not None:
self.attachment_dict['title'] = title
if text is not None:
self.attachment_dict['text'] = text
if footer is not None:
self.attachment_dict['footer'] = footer
if author_name is not None:
self.attachment_dict['author_name'] = author_name
if ts is not None:
self.attachment_dict['ts'] = ts
self.attachment_dict['mrkdwn_in'] = ['title', 'text']
def add_field(self, title, value, short="true"):
if 'fields' not in self.attachment_dict:
self.attachment_dict['fields'] = []
field = SlackField(title, value, short)
self.attachment_dict['fields'].append(field.field_dict)
def add_button(self, text, value=None, style="default", confirm=None, yes=None):
if 'actions' not in self.attachment_dict:
self.attachment_dict['actions'] = []
button = SlackButton(text, value, style, confirm, yes)
self.attachment_dict['actions'].append(button.button_dict)
def set_footer(self, footer):
self.attachment_dict['footer'] = footer
class SlackResponse:
"""
Class used for easy crafting of a Slack response
"""
def __init__(self, text=None, response_type="in_channel", replace_original=True):
self.response_dict = dict()
self.attachments = []
self._is_prepared = False
if text is not None:
self.response_dict['text'] = text
if not replace_original:
self.response_dict['replace_original'] = False
self.response_dict['response_type'] = response_type
def set_replace_original(self, value):
self.response_dict['replace_original'] = value
def add_attachment(self, title=None, text=None, fallback=None, callback_id=None, color='#5c96ab',
title_link=None, footer=None,
image_url=None, author_name=None, ts=None):
if 'attachments' not in self.response_dict:
self.response_dict['attachments'] = []
attachment = SlackAttachment(title=title, text=text, fallback=fallback, callback_id=callback_id, color=color,
title_link=title_link, image_url=image_url, footer=footer, author_name=author_name,
ts=ts)
self.attachments.append(attachment)
return attachment
def _prepare(self):
self.response_dict['attachments'] = []
for attachment in self.attachments:
self.response_dict['attachments'].append(attachment.attachment_dict)
def get_json(self, indent=0):
"""Returns the JSON form of the response, ready to be sent to Slack via POST data"""
self._prepare()
return json.dumps(self.response_dict, indent=indent)
def get_dict(self):
"""Returns the dict form of the response, can be sent to Slack in GET or POST params"""
self._prepare()
return self.response_dict
def post_to_channel(self, token, channel, as_user=False):
"""Posts the SlackResponse object to a specific channel. The Slack team it's posted to depends on the
        token that is passed. Passing as_user will post the response as the user who authorized the app."""
response_dict = self.get_dict()
try:
response_dict['attachments'] = json.dumps(self.response_dict['attachments'])
except KeyError:
pass
response_dict['channel'] = channel
response_dict['token'] = token
if as_user:
response_dict['as_user'] = 'true'
request_response = requests.post('https://slack.com/api/chat.postMessage',
params=response_dict)
try:
response_dict['attachments'] = json.loads(self.response_dict['attachments'])
except KeyError:
pass
return request_response.json().get('ts', None)
def update_message(self, timestamp, channel, bot_token, parse='full'):
response_dict = self.get_dict()
response_dict['attachments'] = json.dumps(self.response_dict['attachments'])
response_dict['channel'] = channel
response_dict['token'] = bot_token
response_dict['ts'] = timestamp
response_dict['as_user'] = 'true'
response_dict['parse'] = parse
request_response = requests.post('https://slack.com/api/chat.update',
params=response_dict)
return request_response
class SlackRequest:
"""
Represents an HTTP request from Slack
"""
def __init__(self, request=None, slash_commands_secret=None, form=None):
if form is None:
self.form = request.form
else:
self.form = form
self.request_type = "command"
self.response = None
self.command = None
self.actions = None
self.callback_id = None
self.is_valid = False
self.slash_commands_secret = slash_commands_secret
if 'payload' in self.form:
self.request_type = "button"
self.form = json.loads(dict(self.form)['payload'][0])
self.user = self.form['user']['name']
self.user_id = self.form['user']['id']
self.team_domain = self.form['team']['domain']
self.team_id = self.form['team']['id']
self.callback_id = self.form['callback_id']
self.actions = self.form['actions']
self.message_ts = self.form['message_ts']
self.channel = self.form['channel']['id']
self.original_message = self.form['original_message']
else:
self.user = self.form['user_name']
self.team_domain = self.form['team_domain']
self.team_id = self.form['team_id']
self.command = self.form['command']
self.text = self.form['text']
self.command_args = self.form['text'].split()
self.channel_name = self.form['channel_name']
self.response_url = self.form['response_url']
self.token = self.form['token']
# self.team = team_from_team_name(self.team_domain)
if self.slash_commands_secret is not None:
if self.token == self.slash_commands_secret:
self.is_valid = True
def delayed_response(self, response):
"""Slack demands a response within 3 seconds. Additional responses can be sent through this method, in the
        form of a SlackResponse object or plain-text string"""
        headers = {"content-type": "text/plain"}
if isinstance(response, SlackResponse):
headers = {"content-type": "application/json"}
response = response.get_json()
slack_response = requests.post(self.response_url, data=response, headers=headers)
return slack_response
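# A minimal usage sketch (not part of the original module). The webhook URL is
# a placeholder and the message contents are made up for illustration.
if __name__ == '__main__':
    response = SlackResponse(text="Deployment finished")
    attachment = response.add_attachment(title="Build status", color="#36a64f",
                                         footer="CI bot")
    attachment.add_field("Result", "Success")
    attachment.add_button("Rollback", confirm="Roll back this deployment?", yes="Yes")
    webhook = IncomingWebhook("https://hooks.slack.com/services/T000/B000/XXXX")
    print(webhook.send_message(response).status_code)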
|
|
from __future__ import division, unicode_literals
import os
import re
import sys
import time
from ..compat import compat_str
from ..utils import (
encodeFilename,
decodeArgument,
format_bytes,
timeconvert,
)
class FileDownloader(object):
"""File Downloader class.
    File downloader objects are the ones responsible for downloading the
actual video file and writing it to disk.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead.
Available options:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx
buffersize: Size of download buffer in bytes.
noresizebuffer: Do not automatically resize the download buffer.
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
test: Download only first bytes to test the downloader.
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                        (experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
"""Create a FileDownloader object with the given options."""
self.ydl = ydl
self._progress_hooks = []
self.params = params
self.add_progress_hook(self.report_progress)
@staticmethod
def format_seconds(seconds):
(mins, secs) = divmod(seconds, 60)
(hours, mins) = divmod(mins, 60)
if hours > 99:
return '--:--:--'
if hours == 0:
return '%02d:%02d' % (mins, secs)
else:
return '%02d:%02d:%02d' % (hours, mins, secs)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return None
return float(byte_counter) / float(data_len) * 100.0
@staticmethod
def format_percent(percent):
if percent is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % percent)
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return None
if now is None:
now = time.time()
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@staticmethod
def format_eta(eta):
if eta is None:
return '--:--'
return FileDownloader.format_seconds(eta)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@staticmethod
def format_speed(speed):
if speed is None:
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % format_bytes(speed))
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return int(new_max)
if rate < new_min:
return int(new_min)
return int(rate)
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return int(round(number * multiplier))
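    # Worked examples (for reference, not part of the original code):
    # parse_bytes('500k') -> 512000, parse_bytes('10.5M') -> 11010048, and a
    # bare number such as '1234' is returned unchanged as 1234.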
def to_screen(self, *args, **kargs):
self.ydl.to_screen(*args, **kargs)
def to_stderr(self, message):
self.ydl.to_screen(message)
def to_console_title(self, message):
self.ydl.to_console_title(message)
def trouble(self, *args, **kargs):
self.ydl.trouble(*args, **kargs)
def report_warning(self, *args, **kargs):
self.ydl.report_warning(*args, **kargs)
def report_error(self, *args, **kargs):
self.ydl.report_error(*args, **kargs)
def slow_down(self, start_time, now, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
if rate_limit is None or byte_counter == 0:
return
if now is None:
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == '-' or \
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
return filename
return filename + '.part'
def undo_temp_name(self, filename):
if filename.endswith('.part'):
return filename[:-len('.part')]
return filename
def try_rename(self, old_filename, new_filename):
try:
if old_filename == new_filename:
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
self.report_error('unable to rename file: %s' % compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
if last_modified_hdr is None:
return
if not os.path.isfile(encodeFilename(filename)):
return
timestr = last_modified_hdr
if timestr is None:
return
filetime = timeconvert(timestr)
if filetime is None:
return filetime
# Ignore obviously invalid dates
if filetime == 0:
return
try:
os.utime(filename, (time.time(), filetime))
except Exception:
pass
return filetime
def report_destination(self, filename):
"""Report destination filename."""
self.to_screen('[download] Destination: ' + filename)
def _report_progress_status(self, msg, is_last_line=False):
fullmsg = '[download] ' + msg
if self.params.get('progress_with_newline', False):
self.to_screen(fullmsg)
else:
if os.name == 'nt':
prev_len = getattr(self, '_report_progress_prev_line_length',
0)
if prev_len > len(fullmsg):
fullmsg += ' ' * (prev_len - len(fullmsg))
self._report_progress_prev_line_length = len(fullmsg)
clear_line = '\r'
else:
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
self.to_console_title('youtube-dl ' + msg)
def report_progress(self, s):
if s['status'] == 'finished':
if self.params.get('noprogress', False):
self.to_screen('[download] Download completed')
else:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
if s.get('elapsed') is not None:
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
else:
msg_template = '100%% of %(_total_bytes_str)s'
self._report_progress_status(
msg_template % s, is_last_line=True)
if self.params.get('noprogress'):
return
if s['status'] != 'downloading':
return
if s.get('eta') is not None:
s['_eta_str'] = self.format_eta(s['eta'])
else:
s['_eta_str'] = 'Unknown ETA'
if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
else:
if s.get('downloaded_bytes') == 0:
s['_percent_str'] = self.format_percent(0)
else:
s['_percent_str'] = 'Unknown %'
if s.get('speed') is not None:
s['_speed_str'] = self.format_speed(s['speed'])
else:
s['_speed_str'] = 'Unknown speed'
if s.get('total_bytes') is not None:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
elif s.get('total_bytes_estimate') is not None:
s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
else:
if s.get('downloaded_bytes') is not None:
s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
if s.get('elapsed'):
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
else:
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
else:
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
self._report_progress_status(msg_template % s)
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen('[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx"""
self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_screen('[download] Unable to resume')
def download(self, filename, info_dict):
"""Download to a filename using the info from info_dict
Return True on success and False otherwise
"""
nooverwrites_and_exists = (
self.params.get('nooverwrites', False) and
os.path.exists(encodeFilename(filename))
)
continuedl_and_exists = (
self.params.get('continuedl', True) and
os.path.isfile(encodeFilename(filename)) and
not self.params.get('nopart', False)
)
# Check file already present
if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
self.report_file_already_downloaded(filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
'total_bytes': os.path.getsize(encodeFilename(filename)),
})
return True
sleep_interval = self.params.get('sleep_interval')
if sleep_interval:
self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
time.sleep(sleep_interval)
return self.real_download(filename, info_dict)
def real_download(self, filename, info_dict):
"""Real download process. Redefine in subclasses."""
raise NotImplementedError('This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:
ph(status)
def add_progress_hook(self, ph):
# See YoutubeDl.py (search for progress_hooks) for a description of
# this interface
self._progress_hooks.append(ph)
def _debug_cmd(self, args, exe=None):
if not self.params.get('verbose', False):
return
str_args = [decodeArgument(a) for a in args]
if exe is None:
exe = os.path.basename(str_args[0])
try:
import pipes
shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
except ImportError:
shell_quote = repr
self.to_screen('[debug] %s command line: %s' % (
exe, shell_quote(str_args)))
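# A minimal subclass sketch (not part of youtube-dl itself); it only illustrates
# the contract real downloaders follow: write the file, feed the progress hooks,
# and return True on success. Error handling is deliberately omitted.
class NullFD(FileDownloader):
    def real_download(self, filename, info_dict):
        self.report_destination(filename)
        data = b''  # a real downloader would stream bytes from info_dict['url']
        tmpfilename = self.temp_name(filename)
        with open(encodeFilename(tmpfilename), 'wb') as outf:
            outf.write(data)
        self.try_rename(tmpfilename, filename)
        self._hook_progress({
            'filename': filename,
            'status': 'finished',
            'total_bytes': len(data),
        })
        return True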
|
|
"""Utilities for input validation"""
# Authors: Olivier Grisel and Gael Varoquaux and others (please update me)
# License: BSD 3
import warnings
import numbers
import numpy as np
from scipy import sparse
from .fixes import safe_copy
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Array contains NaN or infinity.")
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
# First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to
# prevent false positives from overflow in sum method.
_assert_all_finite(X.data if sparse.issparse(X) else X)
def safe_asarray(X, dtype=None, order=None):
"""Convert X to an array or sparse matrix.
Prevents copying X when possible; sparse matrices are passed through."""
if sparse.issparse(X):
assert_all_finite(X.data)
else:
X = np.asarray(X, dtype, order)
assert_all_finite(X)
return X
def as_float_array(X, copy=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sparse.issparse(X)):
return safe_asarray(X, dtype=np.float64)
elif sparse.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def array2d(X, dtype=None, order=None, copy=False):
"""Returns at least 2-d array with data from X"""
if sparse.issparse(X):
raise TypeError('A sparse matrix was passed, but dense data '
'is required. Use X.toarray() to convert to dense.')
X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)
_assert_all_finite(X_2d)
if X is X_2d and copy:
X_2d = safe_copy(X_2d)
return X_2d
def _atleast2d_or_sparse(X, dtype, order, copy, sparse_class, convmethod):
if sparse.issparse(X):
# Note: order is ignored because CSR matrices hold data in 1-d arrays
if dtype is None or X.dtype == dtype:
X = getattr(X, convmethod)()
else:
X = sparse_class(X, dtype=dtype)
_assert_all_finite(X.data)
else:
X = array2d(X, dtype=dtype, order=order, copy=copy)
_assert_all_finite(X)
return X
def atleast2d_or_csc(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSC format.
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csc_matrix,
"tocsc")
def atleast2d_or_csr(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSR format
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csr_matrix,
"tocsr")
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x)
def check_arrays(*arrays, **options):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
By default lists and tuples are converted to numpy arrays.
    It is possible to enforce certain properties, such as dtype, contiguity
and sparse matrix format (if a sparse matrix is passed).
Converting lists to arrays can be disabled by setting ``allow_lists=True``.
Lists can then contain arbitrary objects and are not checked for dtype,
finiteness or anything else but length. Arrays are still checked
and possibly converted.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays, unless allow_lists is specified.
sparse_format : 'csr', 'csc' or 'dense', None by default
If not None, any scipy.sparse matrix is converted to
Compressed Sparse Rows or Compressed Sparse Columns representations.
If 'dense', an error is raised when a sparse array is
passed.
copy : boolean, False by default
If copy is True, ensure that returned arrays are copies of the original
(if not already converted to another format earlier in the process).
check_ccontiguous : boolean, False by default
Check that the arrays are C contiguous
dtype : a numpy dtype instance, None by default
Enforce a specific dtype.
allow_lists : bool
Allow lists of arbitrary objects as input, just check their length.
        Disables all checks other than the length check for lists.
"""
sparse_format = options.pop('sparse_format', None)
if sparse_format not in (None, 'csr', 'csc', 'dense'):
raise ValueError('Unexpected sparse format: %r' % sparse_format)
copy = options.pop('copy', False)
check_ccontiguous = options.pop('check_ccontiguous', False)
dtype = options.pop('dtype', None)
allow_lists = options.pop('allow_lists', False)
if options:
raise TypeError("Unexpected keyword arguments: %r" % options.keys())
if len(arrays) == 0:
return None
n_samples = _num_samples(arrays[0])
checked_arrays = []
for array in arrays:
array_orig = array
if array is None:
# special case: ignore optional y=None kwarg pattern
checked_arrays.append(array)
continue
size = _num_samples(array)
if size != n_samples:
raise ValueError("Found array with dim %d. Expected %d"
% (size, n_samples))
if not allow_lists or hasattr(array, "shape"):
if sparse.issparse(array):
if sparse_format == 'csr':
array = array.tocsr()
elif sparse_format == 'csc':
array = array.tocsc()
elif sparse_format == 'dense':
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if check_ccontiguous:
array.data = np.ascontiguousarray(array.data, dtype=dtype)
else:
array.data = np.asarray(array.data, dtype=dtype)
_assert_all_finite(array.data)
else:
if check_ccontiguous:
array = np.ascontiguousarray(array, dtype=dtype)
else:
array = np.asarray(array, dtype=dtype)
_assert_all_finite(array)
if copy and array is array_orig:
array = array.copy()
checked_arrays.append(array)
return checked_arrays
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point"""
if not isinstance(estimator, basestring):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
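# A short smoke test of the helpers above (illustrative only; not part of the
# original module).
if __name__ == '__main__':
    X, y = check_arrays([[1, 2], [3, 4]], [0, 1], dtype=np.float64)
    assert X.dtype == np.float64 and y.shape == (2,)
    assert as_float_array(np.arange(3.0)).dtype == np.float64
    assert isinstance(check_random_state(0), np.random.RandomState)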
|
|
"""
Starter fabfile for deploying the quantfi_project project.
Change all the things marked CHANGEME. Other things can be left at their
defaults if you are happy with the default layout.
"""
import posixpath
from fabric.api import run, local, env, settings, cd, task
from fabric.contrib.files import exists
from fabric.operations import _prefix_commands, _prefix_env_vars
#from fabric.decorators import runs_once
#from fabric.context_managers import cd, lcd, settings, hide
# CHANGEME
env.hosts = ['user@quantfi_project.example.com']
env.code_dir = '/srv/www/quantfi_project'
env.project_dir = '/srv/www/quantfi_project/quantfi_project'
env.static_root = '/srv/www/quantfi_project/static/'
env.virtualenv = '/srv/www/quantfi_project/.virtualenv'
env.code_repo = 'git@github.com:user/quantfi_project.git'
env.django_settings_module = 'quantfi_project.settings'
# Python version
PYTHON_BIN = "python2.7"
PYTHON_PREFIX = "" # e.g. /usr/local Use "" for automatic
PYTHON_FULL_PATH = "%s/bin/%s" % (PYTHON_PREFIX, PYTHON_BIN) if PYTHON_PREFIX else PYTHON_BIN
# Set to true if you can restart your webserver (via wsgi.py), false to stop/start your webserver
# CHANGEME
DJANGO_SERVER_RESTART = False
def virtualenv(venv_dir):
"""
Context manager that establishes a virtualenv to use.
"""
return settings(venv=venv_dir)
def run_venv(command, **kwargs):
"""
Runs a command in a virtualenv (which has been specified using
    the virtualenv context manager).
"""
run("source %s/bin/activate" % env.virtualenv + " && " + command, **kwargs)
def install_dependencies():
ensure_virtualenv()
with virtualenv(env.virtualenv):
with cd(env.code_dir):
run_venv("pip install -r requirements/production.txt")
def ensure_virtualenv():
if exists(env.virtualenv):
return
with cd(env.code_dir):
run("virtualenv --no-site-packages --python=%s %s" %
(PYTHON_BIN, env.virtualenv))
run("echo %s > %s/lib/%s/site-packages/projectsource.pth" %
(env.project_dir, env.virtualenv, PYTHON_BIN))
def ensure_src_dir():
if not exists(env.code_dir):
run("mkdir -p %s" % env.code_dir)
with cd(env.code_dir):
if not exists(posixpath.join(env.code_dir, '.git')):
run('git clone %s .' % (env.code_repo))
def push_sources():
"""
Push source code to server
"""
ensure_src_dir()
local('git push origin master')
with cd(env.code_dir):
run('git pull origin master')
@task
def run_tests():
""" Runs the Django test suite as is. """
local("./manage.py test")
@task
def version():
""" Show last commit to the deployed repo. """
with cd(env.code_dir):
run('git log -1')
@task
def uname():
""" Prints information about the host. """
run("uname -a")
@task
def webserver_stop():
"""
Stop the webserver that is running the Django instance
"""
run("service apache2 stop")
@task
def webserver_start():
"""
Starts the webserver that is running the Django instance
"""
run("service apache2 start")
@task
def webserver_restart():
"""
Restarts the webserver that is running the Django instance
"""
if DJANGO_SERVER_RESTART:
with cd(env.code_dir):
run("touch %s/wsgi.py" % env.project_dir)
else:
with settings(warn_only=True):
webserver_stop()
webserver_start()
def restart():
""" Restart the wsgi process """
with cd(env.code_dir):
run("touch %s/quantfi_project/wsgi.py" % env.code_dir)
def build_static():
assert env.static_root.strip() != '' and env.static_root.strip() != '/'
with virtualenv(env.virtualenv):
with cd(env.code_dir):
run_venv("./manage.py collectstatic -v 0 --clear --noinput")
run("chmod -R ugo+r %s" % env.static_root)
@task
def first_deployment_mode():
"""
Use before first deployment to switch on fake south migrations.
"""
env.initial_deploy = True
@task
def update_database(app=None):
"""
Update the database (run the migrations)
Usage: fab update_database:app_name
"""
with virtualenv(env.virtualenv):
with cd(env.code_dir):
if getattr(env, 'initial_deploy', False):
run_venv("./manage.py syncdb --all")
run_venv("./manage.py migrate --fake --noinput")
else:
run_venv("./manage.py syncdb --noinput")
if app:
run_venv("./manage.py migrate %s --noinput" % app)
else:
run_venv("./manage.py migrate --noinput")
@task
def sshagent_run(cmd):
"""
Helper function.
Runs a command with SSH agent forwarding enabled.
Note:: Fabric (and paramiko) can't forward your SSH agent.
This helper uses your system's ssh to do so.
"""
# Handle context manager modifications
wrapped_cmd = _prefix_commands(_prefix_env_vars(cmd), 'remote')
try:
host, port = env.host_string.split(':')
return local(
"ssh -p %s -A %s@%s '%s'" % (port, env.user, host, wrapped_cmd)
)
except ValueError:
return local(
"ssh -A %s@%s '%s'" % (env.user, env.host_string, wrapped_cmd)
)
@task
def deploy():
"""
Deploy the project.
"""
with settings(warn_only=True):
webserver_stop()
push_sources()
install_dependencies()
update_database()
build_static()
webserver_start()
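# Typical invocations from a developer machine once the CHANGEME values above
# point at a real host (task names are the ones defined in this fabfile):
#
#   fab uname                          # check connectivity to the host
#   fab run_tests                      # run the Django test suite locally
#   fab first_deployment_mode deploy   # first deploy, faking South migrations
#   fab deploy                         # regular deploy
#   fab update_database:app_name       # migrate a single app only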
|
|
#!/usr/bin/env python3
# -*- coding: Utf-8 -*-
"""
Swap
A puzzle game with a grid of blocks. Form associations of blocks by
swapping blocks to destroy them!
Modes:
Survival
Battle vs CPU/Human
Stamina (life bar)
Projectile (throw blocks to opponent)
"""
import sys
from random import randrange
from time import time
from itertoolsExt import flatten
from log import *
from grid import Grid, Combo, Block
from state import State, StateMachine
SCORES = [2, 3, 5, 10, 20, 50, 100, 200, 400, 600, 800]
scoreIt = lambda x: SCORES[x-3] if x <= 10 else 1000
class Player(object):
def __init__(self, type_, name):
assert type_ in ('Human', 'AI'), 'Player type must be among (Human, AI)!'
self.type = type_ # Human, AI
self.name = name
self.score = 0
self.scoreMultiplier = 1
self.swapperPos = (0, 0)
self.grid = Grid(12, 20, 4)
self.stateMachine = StateMachine()
class Game(object):
def __init__(self):
self.players = [Player('Human', 'Human'), Player('AI', 'BOT')]
self.humanPlayerId = listFind(self.players, 'Human', key=lambda e: e.type)
self.humanPlayer = self.players[self.humanPlayerId]
self.lastTime = time()
self.pause = False
INFO("Starting Swap")
for player in self.players:
if player.type == 'AI':
player.stateMachine.transition("AI_swap", 2) # To enable AI
player.stateMachine.transition("block", 4)
def update(self):
currentTime = time()
dt = currentTime - self.lastTime
self.lastTime = currentTime
if self.pause: return
for player in self.players:
#if any(player.stateMachine.isChanging(e) for e in player.stateMachine):
# DEBUG("State: %s", player.stateMachine.vcrepr())
self.stepStateMachine(player)
player.stateMachine.update(dt)
def stepStateMachine(self, player):
for stateName in tuple(player.stateMachine.keys()):
if stateName == "AI_swap":
if player.stateMachine["AI_swap"].status == "starting":
player.swapperPos = player.grid.randomSwap()
elif player.stateMachine["AI_swap"].status == "ending":
self.swap(player)
player.stateMachine.transition("AI_swap", 1.5)
elif stateName == "block":
if player.stateMachine["block"].status == "ending":
player.grid.spawnBlock()
self.checkAndFall(player)
player.stateMachine.transition("block", .5)
elif stateName.startswith("fall#"):
if player.stateMachine[stateName].status == "ending":
pos = player.stateMachine[stateName].data
player.grid.fallStepPos(*pos)
if player.grid.isHole(*pos):
player.stateMachine.transition(stateName, .2, pos)
else: # Falling ended
lowerHoles = player.grid.lowerHoles([pos[0]])
if lowerHoles:
player.stateMachine.transition(stateName, .2, lowerHoles[0])
else:
player.stateMachine.delete(stateName)
sumFalls = sum(1 for name in player.stateMachine if name.startswith("fall#"))
comboGroup = self.checkAndCombo(player, "fall", pos)
if sumFalls == 0 and not comboGroup:
player.scoreMultiplier = 1
elif stateName.startswith("combo#"):
if player.stateMachine[stateName].status == "ending":
#DEBUG("Combos %s\n%s", stateName, player.stateMachine[stateName].data)
comboGroup = player.stateMachine[stateName].data
comboGroup = updateComboGroupLazy(player, comboGroup)
self.processCombos(player, comboGroup)
player.stateMachine.delete(stateName)
#DEBUG("After delete combo: %s", self.getComboGroups(player))
self.checkAndFall(player)
def checkAndFall(self, player, focusX=None):
"""Check whether some blocks have to fall. Return lower holes.
Creates fall state for each hole found.
If focusX, then only corresponding columns are checked."""
lowerHoles = player.grid.lowerHoles(focusX)
#DEBUG("Lower holes: %s", lowerHoles)
for pos in lowerHoles:
if "fall#" + str(pos[0]) not in player.stateMachine:
player.stateMachine.transition("fall#" + str(pos[0]), .2, pos)
return lowerHoles
def getComboGroups(self, player):
return [player.stateMachine[name].data for name in player.stateMachine if name.startswith("combo#")]
def genComboId(self, player):
for i in range(100):
if "combo#" + str(i) not in player.stateMachine:
return i
raise RuntimeError("Too much combos")
def checkAndCombo(self, player, checkType, pos):
"""Check whether there are combos. Return combo group.
Creates combo state."""
if checkType == "fall":
comboGroup = player.grid.combosAfterFall(pos)
elif checkType == "swap":
comboGroup = player.grid.combosAfterSwap(pos)
else: raise ValueError("Wrong check type: " + str(checkType))
if comboGroup:
#DEBUG("Found combo group %s\nComboGroups: %s", comboGroup, self.getComboGroups(player))
fallingX = [pos[0] for pos in player.grid.lowerHoles()]
# Filter already found combos and update old combo groups
oldStates = [player.stateMachine[name] for name in player.stateMachine if name.startswith("combo#")]
for state in oldStates: # every state
oldComboGroup = state.data
oci = 0 # old combo index
while oci < len(oldComboGroup): # every stored combo
nci = 0 # new combo index
while nci < len(comboGroup): # every current combo
#DEBUG('Current combo group: %s', comboGroup)
if any(p[0] in fallingX for p in comboGroup[nci]):
DEBUG('Filter#1 combo: %s', comboGroup[nci])
comboGroup.pop(nci)
continue
                        # If the combos share more than one block
if comboGroup[nci] and sum(p in oldComboGroup[oci] for p in comboGroup[nci]) > 1:
if oldComboGroup[oci] != comboGroup[nci]:
DEBUG('Update old combo: %s -> %s', oldComboGroup[oci], comboGroup[nci])
oldComboGroup[oci] = comboGroup[nci] # Update old combo
else:
DEBUG('Filter#2 combo: %s', comboGroup[nci])
comboGroup.pop(nci)
continue
nci += 1
oci += 1
DEBUG("Add combo group %s", comboGroup)
if comboGroup:
player.stateMachine.transition("combo#" + str(self.genComboId(player)), 2, comboGroup)
return comboGroup
def processCombos(self, player, comboGroup):
if not len(comboGroup): return
comboGroupPos = set(flatten(comboGroup))
DEBUG('Score combos: %s %s', scoreIt(len(comboGroupPos)) * player.scoreMultiplier, comboGroup)
player.score += scoreIt(len(comboGroupPos)) * player.scoreMultiplier
player.scoreMultiplier += 1
for pos in comboGroupPos: # Remove combos
player.grid[pos] = 0
def processInputEvent(self, name):
player = self.humanPlayer
if name == "swap":
self.swap(player)
elif name in ("up", "right", "down", "left"):
self.moveSwapper(player, name)
def swap(self, player):
x, y = player.swapperPos
player.grid.swap(x, y)
self.checkAndFall(player, [x, x+1])
self.checkAndCombo(player, "swap", (x, y))
def moveSwapper(self, player, direction):
assert direction in ('up', 'right', 'down', 'left'), "direction must be one of up, right, down, left"
x, y = player.swapperPos
if direction == 'up': player.swapperPos = (x, max(0, y-1))
elif direction == 'right': player.swapperPos = (min(x+1, player.grid.width-2), y)
elif direction == 'down': player.swapperPos = (x, min(y+1, player.grid.height-1))
elif direction == 'left': player.swapperPos = (max(x-1, 0), y)
def updateComboGroupLazy(player, comboGroup):
"""Computes the final combo group based on combo state start and end, using
    the lazy strategy.
Lazy:
include any combo from start state that remains in end state"""
newComboGroup = []
for combo in comboGroup:
orientation = combo.orientation()
if orientation == 'h': comboTest = player.grid.comboHorizontalAround(*combo[0])
elif orientation == 'v': comboTest = player.grid.comboVerticalAround(*combo[0])
        else: raise NotImplementedError
if combo == comboTest:
newComboGroup.append(combo)
return newComboGroup
def updateComboGroupMorph(comboGroup1, comboGroup2):
"""Computes the final combo group based on combo state start and end, using
    the morph strategy.
Morph:
- compute the difference between the two sets of combo positions,
- include any combo from end state that has at least one position in common
with the difference set"""
# We compute the lists of blocks involved in each combo group
comboPos1 = set(flatten(comboGroup1))
comboPos2 = set(flatten(comboGroup2))
diffPos = comboPos1.intersection(comboPos2)
#DEBUG("cp: %s %s", comboPos1, comboPos2)
#DEBUG("diff pos: %s", diffPos)
comboGroup3 = []
for combo2 in comboGroup2:
for pos in diffPos:
if pos in combo2:
comboGroup3.append(combo2)
DEBUG("morph combo group: %s", comboGroup3)
return comboGroup3
def listFind(lst, val, key=(lambda x: x)):
for i,e in enumerate(lst):
if key(e) == val:
return i
return None
if __name__ == '__main__':
pass
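# A minimal headless driver (a sketch; it assumes the grid/state modules behave
# as they are used above). It can be wired into the __main__ block to let the
# AI player play on its own for a few seconds.
def demo_headless(duration=5.0):
    game = Game()
    start = time()
    while time() - start < duration:
        game.update()
    for player in game.players:
        INFO("%s final score: %s" % (player.name, player.score))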
|
|
import numpy as np
import pylab as pl
import astro
from matplotlib import colors, gridspec, rc
from astro.plot import puttext, A4LANDSCAPE, axvlines
from astro.constants import Ryd_Ang, eV, Ryd, k
from astro.utilities import indexnear
# loadobj, adict, parse_config and readtxt are assumed to come from the same astro package.
#pl.cm.register_cmap(name='BRG', cmap=make_cmap(*cvals.brg))
# number of axes to plot
nplot=24
M = loadobj('qg_grid.sav.gz')
#if M.nH[-1] < 1e-15:
# M.nH[-1] = 0
nNHI = len(M.NHI)
nnH = len(M.nH)
nZ = len(M.Z)
roman_map = dict(I=0, II=1, III=2, IV=3, V=4, VI=5, VII=6, VIII=7, IX=8, X=9)
roman = set('IVX')
xlabels = dict(Z='Z = log (X/H) - log (X/H)$_\odot$',
nH='log n$_H$ (cm$^{-3}$)',
NHI='log $N_{HI}$ (cm$^{-2}$)')
labels = dict(NHI='log N$_{HI}$=%.3g',
nH='log n$_H$=%.3g', Z='log Z=%.3g')
def split_trans(trans):
i = 1
while trans[i] not in roman:
i+=1
return trans[:i], trans[i:]
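# For example (illustrative): split_trans('SiIV') -> ('Si', 'IV') and
# split_trans('CII') -> ('C', 'II').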
# Quantities that vary with each model are saved as a 3 or 4
# dimensional array. The first three array axes give values as a
# function of NHI, nH, Z.
#
# The final axis in each N arrays is the transition number. For
# example, to access SiII for all models, use:
#
# models.N.Si[:,:,:,1] or models.N.Si[...,1]
#
# to access MgI for models run with NHI[1] and Z[1] values, but all nH
# values:
#
# models.N.Mg[1,:,1,0]
def plot_mod(x, z, yvals, ylabel, ax, ind=0, cmap=pl.cm.rainbow,
printlabel=True):
""" Plot column-density-derived values yvals as a function of the
x values (NHI, nH or Z), showing variation of quantity z by
    different coloured curves. ind is the index of the remaining
    quantity, which is held fixed.
"""
# Want index order to be indtype, x, z. By default it's NHI, nH,
# Z. Otherwise it has to change...
if (x,z) == ('NHI','Z'):
yvals = np.swapaxes(yvals, 0, 1)
elif (x,z) == ('Z','NHI'):
yvals = np.swapaxes(yvals, 0, 1)
yvals = np.swapaxes(yvals, 1, 2)
elif (x,z) == ('nH','NHI'):
yvals = np.swapaxes(yvals, 0, 2)
elif (x,z) == ('NHI', 'nH'):
yvals = np.swapaxes(yvals, 0, 2)
yvals = np.swapaxes(yvals, 1, 2)
elif (x,z) == ('Z','nH'):
yvals = np.swapaxes(yvals, 1, 2)
    norm = colors.Normalize(M[z].min(), M[z].max())
label_indices = set((0, len(M[z])//2, len(M[z])-1))
for i in range(len(M[z])):
# spring, summer, autumn, winter are all good
c = cmap(norm(M[z][i]))
label = None
if i in label_indices:
label = labels[z] % M[z][i]
#ax.plot(M[x], yvals[ind,:,i], '-', lw=2.5, color='k')
ax.plot(M[x], yvals[ind,:,i], '-', lw=1.5, color=c, label=label)
val, = list(set(['nH','NHI','Z']).difference([x,z]))
if printlabel:
ax.set_title(labels[val] % M[val][ind], fontsize='medium')
ax.title.set_y(1.01)
ax.set_xlabel(xlabels[x], fontsize='small')
ax.set_ylabel(ylabel)
ax.minorticks_on()
ax.set_xlim(M[x][0]+1e-3, M[x][-1]-1e-3)
def cleanup(ax, ratio, nplot, gs):
""" After all the plots are made, clean up the spacing between
plots and labels, make legend and remove unnecessary x ticklabels
and labels.
"""
gs[0].tight_layout(fig,rect=[0, 0, 0.25, 1],pad=0.5)
gs[0].update(hspace=1e-5, wspace=1e-5)
if len(ax) > nplot/2:
gs[1].tight_layout(fig,rect=[0.25, 0, 0.5, 1],pad=0.5)
gs[1].update(hspace=1e-5, wspace=1e-5, bottom=gs[0].bottom,
top=gs[0].top)
if len(ax) > nplot:
gs[2].tight_layout(fig,rect=[0.5, 0, 0.75, 1],pad=0.5)
gs[2].update(hspace=1e-5, wspace=1e-5, bottom=gs[0].bottom,
top=gs[0].top)
if len(ax) > 3*nplot/2:
gs[3].tight_layout(fig,rect=[0.75, 0, 1, 1],pad=0.5)
gs[3].update(hspace=1e-5, wspace=1e-5, bottom=gs[0].bottom,
top=gs[0].top)
for i in range(len(ax)):
if i not in (10, 11, 22, 23, 34, 35, 46, 47) and i < (len(ax)-2):
ax[i].set_xticklabels([])
ax[i].set_xlabel('')
# axes = [ax[-2]]
# if len(ax) > nplot:
# axes.append(ax[nplot - 2])
# for a in axes:
# a.set_xticklabels(['',u'\u22123.5','',u'\u22122.5','',u'\u22121.5','',
# u'\u22120.5', ''])
ax[1].legend(frameon=0, loc='best')
def make_gridfig(nplot):
""" Make a bunch of plots in a A4 landscape figure in four
columns.
"""
fig = pl.figure(figsize=A4LANDSCAPE)
# divide into 4 columns
gs = [gridspec.GridSpec(nplot/4, 2) for i in range(4)]
gs[0].update(left=0, right=0.25)
gs[1].update(left=0.25, right=0.5)
gs[2].update(left=0.5, right=0.75)
gs[3].update(left=0.75, right=1)
return fig, gs
def match_ylim_tweak_xlim(axes):
""" Make all axes have the same y limits, and slightly tweak x
limits.
"""
ymin, ymax = 99, -99
for a in axes:
y0, y1 = a.get_ylim()
if y0 < ymin:
ymin = y0
if y1 > ymax:
ymax = y1
for a in axes:
a.set_ylim(ymin+1e-3, ymax-1e-3)
x0,x1 = a.get_xlim()
a.set_xlim(x0+1e-3, x1-1e-3)
if 1:
###################################################################
# Make lots of plots of column density ratios as a function of nH
# and NHI.
###################################################################
#ratios = ("""SiIV/SiII SiIII/SiII SiIV/SiIII CIII/CII CIV/CII CIV/CIII
#AlIII/AlII NV/NII OVI/OI OI/SiII CIV/SiIV MgII/FeII FeII/SiII
#OVI/SiII OVI/NV HI/HII SiII/HI CII/HI AlII/HI""").split()
ratios = ("""SiIV/SiII SiIII/SiII SiIV/SiIII CIV/CII
AlIII/AlII HI/HII SiII/HI CII/HI AlII/HI OI/HI""").split()
# ratios not useful for constraining U - they don't change
# monotonically with U, or change significantly with metallicity.
# AlII/SiII AlII/NII CII/SiII MgII/SiII MgII/NII NII/SiII SiII/HI
fig, gs = make_gridfig(nplot)
iax = 0
ax = []
dax = adict()
for ratio in ratios:
if iax == 2*nplot:
cleanup(ax, ratio, nplot, gs)
fig, gs = make_gridfig(nplot)
iax = 0
ax = []
print ratio
trans = ratio.split('/')
atoms,nums = zip(*[split_trans(t) for t in trans])
i0, i1 = (roman_map[n] for n in nums)
yvals = M.N[atoms[0]][..., i0] - M.N[atoms[1]][..., i1]
ylabel = r'log (N$_{%s}$ / N$_{%s}$)' % tuple(
atom + n for atom,n in zip(atoms, nums))
# if trans[1] == 'HI':
# # normalise metals
# yvals = yvals - M.Z
# ylabel = r'log (N$_{%s}$ / N$_{%s}$ / Z)' % tuple(
# atom + n for atom,n in zip(atoms, nums))
yvals = yvals.clip(-10)
ylabel = ''
i = 3
if iax < nplot / 2: i = 0
elif iax < nplot: i = 1
elif iax < 3*nplot / 2: i = 2
ax.extend([pl.subplot(gs[i][iax % (nplot/2)]),
pl.subplot(gs[i][(iax+1) % (nplot/2)])])
dax[ratio] = ax[-2]
p = (True if iax in (0, nplot/2, nplot, 3.*nplot/2) else False)
plot_mod('nH','Z', yvals, ylabel, ax[iax],
ind=indexnear(M.NHI, 16.5), printlabel=p)
plot_mod('NHI','Z', yvals, ylabel, ax[iax+1],
ind=indexnear(M.nH, -2.5), printlabel=p)
ax[iax+1].set_ylabel('')
ax[iax+1].set_yticklabels([])
puttext(0.9, 0.1, ratio, ax[iax+1], fontsize='large', ha='right')
match_ylim_tweak_xlim(ax[iax:iax+2])
iax += 2
else:
cleanup(ax, ratio, nplot, gs)
# plot
if 1:
#################################
# plot the observed ratios
####################################
# each entry is the value, 1sig low, 1sig high
obs = parse_config('observed')
for k in obs:
vals = map(float, obs[k].split())
if len(vals) == 2:
obs[k] = vals[0], vals[1], vals[1]
elif len(vals) == 3:
obs[k] = tuple(vals)
else:
raise ValueError('Error parsing entry %s' % obs[k])
def get_ratio(a, b):
""" Measure minimum and maximum ratio for a/b"""
return (a[0]-a[1]) - (b[0]+b[2]), a[0]-b[0], (a[0]+a[2]) - (b[0]-b[1])
# obs_ratios = """ SiIV/SiII SiIII/SiII SiIV/SiIII CIV/CII CIV/SiIV
# AlIII/AlII CII/SiII OI/SiII MgII/FeII FeII/SiII CII/HI SiII/HI AlII/HI
# NV/CIV """.split()
obs_ratios = """ SiIV/SiII SiIII/SiII SiIV/SiIII CIV/CII
AlIII/AlII CII/HI SiII/HI AlII/HI OI/HI""".split()
for ratio in obs_ratios:
numer, denom = ratio.split('/')
low, best, high = get_ratio(obs[numer], obs[denom])
dax[ratio].fill_between([-5, 0.5], low, high, alpha=0.2, zorder=10)
dax[ratio].plot([-5, 0.5], [best, best], 'k', zorder=11)
for a in ax:
a.axvline(-2.5, color='k', ls='--')
if 0:
###################################################################
# Make a plot showing the column density as a function of nH
###################################################################
pl.figure()
IONS = ('MgII CII CIII CIV SiII SiIII SiIV FeII '
'OI OVI HI AlI AlII AlIII NII NI').split()
#trans = ('SiII SiIII SiIV CII CIII CIV AlII AlIII '
# 'MgI MgII FeI CaII CaI FeII OI OVI NII NV NeVIII MgX HII').split()
atoms,nums = zip(*[split_trans(t) for t in IONS])
ax = pl.gca()
colors = dict(Si='y', C='k', Al='c', O='r', N='g', Fe='orange',
Ne='pink', Mg='b', H='m', Ca='purple')
count = dict((k,0) for k in colors)
ls = ['-', '--', '-.', ':']
iNHI = nNHI // 2
iZ = nZ // 2
for atom, num in zip(atoms, nums):
col = colors[atom]
N = M.N[atom][iNHI, :, iZ, roman_map[num]]
ax.plot(M.nH, N, lw=3, ls=ls[count[atom] % 3],color=col,label=atom+num)
count[atom] += 1
ax.set_xlabel(xlabels['nH'])
ax.set_ylabel('log N (cm$^{-2}$)')
ax.set_title('log Z = %.2f, log NHI = %.2f' % (M.Z[iZ], M.NHI[iNHI]),
fontsize='medium')
ax.set_ylim(8, 23)
ax.set_xlim(M.nH[0], M.nH[-1])
pl.legend(frameon=0, ncol=2)
if 0:
####################################################################
# plot the ionisation energies of ions over the incident
# continuum.
####################################################################
#from astro.pyvpfit import readatom
#atom = readatom()
#IONS = list(atom)
ions = readtxt(astro.datapath+'linelists/Verner_table4.txt.gz',readnames=1)
IONS = ('MgI MgII MgX CI CII CIII CIV SiII SiIII SiIV FeI FeII '
'OI OII OIII OIV OV OVI OVII CaI CaII HI AlI AlII AlIII '
'NII NI SI SII SIII NaI NeVIII').split()
# IONS = ('MgII CII CIII CIV SiII SiIII SiIV FeII '
# 'OI OVI HI AlI AlII AlIII NII NI').split()
ions1 = ions[np.in1d(ions.name, IONS)]
ions1.sort(order='ie')
energy = ions1.ie
fig = pl.figure(figsize=A4LANDSCAPE)
fig.subplots_adjust(left=0.11, right=0.95)
ax = pl.gca()
ax.loglog(M.cont.ryd * Ryd / eV, M.cont.fnu, label='UVB z=2.2')
ax.set_xlabel('Energy (eV)')
ax.set_ylabel(r'$F_{\nu}$ (ergs/s/cm$^2$/Hz)')
ax.set_xlim(3, 1e3)
axvlines(energy, 0, 1, ax=ax)
for i in range(len(ions1)):
puttext(energy[i], 0.8 + 0.07*(i % 3), ions1.name[i], ax,
xcoord='data', rotation=90, fontsize='small', ha='right')
ax1 = ax.twiny()
ax1.set_xscale('log')
E0, E1 = ax.get_xlim()
T0, T1 = 2*E0*eV/k , 2*E1*eV/k
ax1.set_xlim(T0,T1)
ax.set_ylim(1e-23, 3e-17)
ax1.set_xlabel("required gas temperature = 2*Energy/k (K)")
#rc('legend', borderaxespad=1.5, fontsize='small')
#rc('font', size=16)
rc('legend', borderaxespad=0.5, fontsize=8)
rc('font', size=12)
pl.show()
# Conclusions: Ratios of transitions in the same species (Si, C) are
# very sensitive to the ionization parameter (thus density), but
# mostly insensitive to the HI column density over the range logN 14
# -> 18 and metallicity over the range logZ -2 -> 0.
|
|
# Copyright (C) 2016-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import logging
import mock
import os
import pytest
import shutil
import tempfile
from cuckoo.apps.import_ import (
identify, import_legacy_analyses, dumpcmd, movesql, sqldump
)
from cuckoo.common.config import config
from cuckoo.common.exceptions import CuckooOperationalError
from cuckoo.common.files import Files, temppath
from cuckoo.core.database import Database
from cuckoo.main import cuckoo_create, main
from cuckoo.misc import cwd, set_cwd, mkdir, is_windows, is_linux, is_macosx
log = logging.getLogger(__name__)
constants_04_py = """
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
CUCKOO_ROOT = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..", ".."))
CUCKOO_VERSION = "0.4"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_041_py = """
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
CUCKOO_ROOT = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..", ".."))
CUCKOO_VERSION = "0.4.1"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_042_py = """
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
CUCKOO_ROOT = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..", ".."))
CUCKOO_VERSION = "0.4.2"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_05_py = """
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
CUCKOO_ROOT = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..", ".."))
CUCKOO_VERSION = "0.5"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_06_py = """
# Copyright (C) 2010-2013 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
CUCKOO_ROOT = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..", ".."))
CUCKOO_VERSION = "0.6"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_10_py = """
# Copyright (C) 2010-2014 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
_current_dir = os.path.abspath(os.path.dirname(__file__))
CUCKOO_ROOT = os.path.normpath(os.path.join(_current_dir, "..", "..", ".."))
CUCKOO_VERSION = "1.0"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_11_py = """
# Copyright (C) 2010-2014 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
_current_dir = os.path.abspath(os.path.dirname(__file__))
CUCKOO_ROOT = os.path.normpath(os.path.join(_current_dir, "..", "..", ".."))
CUCKOO_VERSION = "1.1"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_12_py = """
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
_current_dir = os.path.abspath(os.path.dirname(__file__))
CUCKOO_ROOT = os.path.normpath(os.path.join(_current_dir, "..", "..", ".."))
CUCKOO_VERSION = "1.2"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_20rc1_py = """
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
_current_dir = os.path.abspath(os.path.dirname(__file__))
CUCKOO_ROOT = os.path.normpath(os.path.join(_current_dir, "..", "..", ".."))
CUCKOO_VERSION = "2.0-rc1"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_20rc2_py = """
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
_current_dir = os.path.abspath(os.path.dirname(__file__))
CUCKOO_ROOT = os.path.normpath(os.path.join(_current_dir, "..", "..", ".."))
CUCKOO_VERSION = "2.0-rc2"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
constants_20dev_py = """
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
_current_dir = os.path.abspath(os.path.dirname(__file__))
CUCKOO_ROOT = os.path.normpath(os.path.join(_current_dir, "..", "..", ".."))
CUCKOO_VERSION = "2.0-dev"
CUCKOO_GUEST_PORT = 8000
CUCKOO_GUEST_INIT = 0x001
CUCKOO_GUEST_RUNNING = 0x002
CUCKOO_GUEST_COMPLETED = 0x003
CUCKOO_GUEST_FAILED = 0x004
"""
def drop_constants_py(content):
dirpath = tempfile.mkdtemp()
dirpath2 = os.path.join(dirpath, "lib", "cuckoo", "common")
os.makedirs(dirpath2)
filepath = os.path.join(dirpath2, "constants.py")
open(filepath, "wb").write(content)
return dirpath
def test_identify():
dirpath = drop_constants_py(constants_04_py)
assert identify(dirpath) == "0.4"
dirpath = drop_constants_py(constants_041_py)
assert identify(dirpath) == "0.4.1"
dirpath = drop_constants_py(constants_042_py)
assert identify(dirpath) == "0.4.2"
dirpath = drop_constants_py(constants_05_py)
assert identify(dirpath) == "0.5"
dirpath = drop_constants_py(constants_06_py)
assert identify(dirpath) == "0.6"
dirpath = drop_constants_py(constants_10_py)
assert identify(dirpath) == "1.0"
dirpath = drop_constants_py(constants_11_py)
assert identify(dirpath) == "1.1"
dirpath = drop_constants_py(constants_12_py)
assert identify(dirpath) == "1.2"
dirpath = drop_constants_py(constants_20rc1_py)
assert identify(dirpath) == "2.0-rc1"
dirpath = drop_constants_py(constants_20rc2_py)
assert identify(dirpath) == "2.0-rc2"
dirpath = drop_constants_py(constants_20dev_py)
assert identify(dirpath) == "2.0-dev"
dirpath = drop_constants_py("hello world")
assert identify(dirpath) is None
def init_legacy_analyses():
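    # Build a fake legacy storage/analyses tree: analysis 1 with logs and
    # reports, analysis 2 with a cuckoo.log and a dangling symlink, plus a
    # stray "latest" file that must not be imported as an analysis.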
dirpath = tempfile.mkdtemp()
mkdir(dirpath, "storage")
mkdir(dirpath, "storage", "analyses")
mkdir(dirpath, "storage", "analyses", "1")
mkdir(dirpath, "storage", "analyses", "1", "logs")
Files.create(
(dirpath, "storage", "analyses", "1", "logs"), "a.txt", "a"
)
mkdir(dirpath, "storage", "analyses", "1", "reports")
Files.create(
(dirpath, "storage", "analyses", "1", "reports"), "b.txt", "b"
)
mkdir(dirpath, "storage", "analyses", "2")
Files.create((dirpath, "storage", "analyses", "2"), "cuckoo.log", "log")
if not is_windows():
os.symlink(
"thisisnotanexistingfile",
os.path.join(dirpath, "storage", "analyses", "2", "binary")
)
Files.create((dirpath, "storage", "analyses"), "latest", "last!!1")
return dirpath
def init_import_legacy(mode):
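    # Create a fresh CWD, import the legacy analyses with the given mode
    # (copy, move, or symlink), and sanity-check the imported files.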
set_cwd(tempfile.mkdtemp())
cuckoo_create()
dirpath = init_legacy_analyses()
assert sorted(import_legacy_analyses(mode, dirpath)) == [1, 2]
assert open(cwd("logs", "a.txt", analysis=1), "rb").read() == "a"
assert open(cwd("reports", "b.txt", analysis=1), "rb").read() == "b"
assert open(cwd("cuckoo.log", analysis=2), "rb").read() == "log"
assert not os.path.exists(cwd(analysis="latest"))
return dirpath
def test_import_cuckoo_cwd(capsys):
set_cwd(tempfile.mkdtemp())
cuckoo_create()
with pytest.raises(SystemExit):
main.main(
("--cwd", cwd(), "import", cwd()), standalone_mode=False
)
out, _ = capsys.readouterr()
assert "import a legacy Cuckoo" in out
def test_import_legacy_analyses_copy():
dirpath = init_import_legacy("copy")
dirpath1 = os.path.join(dirpath, "storage", "analyses", "1")
assert os.path.isdir(dirpath1)
filepath = os.path.join(dirpath1, "logs", "a.txt")
assert open(filepath, "rb").read() == "a"
dirpath2 = os.path.join(dirpath, "storage", "analyses", "2")
assert os.path.isdir(dirpath2)
assert os.path.isdir(cwd(analysis=1))
assert os.path.isdir(cwd(analysis=2))
def test_import_legacy_analyses_move():
dirpath = init_import_legacy("move")
dirpath1 = os.path.join(dirpath, "storage", "analyses", "1")
assert not os.path.isdir(dirpath1)
dirpath2 = os.path.join(dirpath, "storage", "analyses", "2")
assert not os.path.isdir(dirpath2)
assert os.path.isdir(cwd(analysis=1))
assert os.path.isdir(cwd(analysis=2))
if not is_windows():
def test_import_legacy_analyses_symlink():
dirpath = init_import_legacy("symlink")
assert os.path.islink(cwd(analysis=1))
assert os.path.islink(cwd(analysis=2))
dirpath1 = os.path.join(dirpath, "storage", "analyses", "1")
assert os.path.isdir(dirpath1)
filepath = os.path.join(dirpath1, "logs", "a.txt")
assert open(filepath, "rb").read() == "a"
assert os.readlink(cwd(analysis=1)) == dirpath1
dirpath2 = os.path.join(dirpath, "storage", "analyses", "2")
assert os.path.isdir(dirpath2)
assert os.readlink(cwd(analysis=2)) == dirpath2
def test_dumpcmd():
assert dumpcmd(None, "/tmp") == (
["sqlite3", os.path.join("/tmp", "db/cuckoo.db"), ".dump"], {}
)
assert dumpcmd("sqlite:///db/cuckoo.db", "/tmp") == (
["sqlite3", os.path.join("/tmp", "db/cuckoo.db"), ".dump"], {}
)
assert dumpcmd("sqlite:////tmp/cuckoo.db", "/tmp") == (
["sqlite3", "/tmp/cuckoo.db", ".dump"], {}
)
if not is_macosx():
assert dumpcmd("mysql://foo:bar@localh0st/baz", "/tmp") == (
["mysqldump", "-u", "foo", "-pbar", "-h", "localh0st", "baz"], {}
)
assert dumpcmd("mysql://cuckoo:random!@localhost/cuckoo", "/tmp") == (
["mysqldump", "-u", "cuckoo", "-prandom!", "cuckoo"], {}
)
if not is_macosx():
assert dumpcmd("postgresql://user:bar@localhost/baz", "/tmp") == (
["pg_dump", "-U", "user", "baz"], {"PGPASSWORD": "bar"}
)
assert dumpcmd("postgresql://u n!:bar@localhost/baz", "/tmp") == (
["pg_dump", "-U", "u n!", "baz"], {"PGPASSWORD": "bar"}
)
assert dumpcmd("postgresql://:b@c/d", "/tmp") == (
["pg_dump", "-h", "c", "d"], {"PGPASSWORD": "b"}
)
with pytest.raises(CuckooOperationalError) as e:
dumpcmd("notadatabaseuri", "/tmp")
e.match("URI wasn't understood")
with pytest.raises(CuckooOperationalError) as e:
dumpcmd("notadatabase://a:b@c/d", "/tmp")
e.match("URI wasn't understood")
class TestMoveSQL(object):
def setup(self):
set_cwd(tempfile.mkdtemp())
cuckoo_create()
@pytest.mark.skipif("sys.platform == 'darwin'")
def test_mysql(self):
movesql("mysql://foo:bar@localh0st/baz", None, None)
@pytest.mark.skipif("sys.platform == 'darwin'")
def test_postgresql(self):
movesql("postgresql://user:bar@localhost/baz", None, None)
def test_empty_copy(self):
oldfilepath = Files.temp_put("hello")
movesql("sqlite:///%s" % oldfilepath, "copy", temppath())
assert os.path.exists(oldfilepath)
assert os.path.exists(cwd("cuckoo.db"))
assert not os.path.islink(cwd("cuckoo.db"))
assert open(cwd("cuckoo.db"), "rb").read() == "hello"
def test_empty_move(self):
oldfilepath = Files.temp_put("hello")
movesql("sqlite:///%s" % oldfilepath, "move", temppath())
assert not os.path.exists(oldfilepath)
assert os.path.exists(cwd("cuckoo.db"))
assert not os.path.islink(cwd("cuckoo.db"))
assert open(cwd("cuckoo.db"), "rb").read() == "hello"
def test_empty_symlink(self):
oldfilepath = Files.temp_put("hello")
try:
movesql("sqlite:///%s" % oldfilepath, "symlink", temppath())
# Following is non-windows.
assert os.path.exists(oldfilepath)
assert os.path.exists(cwd("cuckoo.db"))
assert os.path.islink(cwd("cuckoo.db"))
assert open(cwd("cuckoo.db"), "rb").read() == "hello"
except RuntimeError as e:
assert is_windows()
assert "'symlink'" in e.message
@mock.patch("cuckoo.apps.import_.subprocess")
@mock.patch("click.confirm")
def test_sqldump_noconfirm(p, q):
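    # mock.patch decorators apply bottom-up, so p is the click.confirm mock
    # and q is the patched cuckoo.apps.import_.subprocess module.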
p.return_value = False
sqldump(None, "/tmp")
q.check_call.assert_not_called()
class ImportCuckoo(object):
@mock.patch("click.confirm")
def test_sqldump(self, p):
set_cwd(tempfile.mkdtemp())
p.return_value = True
try:
sqldump(self.URI, "/tmp")
assert os.path.getsize(cwd("backup.sql"))
except CuckooOperationalError as e:
assert "SQL database dump as the command" in e.message
assert not is_linux()
@mock.patch("click.confirm")
def test_import_confirm(self, p):
set_cwd(tempfile.mkdtemp())
p.return_value = True
dirpath = init_legacy_analyses()
os.makedirs(os.path.join(dirpath, "lib", "cuckoo", "common"))
open(os.path.join(
dirpath, "lib", "cuckoo", "common", "constants.py"
), "wb").write(constants_11_py)
shutil.copytree(
"tests/files/conf/110_plain", os.path.join(dirpath, "conf")
)
filepath = os.path.join(dirpath, "conf", "cuckoo.conf")
buf = open(filepath, "rb").read()
open(filepath, "wb").write(buf.replace(
"connection =", "connection = %s" % self.URI
))
try:
main.main(
("--cwd", cwd(), "import", dirpath), standalone_mode=False
)
except CuckooOperationalError as e:
assert "SQL database dump as the command" in e.message
assert not is_linux()
return
db = Database()
db.connect()
assert db.engine.name == self.ENGINE
assert open(cwd("logs", "a.txt", analysis=1), "rb").read() == "a"
assert config("cuckoo:database:connection") == self.URI
assert db.count_tasks() == 2
@mock.patch("click.confirm")
def test_import_noconfirm(self, p):
set_cwd(tempfile.mkdtemp())
p.side_effect = True, False
dirpath = init_legacy_analyses()
os.makedirs(os.path.join(dirpath, "lib", "cuckoo", "common"))
open(os.path.join(
dirpath, "lib", "cuckoo", "common", "constants.py"
), "wb").write(constants_11_py)
shutil.copytree(
"tests/files/conf/110_plain", os.path.join(dirpath, "conf")
)
filepath = os.path.join(dirpath, "conf", "cuckoo.conf")
buf = open(filepath, "rb").read()
open(filepath, "wb").write(buf.replace(
"connection =", "connection = %s" % self.URI
))
main.main(
("--cwd", cwd(), "import", dirpath), standalone_mode=False
)
db = Database()
db.connect()
assert db.engine.name == self.ENGINE
assert open(cwd("logs", "a.txt", analysis=1), "rb").read() == "a"
assert config("cuckoo:database:connection") == self.URI
assert db.count_tasks() == 2
class TestImportCuckooSQLite3(ImportCuckoo):
ENGINE = "sqlite"
_filepath = tempfile.mktemp()
shutil.copy("tests/files/cuckoo.db", _filepath)
URI = "sqlite:///%s" % _filepath
@pytest.mark.skipif("sys.platform == 'darwin'")
class TestImportCuckooMySQL(ImportCuckoo):
ENGINE = "mysql"
URI = "mysql://cuckoo:cuckoo@localhost/cuckootestimport"
@pytest.mark.skipif("sys.platform == 'darwin'")
class TestImportCuckooPostgreSQL(ImportCuckoo):
ENGINE = "postgresql"
URI = "postgresql://cuckoo:cuckoo@localhost/cuckootestimport"
|
|
from liblinearutil_xiao import *
from active_learning import *
from check_dataset import *
def get_all_used_docs(fname, labelIndex, parents):
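    # Build (doc id, row index, +1/-1) tuples for labelIndex's binary problem:
    # a document is positive if it carries labelIndex and is kept as a
    # negative only if it carries the parent label (the virtual label -1 is
    # added to every document), so the training data is localised to the node.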
docs = []
n_index = 0
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
id = line[0]
num_label = int(line[1])
labels = line[2:]
labels = [int(l) for l in labels]
labels = set(labels)
labels.add(-1)
if labelIndex in labels:
#positive
docs.append((id, n_index, 1))
else:#negative
par = parents[labelIndex]
if par == 0 or par in labels:
docs.append((id, n_index, -1))
n_index += 1
fd.close()
return docs
def read_problem_feature(docs, fname):
id_used = set([d[0] for d in docs])
x = []
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
id = line[0]
if id in id_used:
features = {}
for l in line[1:]:
wd, v = l.split(':')
features[int(wd)] = float(v)
x.append(features)
fd.close()
return x
def select_problem_TF_feature(indices, features):
ret_features = []
for index in range(len(features)):
if index in indices:
ret_features.append(dict(features[index]))#make a copy here
return ret_features
def read_selected_problem_TF_feature(docs, fname):
id_used = set([d[0] for d in docs])
x = []
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
id = line[0]
if id in id_used:
words = []
occs = []
for l in line[1:]:
wd, v = l.split(':')
words.append(int(wd))
occs.append(int(v))
#do normalization
sm = sum(occs)
occs = [float(o)/sm for o in occs]
features = {}
for i in range(len(words)):
features[words[i]] = occs[i]
#append examples
x.append(features)
fd.close()
return x
def get_max_feature(fname):
max_f = -1
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
id = line[0]
for l in line[1:]:
wd, v = l.split(':')
if int(wd) > max_f:
max_f = int(wd)
fd.close()
return max_f
def read_problem_id(docs):
ids = [int(d[0]) for d in docs]
return ids
def read_problem_index(docs):
indices = [int(d[1]) for d in docs]
return indices
def read_problem_label(docs):
y = [d[2] for d in docs]
return y
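#NOTE: this single-argument version shadows the two-argument
#read_problem_feature(docs, fname) defined earlier in this file.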
def read_problem_feature(fname):
x = []
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
id = line[0]
features = {}
for l in line[1:]:
wd, v = l.split(':')
features[int(wd)] = float(v)
x.append(features)
fd.close()
return x
def read_problem_TF_feature(fname):
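    # Read raw term counts and L1-normalise them per document, so each
    # feature value becomes the term frequency occ / sum(occs).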
x = []
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
id = line[0]
words = []
occs = []
for l in line[1:]:
wd, v = l.split(':')
words.append(int(wd))
occs.append(int(v))
#do normalization
sm = sum(occs)
occs = [float(o)/sm for o in occs]
features = {}
for i in range(len(words)):
features[words[i]] = occs[i]
x.append(features)
fd.close()
return x
def write_list_to_file(data, fname):
fd = open(fname, 'w')
for d in data:
v = '%.4f' % d
fd.write(v + '\n')
fd.close()
def get_silbing(c, parents):
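    # Return every node that shares c's parent; note that c itself is
    # included in the returned list.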
sib = []
my_parent = parents[c]
for nd in parents:
if parents[nd] == my_parent:
sib.append(nd)
return sib
def read_used_probs(fname, used_indices):
probs = []
index = 0
fd = open(fname)
for line in fd:
if index in used_indices:
p = float(line.strip())
probs.append(p)
index += 1
fd.close()
return probs
def read_used_probs_by_id(fname, used_id_set):
probs = []
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
did = int(line[0])
prob = float(line[1])
if did in used_id_set:
probs.append(prob)
fd.close()
return probs
def read_probs(fname):
probs = []
fd = open(fname)
for line in fd:
p = float(line.strip())
probs.append(p)
fd.close()
return probs
def prob_dict_to_lst(probs):
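    # Convert {category: [prob per document]} into one list per document,
    # with probabilities ordered by ascending category id.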
all_cats = probs.keys()
all_cats.sort()
lst = []
n_probs = len(probs[all_cats[0]])
for i in range(n_probs):
lst.append([probs[c][i] for c in all_cats])
return lst
def compute_loss_with_labels(pred_labels, labels):
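    # Compare predicted labels against gold labels and return
    # (tp, fp, fn, precision, recall, F1) for the positive class.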
tp = 0
fp = 0
fn = 0
for i in range(len(pred_labels)):
if pred_labels[i] == 1:
p = 1
else:
p = -1
l = labels[i]
if p == 1 and l == 1:
tp += 1
elif p == 1 and l == -1:
fp += 1
elif p == -1 and l == 1:
fn += 1
if tp + fp != 0:
pre = float(tp)/(tp + fp)
else:
pre = 0
if tp + fn != 0:
rec = float(tp)/(tp + fn)
else:
rec = 0
if pre + rec == 0:
f1 = 0
else:
f1 = 2* pre * rec / (pre + rec)
return tp, fp, fn, pre, rec, f1
def compute_loss(probs, labels, threshold):
tp = 0
fp = 0
fn = 0
for i in range(len(probs)):
if probs[i] >= threshold:
p = 1
else:
p = -1
l = labels[i]
if p == 1 and l == 1:
tp += 1
elif p == 1 and l == -1:
fp += 1
elif p == -1 and l == 1:
fn += 1
if tp + fp != 0:
pre = float(tp)/(tp + fp)
else:
pre = 0
if tp + fn != 0:
rec = float(tp)/(tp + fn)
else:
rec = 0
if pre + rec == 0:
f1 = 0
else:
f1 = 2* pre * rec / (pre + rec)
return tp, fp, fn, pre, rec, f1
def usage():
print 'cmd --hier fname1 --trfeature fname2 --trlabel fname3 --tefeature fname4 --telabel fname4 --modelfolder --predictionfolder --trainpredictionfolder'
def check_first_positive(x, y):
    # liblinear orders its internal class labels by first occurrence, so make
    # sure the first training example is a positive one whenever possible.
    if len(y) > 1:
        if y[0] == -1:
            #find the first positive
            for i in range(len(y)):
                if y[i] == 1:
                    break
            #only swap if a positive example was actually found
            if y[i] == 1:
                tmp = y[0]
                y[0] = y[i]
                y[i] = tmp
                tmp = x[0]
                x[0] = x[i]
                x[i] = tmp
def read_labels(fname):
labels = []
fd = open(fname)
for line in fd:
line = line.strip().split(' ')
id = line[0]
num = int(line[1])
v = line[2:]
v = [int(vv) for vv in v]
labels.append(set(v))
fd.close()
return labels
def get_binary_label(labels, label, parent):
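    # Build binary labels for one node: with no parent (-1) every document is
    # labelled +1/-1; otherwise only documents carrying the parent are kept,
    # +1 if they also carry the label and -1 if they do not.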
ret_labels = []
for ls in labels:
if parent == -1:
if label in ls:
ret_labels.append(1)
else:
ret_labels.append(-1)
else:
if label in ls and parent in ls:
ret_labels.append(1)
elif parent in ls:
ret_labels.append(-1)
return ret_labels
def get_binary_label_global(labels, label):
ret_labels = []
for ls in labels:
if label in ls:
ret_labels.append(1)
else:
ret_labels.append(-1)
return ret_labels
def read_feature_map(fname):
f2map = {}
fd = open(fname)
for line in fd:
line = line.strip().split(':')
old_f = int(line[0])
new_f = int(line[1])
f2map[old_f] = new_f
fd.close()
return f2map
def remap_feature(x, f2map):
new_x = []
for xx in x:
new_xx = {}
for w in xx:
if w in f2map:
new_xx[f2map[w]] = xx[w]
new_x.append(new_xx)
return new_x
def make_feature_map(features):
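    # Map every word id appearing in the feature dicts to a dense 1..n index,
    # assigned in ascending order of the original ids.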
f2map = {}
all_words = set()
for f in features:
for w in f:
all_words.add(w)
all_words = list(all_words)
all_words.sort()
n = 1
for w in all_words:
f2map[w] = n
n += 1
return f2map
if __name__ == '__main__':
import getopt, sys
try:
opts, args = getopt.getopt(sys.argv[1:], 'iot:h', ['help', 'hier=', 'trfeature=', 'trlabel=', 'tefeature=', 'telabel=', 'modelfolder=', 'trainpredictionfolder=', 'predictionfolder='])
    except getopt.GetoptError, err:
        print str(err)
usage()
sys.exit(1)
hier_fname = ''
train_feature_fname = ''
train_label_fname = ''
test_feature_fname = ''
test_label_fname = ''
model_output = ''
prediction_output = ''
trainpredictionfolder = ''
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit(0)
elif opt in ('--hier'):
hier_fname = arg
elif opt in ('--trfeature'):
train_feature_fname = arg
elif opt in ('--trlabel'):
train_label_fname = arg
elif opt in ('--tefeature'):
test_feature_fname = arg
elif opt in ('--telabel'):
test_label_fname = arg
elif opt in ('--modelfolder'):
model_output = arg
elif opt in ('--predictionfolder'):
prediction_output = arg
elif opt in ('--trainpredictionfolder'):
trainpredictionfolder = arg
if hier_fname == '' or train_feature_fname == '' or train_label_fname == '' or test_feature_fname == '' or test_label_fname == '' or model_output == '' or prediction_output == '' or trainpredictionfolder == '':
usage()
sys.exit(1)
    #build hierarchy tree
root, all_nodes = Node().read_parent_child_pair_tree(hier_fname)
all_labels = all_nodes.keys()
tree_size = root.get_tree_size() - 1
levels = root.get_max_level()
nodes_per_level = [[] for i in range(levels)]
parents = {}
nd_leaves = []
root.get_nodes_per_level(0, nodes_per_level)
root.get_leaves(nd_leaves)
root.get_parents(parents)
leaves = [l.labelIndex for l in nd_leaves]
print tree_size, levels
for i in range(levels):
print i, len(nodes_per_level[i])
print len(leaves)
leaves = set(leaves)
#get maximal feature
max_f = get_max_feature(train_feature_fname)
#read train features
train_features = read_problem_TF_feature(train_feature_fname)
#read test features
test_features = read_problem_TF_feature(test_feature_fname)
test_labels = read_labels(test_label_fname)
threshold = 0.5
#do training and testing from the top-level to the bottom level
for cur_depth in range(levels):
#for cur_depth in [0]:
nodes = nodes_per_level[cur_depth]
for l in nodes:#for nd in this level
#for l in [0]:
#get <id, label> pairs
print 'train', l
#localize data to each node
docs = get_all_used_docs(train_label_fname, l, parents)
#make training dataset
y = read_problem_label(docs)
ids = read_problem_id(docs)
indices = read_problem_index(docs)
#x = read_problem_TF_feature(docs, train_feature_fname)
#this is a new copy of the old instance list
x = select_problem_TF_feature(set(indices), train_features)
#print statistics
num_pos_y = y.count(1)
num_neg_y = len(y) - num_pos_y
print 'pos', num_pos_y, 'neg', num_neg_y, 'total', len(x)
#for lower-level nodes, use the prediction from the ancestor nodes
if cur_depth > 0:
#read all its ancestor prediction, use them as features
#get all ancestor
ancestors = []
c = parents[l]
while c != -1:
ancestors.insert(0, c)
c = parents[c]
#make features from the top-level to the bottom level
                #using the sibling predictions of each ancestor
used_probs = {}
for par in ancestors:
sib = get_silbing(par, parents)
#read probs from sib, only selecting the used examples
                    #indices are used for aligning with the current training set
for s in sib:
ps = read_used_probs_by_id(trainpredictionfolder + '/' + str(s) + '_test_probs', set(ids))
used_probs[s] = ps
print s, len(ps)
#transform probs to lst
prob_lst = prob_dict_to_lst(used_probs)
#make new features by expanding some meta prob features
#training set x is changed
for i in range(len(x)):
xx = x[i]
probs = prob_lst[i]
for j in range(len(probs)):
xx[max_f + j + 1] = probs[j]
#f2map = make_feature_map(x)
#x = remap_feature(x, f2map)
#check dataset, put the first element as positive +1
check_first_positive(x, y)
#train SVM model
prob = problem(y, x)
param = parameter('-q')
m = train(prob, param)
#save SVM model
save_model(model_output + '/' + str(l) + '.svm', m)
#make prediction on training set
print 'predict train', l
train_p_labs, train_p_acc, train_p_vals, train_p_probs = predict_label_score_prob([], x, m, '-q')
#save training set prediction
fd = open(trainpredictionfolder + '/' + str(l) + '_test_probs', 'w')
for i in range(len(train_p_probs)):
fd.write(str(ids[i]) + ' ' + str('%.4f' % train_p_probs[i]) + '\n')#output as <id, prob>
fd.close()
#tp, fp, fn, pre, rec, f1 = compute_loss(train_p_probs, y, 0.5)
#print 'training loss', l, tp, fp, fn, '%.4f' % pre, '%.4f' % rec, '%.4f' % f1
#make prediction on test
print 'predict test', l
if cur_depth == 0:
p_labs, p_acc, p_vals, p_probs = predict_label_score_prob([], test_features, m, '-q')
#mapped_test_features = remap_feature(test_features, f2map)
#p_labs, p_acc, p_vals, p_probs = predict_label_score_prob([], mapped_test_features, m, '-q')
else:
#read all its ancestor prediction, use them as features
#get all ancestor
ancestors = []
c = parents[l]
while c != -1:
ancestors.insert(0, c)
c = parents[c]
#make features from the top-level to the bottom level
                #using the sibling predictions of each ancestor
used_probs = {}
for par in ancestors:
#sib = get_silbing(par, parents)
sib = [par]
#read probs from sib, only selecting the used examples
for s in sib:
ps = read_probs(prediction_output + '/' + str(s) + '_test_probs')#read all test probs
used_probs[s] = ps
print s, len(ps)
#transform probs to lst
prob_lst = prob_dict_to_lst(used_probs)
#make a copy of test features
test_x = [dict(t) for t in test_features]
#make new features by expanding some meta prob features
for i in range(len(test_x)):
xx = test_x[i]
probs = prob_lst[i]
for j in range(len(probs)):
xx[max_f + j + 1] = probs[j]
p_labs, p_acc, p_vals, p_probs = predict_label_score_prob([], test_x, m, '-q')
#mapped_test_x = remap_feature(test_x, f2map)
#p_labs, p_acc, p_vals, p_probs = predict_label_score_prob([], mapped_test_x, m, '-q')
true_bin_labels = get_binary_label_global(test_labels, l)
#save prediction
fd = open(prediction_output + '/' + str(l) + '_test_labels', 'w')
for v in p_labs:
fd.write(str(v) + '\n')
fd.close()
fd = open(prediction_output + '/' + str(l) + '_test_probs', 'w')
for v in p_probs:
fd.write(str('%.4f' % v) + '\n')
fd.close()
v_labels = []
for p in p_probs:
if p >= threshold:
v_labels.append(1)
else:
v_labels.append(-1)
print compute_loss_with_labels(v_labels, true_bin_labels)
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, Dict, List,
TYPE_CHECKING
)
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # type: ignore
from azure.core.exceptions import HttpResponseError
from azure.core.paging import ItemPaged
from azure.core.tracing.decorator import distributed_trace
from azure.core.pipeline import Pipeline
from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
from ._shared.response_handlers import process_storage_error
from ._generated import AzureFileStorage
from ._generated.models import StorageServiceProperties
from ._share_client import ShareClient
from ._serialize import get_api_version
from ._models import (
SharePropertiesPaged,
service_properties_deserialize,
)
if TYPE_CHECKING:
from datetime import datetime
from ._models import (
ShareProperties,
Metrics,
CorsRule,
ShareProtocolSettings
)
class ShareServiceClient(StorageAccountHostsMixin):
"""A client to interact with the File Share Service at the account level.
This client provides operations to retrieve and configure the account properties
as well as list, create and delete shares within the account.
For operations relating to a specific share, a client for that entity
can also be retrieved using the :func:`get_share_client` function.
For more optional configuration, please click
`here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
#optional-configuration>`_.
:param str account_url:
The URL to the file share storage account. Any other entities included
in the URL path (e.g. share or file) will be discarded. This URL can be optionally
authenticated with a SAS token.
:param credential:
The credential with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
        an instance of an AzureSasCredential from azure.core.credentials or an account
shared access key.
:keyword str api_version:
The Storage API version to use for requests. Default value is the most recent service version that is
compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
.. versionadded:: 12.1.0
:keyword str secondary_hostname:
The hostname of the secondary endpoint.
:keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_authentication.py
:start-after: [START create_share_service_client]
:end-before: [END create_share_service_client]
:language: python
:dedent: 8
:caption: Create the share service client with url and credential.
"""
def __init__(
self, account_url, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
try:
if not account_url.lower().startswith('http'):
account_url = "https://" + account_url
except AttributeError:
raise ValueError("Account URL must be a string.")
parsed_url = urlparse(account_url.rstrip('/'))
if not parsed_url.netloc:
raise ValueError("Invalid URL: {}".format(account_url))
if hasattr(credential, 'get_token'):
raise ValueError("Token credentials not supported by the File Share service.")
_, sas_token = parse_query(parsed_url.query)
if not sas_token and not credential:
raise ValueError(
'You need to provide either an account shared key or SAS token when creating a storage service.')
self._query_str, credential = self._format_query_string(sas_token, credential)
super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs)
self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline)
self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access
def _format_url(self, hostname):
"""Format the endpoint URL according to the current location
mode hostname.
"""
return "{}://{}/{}".format(self.scheme, hostname, self._query_str)
@classmethod
def from_connection_string(
cls, conn_str, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
): # type: (...) -> ShareServiceClient
"""Create ShareServiceClient from a Connection String.
:param str conn_str:
A connection string to an Azure Storage account.
:param credential:
The credential with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
            an instance of an AzureSasCredential from azure.core.credentials or an account
shared access key.
:returns: A File Share service client.
:rtype: ~azure.storage.fileshare.ShareServiceClient
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_authentication.py
:start-after: [START create_share_service_client_from_conn_string]
:end-before: [END create_share_service_client_from_conn_string]
:language: python
:dedent: 8
:caption: Create the share service client with connection string.
"""
account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
if 'secondary_hostname' not in kwargs:
kwargs['secondary_hostname'] = secondary
return cls(account_url, credential=credential, **kwargs)
@distributed_trace
def get_service_properties(self, **kwargs):
# type: (Any) -> Dict[str, Any]
"""Gets the properties of a storage account's File Share service, including
Azure Storage Analytics.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: A dictionary containing file service properties such as
analytics logging, hour/minute metrics, cors rules, etc.
:rtype: Dict[str, Any]
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_service.py
:start-after: [START get_service_properties]
:end-before: [END get_service_properties]
:language: python
:dedent: 8
:caption: Get file share service properties.
"""
timeout = kwargs.pop('timeout', None)
try:
service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
return service_properties_deserialize(service_props)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace
def set_service_properties(
self, hour_metrics=None, # type: Optional[Metrics]
minute_metrics=None, # type: Optional[Metrics]
cors=None, # type: Optional[List[CorsRule]]
        protocol=None, # type: Optional[ShareProtocolSettings]
**kwargs
):
# type: (...) -> None
"""Sets the properties of a storage account's File Share service, including
Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
existing settings on the service for that functionality are preserved.
:param hour_metrics:
The hour metrics settings provide a summary of request
statistics grouped by API in hourly aggregates for files.
:type hour_metrics: ~azure.storage.fileshare.Metrics
:param minute_metrics:
The minute metrics settings provide request statistics
for each minute for files.
:type minute_metrics: ~azure.storage.fileshare.Metrics
:param cors:
You can include up to five CorsRule elements in the
list. If an empty list is specified, all CORS rules will be deleted,
and CORS will be disabled for the service.
:type cors: list(:class:`~azure.storage.fileshare.CorsRule`)
:param protocol:
Sets protocol settings
:type protocol: ~azure.storage.fileshare.ShareProtocolSettings
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_service.py
:start-after: [START set_service_properties]
:end-before: [END set_service_properties]
:language: python
:dedent: 8
:caption: Sets file share service properties.
"""
timeout = kwargs.pop('timeout', None)
props = StorageServiceProperties(
hour_metrics=hour_metrics,
minute_metrics=minute_metrics,
cors=cors,
protocol=protocol
)
try:
self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace
def list_shares(
self, name_starts_with=None, # type: Optional[str]
include_metadata=False, # type: Optional[bool]
include_snapshots=False, # type: Optional[bool]
**kwargs
):
# type: (...) -> ItemPaged[ShareProperties]
"""Returns auto-paging iterable of dict-like ShareProperties under the specified account.
The generator will lazily follow the continuation tokens returned by
the service and stop when all shares have been returned.
:param str name_starts_with:
Filters the results to return only shares whose names
begin with the specified name_starts_with.
:param bool include_metadata:
Specifies that share metadata be returned in the response.
:param bool include_snapshots:
            Specifies that share snapshots be returned in the response.
:keyword bool include_deleted:
Specifies that deleted shares be returned in the response.
This is only for share soft delete enabled account.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) of ShareProperties.
:rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties]
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_service.py
:start-after: [START fsc_list_shares]
:end-before: [END fsc_list_shares]
:language: python
:dedent: 12
:caption: List shares in the file share service.
"""
timeout = kwargs.pop('timeout', None)
include = []
include_deleted = kwargs.pop('include_deleted', None)
if include_deleted:
include.append("deleted")
if include_metadata:
include.append('metadata')
if include_snapshots:
include.append('snapshots')
results_per_page = kwargs.pop('results_per_page', None)
command = functools.partial(
self._client.service.list_shares_segment,
include=include,
timeout=timeout,
**kwargs)
return ItemPaged(
command, prefix=name_starts_with, results_per_page=results_per_page,
page_iterator_class=SharePropertiesPaged)
@distributed_trace
def create_share(
self, share_name, # type: str
**kwargs
):
# type: (...) -> ShareClient
"""Creates a new share under the specified account. If the share
with the same name already exists, the operation fails. Returns a client with
which to interact with the newly created share.
:param str share_name: The name of the share to create.
:keyword dict(str,str) metadata:
A dict with name_value pairs to associate with the
            share as metadata. Example: {'Category': 'test'}
:keyword int quota:
Quota in bytes.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.fileshare.ShareClient
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_service.py
:start-after: [START fsc_create_shares]
:end-before: [END fsc_create_shares]
:language: python
:dedent: 8
:caption: Create a share in the file share service.
"""
metadata = kwargs.pop('metadata', None)
quota = kwargs.pop('quota', None)
timeout = kwargs.pop('timeout', None)
share = self.get_share_client(share_name)
kwargs.setdefault('merge_span', True)
share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
return share
@distributed_trace
def delete_share(
self, share_name, # type: Union[ShareProperties, str]
delete_snapshots=False, # type: Optional[bool]
**kwargs
):
# type: (...) -> None
"""Marks the specified share for deletion. The share is
later deleted during garbage collection.
:param share_name:
The share to delete. This can either be the name of the share,
or an instance of ShareProperties.
:type share_name: str or ~azure.storage.fileshare.ShareProperties
:param bool delete_snapshots:
Indicates if snapshots are to be deleted.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_service.py
:start-after: [START fsc_delete_shares]
:end-before: [END fsc_delete_shares]
:language: python
:dedent: 12
:caption: Delete a share in the file share service.
"""
timeout = kwargs.pop('timeout', None)
share = self.get_share_client(share_name)
kwargs.setdefault('merge_span', True)
share.delete_share(
delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
@distributed_trace
def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs):
# type: (str, str, **Any) -> ShareClient
"""Restores soft-deleted share.
Operation will only be successful if used within the specified number of days
set in the delete retention policy.
.. versionadded:: 12.2.0
This operation was introduced in API version '2019-12-12'.
:param str deleted_share_name:
Specifies the name of the deleted share to restore.
:param str deleted_share_version:
Specifies the version of the deleted share to restore.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.fileshare.ShareClient
"""
share = self.get_share_client(deleted_share_name)
try:
share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access
deleted_share_version=deleted_share_version,
timeout=kwargs.pop('timeout', None), **kwargs)
return share
except HttpResponseError as error:
process_storage_error(error)
def get_share_client(self, share, snapshot=None):
# type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient
"""Get a client to interact with the specified share.
The share need not already exist.
:param share:
The share. This can either be the name of the share,
or an instance of ShareProperties.
:type share: str or ~azure.storage.fileshare.ShareProperties
:param str snapshot:
An optional share snapshot on which to operate. This can be the snapshot ID string
or the response returned from :func:`create_snapshot`.
:returns: A ShareClient.
:rtype: ~azure.storage.fileshare.ShareClient
.. admonition:: Example:
.. literalinclude:: ../samples/file_samples_service.py
:start-after: [START get_share_client]
:end-before: [END get_share_client]
:language: python
:dedent: 8
:caption: Gets the share client.
"""
try:
share_name = share.name
except AttributeError:
share_name = share
_pipeline = Pipeline(
transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
policies=self._pipeline._impl_policies # pylint: disable = protected-access
)
return ShareClient(
self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
api_version=self.api_version, _hosts=self._hosts,
_configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode)
|
|
#!/usr/bin/env python
import splashdown
import dxencode
import sys
import dxpy.exceptions
class Patchdown(splashdown.Splashdown):
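    '''
    Splashdown variant used to revisit lrnaLaunch results that were posted
    earlier: it posts any files still missing from encoded and reports files
    whose 'derived_from' links would need to be patched.
    '''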
post_templates = {
# For looking up previous result files, use wild-cards
# used with old lrnaLaunch script
"tophat_bam": {
"file_format": "bam",
"output_type": "alignments",
"derived_from": ["reads1", "reads2"]
},
"tophat_minus_all_bw": {
"file_format": "bigWig",
"output_type": "multi-read minus signal",
"derived_from": ["tophat_bam"]
},
"tophat_minus_uniq_bw":{
"file_format": "bigWig",
"output_type": "unique minus signal",
"derived_from": ["tophat_bam"]
},
"tophat_plus_all_bw": {
"file_format": "bigWig",
"output_type": "multi-read plus signal",
"derived_from": ["tophat_bam"]
},
"tophat_plus_uniq_bw": {
"file_format": "bigWig",
"output_type": "unique minus signal",
"derived_from": ["tophat_bam"]
},
"tophat_all_bw": {
"file_format": "bigWig",
"output_type": "multi-read signal",
"derived_from": ["tophat_bam"]
},
"tophat_uniq_bw": {
"file_format": "bigWig",
"output_type": "unique signal",
"derived_from": ["tophat_bam"]
},
"star_genome_bam": {
"file_format": "bam",
"output_type": "alignments",
"derived_from": ["reads1", "reads2"]
},
"star_minus_all_bw": {
"file_format": "bigWig",
"output_type": "multi-read minus signal",
"derived_from": ["star_genome_bam"]
},
"star_minus_uniq_bw": {
"file_format": "bigWig",
"output_type": "unique minus signal",
"derived_from": ["star_genome_bam"]
},
"star_plus_all_bw": {
"file_format": "bigWig",
"output_type": "multi-read plus signal",
"derived_from": ["star_genome_bam"]
},
"star_plus_uniq_bw": {
"file_format": "bigWig",
"output_type": "unique plus signal",
"derived_from": ["star_genome_bam"]
},
"star_all_bw": {
"file_format": "bigWig",
"output_type": "multi-read signal",
"derived_from": ["star_genome_bam"]
},
"star_uniq_bw": {
"file_format": "bigWig",
"output_type": "unique signal",
"derived_from": ["star_genome_bam"]
},
"rsem_gene_results": {
"file_format": "tsv",
"output_type": "genome quantifications",
"derived_from": ["star_anno_bam"]
# note should be derived from star_anno_bam
},
"star_anno_bam": {
"file_format": "bam",
"output_type": "transcriptome alignments",
"derived_from": ["reads1", "reads2"]
},
"rsem_iso_results": {
"file_format": "tsv",
"output_type": "transcript quantifications",
"derived_from": ["star_anno_bam"]
},
"reads1": {
"file_format": "fastq",
"output_type": "reads1",
"derived_from": []
},
"reads2": {
"file_format": "fastq",
"output_type": "reads2",
"derived_from": []
}
}
def __init__(self):
super(Patchdown, self).__init__()
self.derived_map = { x['output_type']:
[ self.post_templates[y]['output_type'] for y in x['derived_from'] ] for x in self.post_templates.values() }
def find_derived_from(self,fid,job,verbose=False):
        ''' Wrap in a try/except because DNAnexus files sometimes go missing. '''
try:
derived_from = super(Patchdown, self).find_derived_from(fid,job,verbose)
except dxpy.exceptions.ResourceNotFound, e:
print "WARN: derived_from failed %s" % e
derived_from = []
if not derived_from:
#import pdb;pdb.set_trace()
# try to guess
pass
return derived_from
def run(self):
'''Override super.run()'''
args = self.get_args()
self.test = args.test
self.server_key = args.server
if self.server_key != "test":
self.acc_prefix = "ENCFF"
self.proj_name = dxencode.env_get_current_project()
if self.proj_name == None or args.project != None:
self.proj_name = args.project
if self.proj_name == None:
print "Please enter a '--project' to run in."
sys.exit(1)
self.project = dxencode.get_project(self.proj_name)
self.proj_id = self.project.get_id()
print "== Running in project [%s] and will post to the [%s] server ==" % \
(self.proj_name,self.server_key)
exp_count = 0
halted = 0
total_posted = 0
for exp_id in args.experiments:
sys.stdout.flush() # Slow running job should flush to piped log
# 1) Lookup experiment type from encoded, based on accession
print "Working on %s..." % exp_id
self.exp = dxencode.get_exp(exp_id,must_find=False,key=self.server_key)
if self.exp == None or self.exp["status"] == "error":
print "Unable to locate experiment %s in encoded" % exp_id
continue
self.exp_type = self.get_exp_type(exp_id)
if self.exp_type == None:
continue
# 2) Locate the experiment accession named folder
self.exp_folder = dxencode.find_exp_folder(self.project,exp_id,args.results_folder,warn=True)
if self.exp_folder == None:
continue
print "- Examining %s:%s for '%s' results..." % \
(self.proj_name, self.exp_folder, self.exp_type)
# 3) Given the experiment type, determine the expected results
self.pipeline = self.pipeline_specification(args,self.exp_type,self.exp_folder)
self.replicates = self.find_replicate_folders(self.exp_folder, verbose=args.verbose)
# 4) Given expected results locate any files (by glob) that should be posted for
# a) each single replicate (in replicate sub-folders named as reN_N/
# b) combined replicates in the experiment folder itself
files_expected = self.find_expected_files(self.exp_folder,self.replicates, verbose=args.verbose)
print "- Found %d files that are available to post." % len(files_expected)
if len(files_expected) == 0:
continue
# 5) For each file that should be posted, determine if the file needs to be posted.
files_to_post = { x[2]: x for x in self.find_needed_files(files_expected, verbose=args.verbose) }
# index on dx file id
print "- Found %d files that need to be posted" % len(files_to_post.keys())
# 6) For each file that needs to be posted:
exp_count += 1
file_count = 0
post_count = 0
for (out_type,rep_tech,fid) in files_expected:
sys.stdout.flush() # Slow running job should flush to piped log
# a) discover all necessary dx information needed for post.
# b) gather any other information necessary from dx and encoded.
print " Handle file %s" % dxencode.file_path_from_fid(fid)
job = dxencode.job_from_fid(fid)
try:
derived_from = self.find_derived_from(fid,job, args.verbose)
except dxpy.exceptions.ResourceNotFound, e:
print "WARN: derived_from failed %s" % e
derived_from = []
if not files_to_post.get(fid,()):
f_obj = self.found.get(fid,None)
if f_obj:
current_derived_from = f_obj['derived_from']
if derived_from and not current_derived_from:
print "Need to patch derived_from for %s/%s to %s (currently: %s)" % (f_obj['accession'], fid, derived_from, current_derived_from)
else:
print "Derived from for %s good" % f_obj['accession']
else:
print "File %s (%s) from %s/%s not found @ DNANexus" % (fid,out_type,exp_id,rep_tech)
#POSTING
else:
payload = self.make_payload_obj(out_type,rep_tech,fid, verbose=args.verbose)
if args.force_annotation:
print "WARN: forcing genome_annotation to be %s" % args.force_annotation
payload['genome_annotation'] = args.force_annotation
file_count += 1
# c) Post file and update encoded database.
accession = self.file_post(fid,payload,args.test)
if accession == None:
print "* HALTING %s - post failure could compromise 'derived_from'" % \
(self.exp_id)
halted += 1
break
# d) Update dnanexus file with file accession tag.
if not args.test:
post_count += 1
self.file_mark_accession(fid,accession,args.test)
print "- For %s Processed %d file(s), posted %s" % \
(self.exp_id, file_count, post_count)
total_posted += post_count
print "Processed %d experiment(s), halted %d, posted %d file(s)" % \
(exp_count, halted, total_posted)
if halted == exp_count:
sys.exit(1)
print "(finished)"
if __name__ == '__main__':
'''Run from the command line.'''
patch = Patchdown()
patch.run()
|
|
import re
from mangrove.errors.MangroveException import AnswerNotInListException, AnswerHasTooManyValuesException, AnswerHasNoValuesException, LatitudeNotFloat, LongitudeNotFloat, LatitudeNotInRange, LongitudeNotInRange, RegexMismatchException, ShortCodeRegexMismatchException
from mangrove.utils.types import is_empty
from mangrove.validate import is_string, is_float, VdtTypeError, VdtValueError
class ConstraintTypes(object):
REGEX = 'regex'
SELECT = 'select'
RANGE = 'range'
LENGTH = 'length'
GEO = 'geo'
SHORT_CODE = 'short_code'
class ConstraintAttributes(object):
MAX = "max"
MIN = "min"
MIN_LONG = -180
MAX_LONG = 180
MIN_LAT = -90
MAX_LAT = 90
PATTERN = '_pattern'
class NumericRangeConstraint(object):
def __init__(self, min=None, max=None, dict=None):
self.min = min
self.max = max
if dict is not None:
self.min = dict.get('min')
self.max = dict.get('max')
def _to_json(self):
dict = {}
if self.min is not None:
dict[ConstraintAttributes.MIN] = self.min
if self.max is not None:
dict[ConstraintAttributes.MAX] = self.max
return ('range', dict)
def validate(self, value):
return is_float(value, min=self.min, max=self.max)
def xform_constraint(self):
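        # Build an XForm constraint expression such as ". >= 1 and . <= 10".
        # Note that a bound of 0 is treated as unset by the truthiness checks.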
min_constraint = ". >= {0}".format(self.min) if self.min else None
max_constraint = ". <= {0}".format(self.max) if self.max else None
return " and ".join(filter(None, [min_constraint, max_constraint]))
class TextLengthConstraint(NumericRangeConstraint):
def _to_json(self):
dict = {}
if self.min is not None:
dict[ConstraintAttributes.MIN] = self.min
if self.max is not None:
dict[ConstraintAttributes.MAX] = self.max
return ("length", dict) if not is_empty(dict) else ()
def validate(self, value):
return is_string(value.strip(), min=self.min, max=self.max)
def xform_constraint(self):
min_constraint = "string-length(.) >= {0}".format(self.min) if self.min else None
max_constraint = "string-length(.) <= {0}".format(self.max) if self.max else None
return " and ".join(filter(None, [min_constraint, max_constraint]))
class ChoiceConstraint(object):
def __init__(self, single_select_constraint, list_of_valid_choices, code, dict=None, has_other=False):
self.single_select_constraint = single_select_constraint
self.list_of_valid_choices = list_of_valid_choices
self.choice_dict = self.get_item(self.list_of_valid_choices)
self.choice_vals = self.choice_dict.keys()
self.code = code
self.has_other = has_other
def get_item(self, items):
item_dict = {}
for item in items:
if type(item) is dict:
item_dict.update({item.get('val'):item.get('text')})
else:
item_dict.update({item: item})
return item_dict
def validate(self, answer):
assert answer is not None
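        # Accepted answer formats: comma- or space-separated choice codes, a
        # single code, or a run of codes such as "ab" or "1a2b"; single-select
        # questions may only resolve to one choice.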
# if self.has_other and isinstance(answer, list) and answer[0] == 'other':
# answer_string = answer[1]
# else:
answer_string = answer.lower().strip()
if not answer_string:
raise AnswerHasNoValuesException(code=self.code, answer=answer)
choices_text = []
if ',' in answer_string:
responses = answer_string.split(',')
responses = [r.strip() for r in responses]
elif ' ' in answer_string:
responses = answer_string.split(' ')
elif answer_string in self.choice_vals:
responses = [answer_string]
elif self.has_other:
responses = [answer_string]
else:
invalid_responses = re.split(r'[1-9]?[a-z]', answer_string)
invalid_responses = filter(None, invalid_responses)
if len(invalid_responses) > 0:
raise AnswerNotInListException(code=self.code, answer=invalid_responses[0])
responses = re.findall(r'[1-9]?[a-zA-Z]', answer_string)
if self.single_select_constraint and len(responses) > 1:
raise AnswerHasTooManyValuesException(code=self.code, answer=answer)
for response in responses:
if response in self.choice_vals:
choice_selected = self.choice_dict[response]
if choice_selected not in choices_text:
choices_text.append(choice_selected)
elif self.has_other:
choices_text.append(response)
else:
raise AnswerNotInListException(code=self.code, answer=response)
return choices_text
class GeoCodeConstraint(object):
def validate(self, latitude, longitude):
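        # Strip any Unicode left-to-right marks (u'\u200e') surrounding the
        # values, then validate both coordinates as in-range floats.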
latitude = latitude.strip(u'\u200e')
longitude = longitude.strip(u'\u200e')
latitude = latitude.encode('ascii')
longitude = longitude.encode('ascii')
try:
lat = is_float(latitude, min=ConstraintAttributes.MIN_LAT, max=ConstraintAttributes.MAX_LAT)
except VdtTypeError:
raise LatitudeNotFloat(latitude)
except VdtValueError:
raise LatitudeNotInRange(latitude)
try:
long = is_float(longitude, min=ConstraintAttributes.MIN_LONG, max=ConstraintAttributes.MAX_LONG)
except VdtTypeError:
raise LongitudeNotFloat(longitude)
except VdtValueError:
raise LongitudeNotInRange(longitude)
return lat, long
class RegexConstraint(object):
def __init__(self, reg=None, dict=None):
self._pattern = dict if dict is not None else reg
def validate(self, text):
if re.match(self._pattern, text):
return text
raise RegexMismatchException(self._pattern)
@property
def pattern(self):
return self._pattern
def _to_json(self):
return ('regex', self._pattern)
class ShortCodeRegexConstraint(object):
def __init__(self, reg=None, dict=None):
self._pattern = dict if dict is not None else reg
def validate(self, text):
if re.match(self._pattern, text):
return text.lower()
raise ShortCodeRegexMismatchException(self._pattern)
@property
def pattern(self):
return self._pattern
def _to_json(self):
return ('short_code', self._pattern)
def constraints_factory(constraints_json):
constraints = []
for constraint_type, constraint_json in constraints_json:
constraint_class = constraint_for.get(constraint_type)
if constraint_class is not None:
constraints.append(constraint_class(dict=constraint_json))
return constraints
constraint_for = {
ConstraintTypes.LENGTH: TextLengthConstraint,
ConstraintTypes.RANGE: NumericRangeConstraint,
ConstraintTypes.SELECT: ChoiceConstraint,
ConstraintTypes.GEO: GeoCodeConstraint,
ConstraintTypes.REGEX: RegexConstraint,
ConstraintTypes.SHORT_CODE: ShortCodeRegexConstraint,
}
|
|
# -*- coding: utf-8 -*-
"""
Human Resource Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars()
# =============================================================================
def index():
""" Module Home Page """
mode = session.s3.hrm.mode
if mode is not None:
# Go to Personal Profile
redirect(URL(f="person"))
else:
# Bypass home page & go direct to searchable list of Staff
redirect(URL(f="staff", args="search"))
# =============================================================================
# People
# =============================================================================
def human_resource():
"""
HR Controller
- combined (unused, except for Imports)
"""
tablename = "hrm_human_resource"
table = s3db[tablename]
# Default to Staff
_type = table.type
s3.filter = (_type == 1)
def prep(r):
if r.method == "form":
return True
if r.interactive:
if r.method == "create" and not r.component:
redirect(URL(f="volunteer",
args=args,
vars=vars))
elif r.method == "delete":
# Don't redirect
pass
elif r.id:
# Redirect to person controller
vars = {
"human_resource.id" : r.id,
"group" : "staff"
}
redirect(URL(f="person",
vars=vars))
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
s3_action_buttons(r, deletable=settings.get_hrm_deletable())
if "msg" in settings.modules:
# @ToDo: Remove this now that we have it in Events?
s3.actions.append({
"url": URL(f="compose",
vars = {"hrm_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Message"))})
elif r.representation == "plain" and \
r.method !="search":
# Map Popups
output = s3db.hrm_map_popup(r)
return output
s3.postp = postp
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def staff():
"""
Staff Controller
"""
tablename = "hrm_human_resource"
table = s3db[tablename]
_type = table.type
_type.default = 1
s3.filter = (_type == 1)
table.site_id.writable = True
table.site_id.readable = True
list_fields = ["id",
"person_id",
"job_title_id",
"organisation_id",
"department",
"site_id",
#"site_contact",
(T("Email"), "email"),
(settings.get_ui_label_mobile_phone(), "phone"),
(T("Trainings"), "course"),
(T("Certificates"), "certificate"),
(T("Contract End Date"), "end_date"),
"status",
]
s3.crud_strings[tablename] = s3.crud_strings["hrm_staff"]
if "expiring" in request.get_vars:
s3.filter = s3.filter & \
(table.end_date < (request.utcnow + datetime.timedelta(weeks=4)))
s3.crud_strings[tablename].title_list = T("Staff with Contracts Expiring in the next Month")
# Remove the big Add button
s3db.configure(tablename,
insertable=False)
# Remove Type filter from the Search widget
human_resource_search = s3db.get_config(tablename,
"search_method")
human_resource_search.advanced.pop(1)
s3db.configure(tablename,
list_fields = list_fields,
search_method = human_resource_search)
def prep(r):
if r.interactive:
if not r.component and \
not r.id and \
r.method in [None, "create"]:
# Don't redirect
# Assume staff only between 16-81
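                # (S3DateWidget limits appear to be in months:
                #  past=972 ~ 81 years, future=-192 ~ 16 years)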
s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972, future=-192)
table = r.table
table.site_id.comment = DIV(DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Office/Warehouse/Facility"),
T("The facility where this position is based."),
T("Enter some characters to bring up a list of possible matches."))))
table.status.writable = False
table.status.readable = False
elif r.method == "delete":
# Don't redirect
pass
elif r.id:
# Redirect to person controller
vars = {
"human_resource.id": r.id,
"group": "staff"
}
redirect(URL(f="person",
vars=vars))
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
s3_action_buttons(r, deletable=settings.get_hrm_deletable())
if "msg" in settings.modules:
# @ToDo: Remove this now that we have it in Events?
s3.actions.append({
"url": URL(f="compose",
vars = {"hrm_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Message"))
})
elif r.representation == "plain" and \
r.method !="search":
# Map Popups
output = s3db.hrm_map_popup(r)
return output
s3.postp = postp
output = s3_rest_controller("hrm", "human_resource")
return output
# -----------------------------------------------------------------------------
def person():
"""
Person Controller
- used for Personal Profile & Imports
- includes components relevant to HRM
"""
configure = s3db.configure
set_method = s3db.set_method
# Custom Method for Contacts
set_method("pr", resourcename,
method="contacts",
action=s3db.pr_contacts)
# Plug-in role matrix for Admins/OrgAdmins
realms = auth.user is not None and auth.user.realms or []
if ADMIN in realms or ORG_ADMIN in realms:
set_method("pr", resourcename, method="roles",
action=s3base.S3PersonRoleManager())
if settings.has_module("asset"):
# Assets as component of people
s3db.add_component("asset_asset",
pr_person="assigned_to_id")
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
group = request.get_vars.get("group", "staff")
hr_id = request.get_vars.get("human_resource.id", None)
if not str(hr_id).isdigit():
hr_id = None
# Configure human resource table
tablename = "hrm_human_resource"
table = s3db[tablename]
table.type.default = 1
request.get_vars.update(xsltmode="staff")
if hr_id:
hr = table[hr_id]
if hr:
group = hr.type == 2 and "volunteer" or "staff"
# Also inform the back-end of this finding
request.get_vars["group"] = group
# Configure person table
tablename = "pr_person"
table = s3db[tablename]
if (group == "staff" and settings.get_hrm_staff_experience() == "programme") or \
(group == "volunteer" and settings.get_hrm_vol_experience() == "programme"):
table.virtualfields.append(s3db.hrm_programme_person_virtual_fields())
configure(tablename,
deletable=False)
mode = session.s3.hrm.mode
if mode is not None:
# Configure for personal mode
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# People can view their own HR data, but not edit it
configure("hrm_human_resource",
insertable = False,
editable = False,
deletable = False)
configure("hrm_certification",
insertable = True,
editable = True,
deletable = True)
configure("hrm_credential",
insertable = False,
editable = False,
deletable = False)
configure("hrm_competency",
insertable = True, # Can add unconfirmed
editable = False,
deletable = False)
configure("hrm_training", # Can add but not provide grade
insertable = True,
editable = False,
deletable = False)
configure("hrm_experience",
insertable = False,
editable = False,
deletable = False)
configure("pr_group_membership",
insertable = False,
editable = False,
deletable = False)
else:
# Configure for HR manager mode
s3.crud_strings[tablename].update(
title_upload = T("Import Staff"),
title_display = T("Staff Member Details"),
title_update = T("Staff Member Details")
)
# Upload for configuration (add replace option)
s3.importerPrep = lambda: dict(ReplaceOption=T("Remove existing data before import"))
# Import pre-process
def import_prep(data, group=group):
"""
Deletes all HR records (of the given group) of the organisation
before processing a new data import, used for the import_prep
hook in s3mgr
"""
resource, tree = data
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if s3.import_replace:
if tree is not None:
if group == "staff":
group = 1
elif group == "volunteer":
group = 2
else:
return # don't delete if no group specified
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
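                # With the default S3XML tag/attribute names this typically
                # resolves to something like:
                #   /s3xml/resource[@name='org_organisation']/data[@field='name']
                # (exact names depend on current.xml)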
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except:
pass
if org_name:
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(htable.organisation_id == otable.id) & \
(htable.type == group)
resource = s3mgr.define_resource("hrm", "human_resource", filter=query)
ondelete = s3db.get_config("hrm_human_resource", "ondelete")
resource.delete(ondelete=ondelete, format="xml", cascade=True)
s3mgr.import_prep = import_prep
# CRUD pre-process
def prep(r):
if r.representation == "s3json":
s3mgr.show_ids = True
elif r.interactive and r.method != "import":
if r.component:
if r.component_name == "human_resource":
table = r.component.table
table.site_id.writable = True
table.site_id.readable = True
org = session.s3.hrm.org
if org is not None:
table.organisation_id.default = org
table.organisation_id.comment = None
table.organisation_id.readable = False
table.organisation_id.writable = False
table.site_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"org_site.%s" % s3db.super_key(db.org_site),
s3db.org_site_represent,
filterby="organisation_id",
filter_opts=[session.s3.hrm.org]))
elif r.component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = False
table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = True
table.ethnicity.readable = True
table.blood_type.writable = True
table.blood_type.readable = True
table.medical_conditions.writable = True
table.medical_conditions.readable = True
table.other_details.writable = True
table.other_details.readable = True
elif r.component_name == "asset":
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
elif r.method == "contacts":
#s3.js_global.append('''controller="hrm"''')
pass
else:
table = r.table
# No point showing the 'Occupation' field - that's the Job Title in the Staff Record
table.occupation.readable = False
table.occupation.writable = False
table.pe_label.readable = False
table.pe_label.writable = False
table.missing.readable = False
table.missing.writable = False
table.age_group.readable = False
table.age_group.writable = False
                # Assume volunteers are aged 12-81 (widget limits are in months:
                # past=972 -> at most 81 years old, future=-144 -> at least 12 years old)
table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
resource = r.resource
if mode is not None:
r.resource.build_query(id=s3_logged_in_person())
else:
if not r.id and not hr_id:
# pre-action redirect => must retain prior errors
if response.error:
session.error = response.error
redirect(URL(r=r, f="staff"))
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="staff",
args=["search"]))
if hr_id and r.component_name == "human_resource":
r.component_id = hr_id
configure("hrm_human_resource",
insertable = False)
#if not r.component_id or r.method in ("create", "update"):
# s3base.s3_address_hide(s3db.pr_address)
return True
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "human_resource":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
if r.component_name == "experience":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
elif r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href=URL(c="asset", f="asset"),
_id="add-btn",
_class="action-btn")
return output
s3.postp = postp
# REST Interface
if session.s3.hrm.orgname and mode is None:
orgname = session.s3.hrm.orgname
else:
orgname = None
output = s3_rest_controller("pr", resourcename,
native=False,
rheader=s3db.hrm_rheader,
orgname=orgname,
replace_option=T("Remove existing data before import"))
return output
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search.json for use in Autocompletes
- allows differential access permissions
"""
group = request.get_vars.get("group", None)
if group == "staff":
s3.filter = (s3db.hrm_human_resource.type == 1)
elif group == "volunteer":
s3.filter = (s3db.hrm_human_resource.type == 2)
s3db.configure("hrm_human_resource",
# S3HRSearch
search_method = s3db.hrm_autocomplete_search,
)
s3.prep = lambda r: r.representation == "json" and \
r.method == "search"
return s3_rest_controller("hrm", "human_resource")
# =============================================================================
# Teams
# =============================================================================
def group():
"""
Team controller
- uses the group table from PR
"""
tablename = "pr_group"
table = s3db[tablename]
_group_type = table.group_type
_group_type.label = T("Team Type")
table.description.label = T("Team Description")
table.name.label = T("Team Name")
mtable = s3db.pr_group_membership
mtable.group_id.label = T("Team ID")
mtable.group_head.label = T("Team Leader")
# Set Defaults
_group_type.default = 3 # 'Relief Team'
_group_type.readable = _group_type.writable = False
# Only show Relief Teams
# Do not show system groups
s3.filter = (table.system == False) & \
(_group_type == 3)
# CRUD Strings
ADD_TEAM = T("Add Team")
s3.crud_strings[tablename] = Storage(
title_create = ADD_TEAM,
title_display = T("Team Details"),
title_list = T("Teams"),
title_update = T("Edit Team"),
title_search = T("Search Teams"),
subtitle_create = T("Add New Team"),
label_list_button = T("List Teams"),
label_create_button = T("Add New Team"),
label_search_button = T("Search Teams"),
msg_record_created = T("Team added"),
msg_record_modified = T("Team updated"),
msg_record_deleted = T("Team deleted"),
msg_list_empty = T("No Teams currently registered"))
s3.crud_strings["pr_group_membership"] = Storage(
title_create = T("Add Member"),
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
title_search = T("Search Member"),
subtitle_create = T("Add New Member"),
label_list_button = T("List Members"),
label_create_button = T("Add Team Member"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Team Member added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Members currently registered"))
s3db.configure(tablename, main="name", extra="description",
# Redirect to member list when a new group has been created
create_next = URL(f="group",
args=["[id]", "group_membership"]))
s3db.configure("pr_group_membership",
list_fields=["id",
"person_id",
"group_head",
"description"])
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
update_url = URL(args=["[id]", "group_membership"])
s3_action_buttons(r, deletable=False, update_url=update_url)
if "msg" in settings.modules:
s3.actions.append({
"url": URL(f="compose",
vars = {"group_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Notification"))})
return output
s3.postp = postp
tabs = [
(T("Team Details"), None),
# Team should be contacted either via the Leader or
# simply by sending a message to the group as a whole.
#(T("Contact Data"), "contact"),
(T("Members"), "group_membership")
]
output = s3_rest_controller("pr", resourcename,
rheader=lambda r: s3db.pr_rheader(r, tabs=tabs))
return output
# =============================================================================
# Jobs
# =============================================================================
def job_role():
""" Job Roles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
output = s3_rest_controller()
return output
def job_title():
""" Job Titles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Skills
# =============================================================================
def skill():
""" Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_type():
""" Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def competency_rating():
""" Competency Rating for Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_provision():
""" Skill Provisions Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def course():
""" Courses Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def course_certificate():
""" Courses to Certificates Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def certificate():
""" Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def certificate_skill():
""" Certificates to Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def training():
""" Training Controller - used for Searching for Participants """
return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def training_event():
""" Training Events Controller """
return s3db.hrm_training_event_controller()
# =============================================================================
def skill_competencies():
"""
Called by S3FilterFieldChange to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby=~rtable.priority)
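    # web2py Rows.json() serialises this as a JSON list of objects with the
    # selected fields, e.g. [{"id": 3, "name": "Expert"}, ...], with the
    # highest-priority rating first (assumed shape, driven by the select above)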
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
def staff_org_site_json():
"""
Used by the Asset - Assign to Person page
"""
table = s3db.hrm_human_resource
otable = s3db.org_organisation
#db.req_commit.date.represent = lambda dt: dt[:10]
query = (table.person_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(table.site_id,
otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
# Messaging
# =============================================================================
def compose():
""" Send message to people/teams """
return s3db.hrm_compose()
# END =========================================================================
|
|
import json
import os
import unittest
import mock
import numpy as np
import pandas as pd
from ddt import ddt, unpack, data
from .. import io
from .. import models
from . import FIXTURES_DIR
class TestCase(unittest.TestCase):
def get_fixture_data(self, fname):
path = os.path.join(FIXTURES_DIR, fname)
with open(path) as f:
return json.load(f)
@ddt
class TestBinWM(TestCase):
model_data_fixtures = {
'Minimal Example': 'case__minimal_example.json',
'Motorcycle Helmet': 'case__motorcycle_helmet.json',
'Simple Aircraft': 'case__simple_aircraft.json'
}
def setup_binary_weighting_matrix(self, key):
fixture_fname = self.model_data_fixtures[key]
data = self.get_fixture_data(fixture_fname)
bwm = models.BinWM(*data['requirements'])
bwm._matrix = np.array(data['binary_matrix'])
bwm.label = "My Requirements"
return bwm
def test_score__motorcycle_helmet(self):
bwm = self.setup_binary_weighting_matrix('Motorcycle Helmet')
np.testing.assert_allclose(
bwm.score,
np.array([0.095, 0.286, 0.143, 0.143, 0.143, 0.19]),
atol=0.01
)
def test_score__simple_aircraft(self):
bwm = self.setup_binary_weighting_matrix('Simple Aircraft')
np.testing.assert_allclose(
bwm.score,
np.array([0.13, 0.16, 0.13, 0.04, 0.13, 0.09, 0.07, 0.09, 0.16]),
atol=0.1
)
@data(
[('n', 'n', 'n'), (0.17, 0.33, 0.5)],
[('y', 'n', 'n'), (0.33, 0.17, 0.5)],
[('n', 'y', 'n'), (0.33, 0.33, 0.33)],
[('n', 'y', 'y'), (0.33, 0.5, 0.17)],
[('y', 'y', 'y'), (0.5, 0.33, 0.17)]
)
@unpack
@mock.patch.object(models.BinWM, '_print')
@mock.patch.object(models.BinWM, '_input')
def test_prompt(self, answers, score, mock_input, mock_print):
mock_input.side_effect = answers
bwm = self.setup_binary_weighting_matrix('Minimal Example')
bwm.prompt(shuffle=False)
mock_input.assert_has_calls([
mock.call("'Requirement 1' is more important than "
"'Requirement 2': "),
mock.call("'Requirement 1' is more important than "
"'Requirement 3': "),
mock.call("'Requirement 2' is more important than "
"'Requirement 3': ")
])
np.testing.assert_allclose(bwm.score, np.array(score), atol=0.01)
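    # Note on the expected scores above (an inferred sketch, not a documented
    # formula of the library): with N=3 requirements there are 3 pairwise
    # questions, and the fixtures are consistent with each requirement being
    # scored as (wins + 1) / (total wins + N). For ('n', 'n', 'n') the win
    # counts are (0, 1, 2), giving (1/6, 2/6, 3/6) ~= (0.17, 0.33, 0.5).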
@mock.patch('random.shuffle')
@mock.patch.object(models.BinWM, '_print')
@mock.patch.object(models.BinWM, '_input')
def test_prompt__shuffle(self, mock_input, mock_print, mock_shuffle):
mock_input.side_effect = ['y'] * 3
bwm = self.setup_binary_weighting_matrix('Minimal Example')
bwm.prompt(shuffle=True)
mock_shuffle.assert_called_with([
(0, 1, 'Requirement 1', 'Requirement 2'),
(0, 2, 'Requirement 1', 'Requirement 3'),
(1, 2, 'Requirement 2', 'Requirement 3')
])
def test_to_dataframe(self):
"""Method coerces the matrix to a pandas dataframe.
Test creates a matrix from source data and checks the
dataframe looks right.
"""
bwm = self.setup_binary_weighting_matrix('Minimal Example')
expected_scores = bwm.score
actual = bwm.to_dataframe()
expected_requirement_labels = [
'Requirement ' + str(x) for x in range(1, 4)
]
expected = pd.DataFrame(
data=[
[0, 0, 1, expected_scores[0]],
[0, 0, 1, expected_scores[1]],
[0, 0, 0, expected_scores[2]]
],
columns=expected_requirement_labels + ['Score'],
index=expected_requirement_labels
)
expected.index.name = 'My Requirements'
try:
pd.testing.assert_frame_equal(actual, expected)
except AssertionError:
# Ugh. mix of unicode and str causing a comparison failure
# in Python 2. Don't actually care about this so this is a
# little trap to check that's what's happening and let it
# go.
# TODO: remove >= January 2020
if not (actual.columns == expected.columns).all():
raise
else:
pass
def test_save(self):
"""Method is only implemented in special cases."""
bwm = self.setup_binary_weighting_matrix('Minimal Example')
bwm._matrix[2, 0] = 1
self.assertRaises(NotImplementedError, bwm.save)
@mock.patch.object(models.BinWM, '_get_sheet')
class TestBinWM_GoogleSheetsIntegration(TestCase):
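    # The class-level patch above replaces BinWM._get_sheet for every test
    # method in this class; each test receives the patched attribute as its
    # final mock argument (class-level patches are applied outermost).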
def setup_mock_sheet(self, mock_getter):
# Get reference data
data = self.get_fixture_data('case__minimal_example.json')
requirements = data['requirements']
binary_matrix = np.array(data['binary_matrix'])
# Set up mock
mock_sheet = mock.MagicMock(spec_set=io.GSheetBinWM)
mock_getter.return_value = mock_sheet
mock_sheet.get_requirements.return_value = requirements
mock_sheet.get_value_matrix.return_value = binary_matrix
return mock_sheet
def test_from_google_sheet(self, mock_getter):
"""Constructor uses and links a google sheet to instantiate.
Requirements and binary matrix are fetched from the
io.BinWMSheet interface to populate the object.
"""
mock_sheet = self.setup_mock_sheet(mock_getter)
bwm = models.BinWM.from_google_sheet('dummy name')
actual_requirements = bwm.requirements
expected_requirements = tuple(mock_sheet.get_requirements())
self.assertEqual(actual_requirements, expected_requirements)
actual_matrix = bwm.matrix
expected_matrix = mock_sheet.get_value_matrix()
np.testing.assert_allclose(actual_matrix, expected_matrix)
def test_access_sheet_model(self, mock_getter):
"""Instances access linked sheets through a generic interface.
"""
mock_sheet = self.setup_mock_sheet(mock_getter)
bwm = models.BinWM.from_google_sheet('dummy name')
actual = bwm._sheet
expected = mock_sheet
self.assertIs(actual, expected)
@mock.patch.object(models.BinWM, 'to_dataframe')
def test_save__triggers_update(self, mock_to_dataframe,
mock_getter):
"""Save method wraps the google sheet update method."""
mock_sheet = self.setup_mock_sheet(mock_getter)
mock_to_dataframe.return_value = blank_df = pd.DataFrame()
bwm = models.BinWM.from_google_sheet('dummy name')
bwm.save()
mock_sheet.update.assert_called_once_with(blank_df)
class TestBinWM_ExcelIntegration(unittest.TestCase):
    # TODO: BinWM is not currently integrated with Excel
pass
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing volumes.
"""
import json
import re
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.template.defaultfilters import slugify # noqa
from django.utils.decorators import method_decorator
from django.utils import encoding
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import cache_control
from django.views.decorators.cache import never_cache
from django.views import generic
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import exceptions as dashboard_exception
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
from openstack_dashboard.dashboards.admin.volumes \
.volumes import forms as project_forms
from openstack_dashboard.dashboards.admin.volumes \
.volumes import tables as project_tables
from openstack_dashboard.dashboards.admin.volumes \
.volumes import tabs as project_tabs
import logging
LOG = logging.getLogger(__name__)
class DetailView(tabs.TabView):
tab_group_class = project_tabs.VolumeDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ volume.name|default:volume.id }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
volume = self.get_data()
table = project_tables.VolumesTable(self.request)
context["volume"] = volume
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(volume)
choices = project_tables.VolumesTableBase.STATUS_DISPLAY_CHOICES
volume.status_label = filters.get_display_label(choices, volume.status)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
snapshots = cinder.volume_snapshot_list(
self.request, search_opts={'volume_id': volume.id})
if snapshots:
setattr(volume, 'has_snapshot', True)
for att in volume.attachments:
att['instance'] = api.nova.server_get(self.request,
att['server_id'])
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return volume
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
def get_tabs(self, request, *args, **kwargs):
volume = self.get_data()
return self.tab_group_class(request, volume=volume, **kwargs)
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateForm
modal_header = _("Create Volume")
template_name = 'admin/volumes/volumes/create.html'
submit_label = _("Create Volume")
submit_url = reverse_lazy("horizon:admin:volumes:volumes:create")
success_url = reverse_lazy('horizon:admin:volumes:volumes_tab')
page_title = _("Create a Volume")
def get_initial(self):
initial = super(CreateView, self).get_initial()
self.default_vol_type = None
try:
self.default_vol_type = cinder.volume_type_default(self.request)
initial['type'] = self.default_vol_type.name
except dashboard_exception.NOT_FOUND:
pass
return initial
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
try:
context['usages'] = quotas.tenant_limit_usages(self.request)
context['volume_types'] = self._get_volume_types()
except Exception:
exceptions.handle(self.request)
return context
def _get_volume_types(self):
volume_types = []
try:
volume_types = cinder.volume_type_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume type list.'))
        # Check whether a default volume type exists so that the description
        # of the "no volume type" choice can be presented differently.
no_type_description = None
if self.default_vol_type is None:
message = \
_("If \"No volume type\" is selected, the volume will be "
"created without a volume type.")
no_type_description = encoding.force_text(message)
type_descriptions = [{'name': '',
'description': no_type_description}] + \
[{'name': type.name,
'description': getattr(type, "description", "")}
for type in volume_types]
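        # The serialised result looks roughly like:
        #   [{"name": "", "description": "If \"No volume type\" ..."},
        #    {"name": "lvmdriver-1", "description": ""}, ...]
        # ("lvmdriver-1" is just an illustrative type name)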
return json.dumps(type_descriptions)
class IncreaseVolumeView(forms.ModalFormView):
form_class = project_forms.IncreaseForm
template_name = 'admin/volumes/volumes/increase.html'
success_url = reverse_lazy('horizon:admin:volumes:index')
def get_object(self):
        #try:
        # Derive the storage host from the cinder endpoint URL,
        # e.g. "http://192.168.0.10:8776/v2/..." -> ("192.168.0.10",)
        endpoints = api.base.url_for(self.request, 'volume')
        expression = r'https?://(.+?):.+?'
        host = re.match(expression, endpoints).groups()
        LOG.info("----------------------------------------------host: %s" % host)
        cloud_size = api.device.get_colud_disk_size(self.request, host=host)
        loads_data = json.loads(cloud_size.text)
        LOG.info("----------------------------------------------cloud_size: %s" % loads_data)
        content = eval(loads_data.get('content'))
        # Pick out the "cinder-volumes" volume group from the reported data
        volumes = [vg for vg in content if vg["vg_name"] == "cinder-volumes"]
        if volumes:
            volumes = volumes.pop()
            # Strip the trailing unit letters, e.g. "500.00g" -> "500.00"
            # (assumes vg_size always carries a unit suffix)
            vg_size = re.findall(r"[a-zA-Z]", volumes['vg_size'])
            return volumes['vg_size'][:-len(vg_size)]
        #except Exception:
        #    exceptions.handle(self.request, _("Unable to request the size of the current volume group."))
def get_context_data(self, **kwargs):
context = super(IncreaseVolumeView, self).get_context_data(**kwargs)
context['usages'] = quotas.tenant_limit_usages(self.request)
return context
def get_initial(self):
orig_size = self.get_object()
return {'orig_size': orig_size}
class ExtendView(forms.ModalFormView):
form_class = project_forms.ExtendForm
modal_header = _("Extend Volume")
template_name = 'admin/volumes/volumes/extend.html'
submit_label = _("Extend Volume")
submit_url = "horizon:admin:volumes:volumes:extend"
success_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Extend Volume")
def get_object(self):
if not hasattr(self, "_object"):
volume_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return self._object
def get_context_data(self, **kwargs):
context = super(ExtendView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
try:
usages = quotas.tenant_limit_usages(self.request)
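            # Subtract the volume's current size so the quota figures only
            # have to account for the requested increase (assumed intent;
            # mirrors how the extend form compares the new size against usage).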
usages['gigabytesUsed'] = (usages['gigabytesUsed']
- context['volume'].size)
context['usages'] = usages
except Exception:
exceptions.handle(self.request)
return context
def get_initial(self):
volume = self.get_object()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'orig_size': volume.size}
class CreateSnapshotView(forms.ModalFormView):
form_class = project_forms.CreateSnapshotForm
modal_header = _("Create Volume Snapshot")
template_name = 'admin/volumes/volumes/create_snapshot.html'
submit_url = "horizon:admin:volumes:volumes:create_snapshot"
success_url = reverse_lazy('horizon:admin:volumes:snapshots_tab')
page_title = _("Create a Volume Snapshot")
def get_context_data(self, **kwargs):
context = super(CreateSnapshotView, self).get_context_data(**kwargs)
context['volume_id'] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
try:
volume = cinder.volume_get(self.request, context['volume_id'])
if (volume.status == 'in-use'):
context['attached'] = True
context['form'].set_warning(_("This volume is currently "
"attached to an instance. "
"In some cases, creating a "
"snapshot from an attached "
"volume can result in a "
"corrupted snapshot."))
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return context
def get_initial(self):
return {'volume_id': self.kwargs["volume_id"]}
class UploadToImageView(forms.ModalFormView):
form_class = project_forms.UploadToImageForm
modal_header = _("Upload Volume to Image")
template_name = 'admin/volumes/volumes/upload_to_image.html'
submit_label = _("Upload")
submit_url = "horizon:admin:volumes:volumes:upload_to_image"
success_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Upload Volume to Image")
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
error_message = _(
'Unable to retrieve volume information for volume: "%s"') \
% volume_id
exceptions.handle(self.request,
error_message,
redirect=self.success_url)
return volume
def get_context_data(self, **kwargs):
context = super(UploadToImageView, self).get_context_data(**kwargs)
context['volume'] = self.get_data()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
volume = self.get_data()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'status': volume.status}
class CreateTransferView(forms.ModalFormView):
form_class = project_forms.CreateTransferForm
template_name = 'admin/volumes/volumes/create_transfer.html'
success_url = reverse_lazy('horizon:admin:volumes:volumes_tab')
modal_id = "create_volume_transfer_modal"
modal_header = _("Create Volume Transfer")
submit_label = _("Create Volume Transfer")
submit_url = "horizon:admin:volumes:volumes:create_transfer"
page_title = _("Create a Volume Transfer")
def get_context_data(self, *args, **kwargs):
context = super(CreateTransferView, self).get_context_data(**kwargs)
volume_id = self.kwargs['volume_id']
context['volume_id'] = volume_id
context['submit_url'] = reverse(self.submit_url, args=[volume_id])
return context
def get_initial(self):
return {'volume_id': self.kwargs["volume_id"]}
class AcceptTransferView(forms.ModalFormView):
form_class = project_forms.AcceptTransferForm
template_name = 'admin/volumes/volumes/accept_transfer.html'
success_url = reverse_lazy('horizon:admin:volumes:volumes_tab')
modal_id = "accept_volume_transfer_modal"
modal_header = _("Accept Volume Transfer")
submit_label = _("Accept Volume Transfer")
submit_url = reverse_lazy(
"horizon:admin:volumes:volumes:accept_transfer")
page_title = _("Accept Volume Transfer")
class ShowTransferView(forms.ModalFormView):
form_class = project_forms.ShowTransferForm
template_name = 'admin/volumes/volumes/show_transfer.html'
success_url = reverse_lazy('horizon:admin:volumes:volumes_tab')
modal_id = "show_volume_transfer_modal"
modal_header = _("Volume Transfer")
submit_url = "horizon:admin:volumes:volumes:show_transfer"
cancel_label = _("Close")
download_label = _("Download transfer credentials")
page_title = _("Volume Transfer Details")
def get_object(self):
try:
return self._object
except AttributeError:
transfer_id = self.kwargs['transfer_id']
try:
self._object = cinder.transfer_get(self.request, transfer_id)
return self._object
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume transfer.'))
def get_context_data(self, **kwargs):
context = super(ShowTransferView, self).get_context_data(**kwargs)
context['transfer_id'] = self.kwargs['transfer_id']
context['auth_key'] = self.kwargs['auth_key']
context['submit_url'] = reverse(self.submit_url, args=[
context['transfer_id'], context['auth_key']])
context['download_label'] = self.download_label
context['download_url'] = reverse(
'horizon:admin:volumes:volumes:download_transfer_creds',
args=[context['transfer_id'], context['auth_key']]
)
return context
def get_initial(self):
transfer = self.get_object()
return {'id': transfer.id,
'name': transfer.name,
'auth_key': self.kwargs['auth_key']}
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateForm
modal_header = _("Edit Volume")
modal_id = "update_volume_modal"
template_name = 'admin/volumes/volumes/update.html'
submit_url = "horizon:admin:volumes:volumes:update"
success_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Edit Volume")
def get_object(self):
if not hasattr(self, "_object"):
vol_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, vol_id)
except Exception:
msg = _('Unable to retrieve volume.')
url = reverse('horizon:admin:volumes:index')
exceptions.handle(self.request, msg, redirect=url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
volume = self.get_object()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'description': volume.description,
'bootable': volume.is_bootable}
class EditAttachmentsView(tables.DataTableView, forms.ModalFormView):
table_class = project_tables.AttachmentsTable
form_class = project_forms.AttachForm
form_id = "attach_volume_form"
modal_header = _("Manage Volume Attachments")
modal_id = "attach_volume_modal"
template_name = 'admin/volumes/volumes/attach.html'
submit_url = "horizon:admin:volumes:volumes:attach"
success_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Manage Volume Attachments")
@memoized.memoized_method
def get_object(self):
volume_id = self.kwargs['volume_id']
try:
return cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
def get_data(self):
attachments = []
volume = self.get_object()
if volume is not None:
for att in volume.attachments:
att['volume_name'] = getattr(volume, 'name', att['device'])
attachments.append(att)
return attachments
def get_initial(self):
try:
instances, has_more = api.nova.server_list(self.request)
except Exception:
instances = []
exceptions.handle(self.request,
_("Unable to retrieve attachment information."))
return {'volume': self.get_object(),
'instances': instances}
@memoized.memoized_method
def get_form(self, **kwargs):
form_class = kwargs.get('form_class', self.get_form_class())
return super(EditAttachmentsView, self).get_form(form_class)
def get_context_data(self, **kwargs):
context = super(EditAttachmentsView, self).get_context_data(**kwargs)
context['form'] = self.get_form()
volume = self.get_object()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
if volume and volume.status == 'available':
context['show_attach'] = True
else:
context['show_attach'] = False
context['volume'] = volume
if self.request.is_ajax():
context['hide'] = True
return context
def get(self, request, *args, **kwargs):
# Table action handling
handled = self.construct_tables()
if handled:
return handled
return self.render_to_response(self.get_context_data(**kwargs))
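    # post() below re-renders via get() when the attach form is invalid, so
    # the attachments table and the form (with its validation errors) are
    # displayed together on the same modal.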
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.get(request, *args, **kwargs)
class RetypeView(forms.ModalFormView):
form_class = project_forms.RetypeForm
modal_id = "retype_volume_modal"
modal_header = _("Change Volume Type")
template_name = 'admin/volumes/volumes/retype.html'
submit_label = _("Change Volume Type")
submit_url = "horizon:admin:volumes:volumes:retype"
success_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Change Volume Type")
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
error_message = _(
'Unable to retrieve volume information for volume: "%s"') \
% volume_id
exceptions.handle(self.request,
error_message,
redirect=self.success_url)
return volume
def get_context_data(self, **kwargs):
context = super(RetypeView, self).get_context_data(**kwargs)
context['volume'] = self.get_data()
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
volume = self.get_data()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'volume_type': volume.volume_type}
class EncryptionDetailView(generic.TemplateView):
template_name = 'admin/volumes/volumes/encryption_detail.html'
page_title = _("Volume Encryption Details: {{ volume.name }}")
def get_context_data(self, **kwargs):
context = super(EncryptionDetailView, self).get_context_data(**kwargs)
volume = self.get_volume_data()
context["encryption_metadata"] = self.get_encryption_data()
context["volume"] = volume
context["page_title"] = _("Volume Encryption Details: "
"%(volume_name)s") % {'volume_name':
volume.name}
return context
@memoized.memoized_method
def get_encryption_data(self):
try:
volume_id = self.kwargs['volume_id']
self._encryption_metadata = \
cinder.volume_get_encryption_metadata(self.request,
volume_id)
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve volume encryption '
'details.'),
redirect=redirect)
return self._encryption_metadata
@memoized.memoized_method
def get_volume_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return volume
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
class DownloadTransferCreds(generic.View):
# TODO(Itxaka): Remove cache_control in django >= 1.9
# https://code.djangoproject.com/ticket/13008
@method_decorator(cache_control(max_age=0, no_cache=True,
no_store=True, must_revalidate=True))
@method_decorator(never_cache)
def get(self, request, transfer_id, auth_key):
try:
transfer = cinder.transfer_get(self.request, transfer_id)
except Exception:
transfer = None
response = http.HttpResponse(content_type='application/text')
response['Content-Disposition'] = \
'attachment; filename=%s.txt' % slugify(transfer_id)
response.write('%s: %s\n%s: %s\n%s: %s' % (
_("Transfer name"),
getattr(transfer, 'name', ''),
_("Transfer ID"),
transfer_id,
_("Authorization Key"),
auth_key))
response['Content-Length'] = str(len(response.content))
return response
class UpdateStatusView(forms.ModalFormView):
form_class = project_forms.UpdateStatus
template_name = 'admin/volumes/volumes/update_status.html'
success_url = reverse_lazy('horizon:admin:volumes:index')
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context["volume_id"] = self.kwargs['volume_id']
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'status': volume.status}
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import base64
from yosaipy2.core import (
AbstractRememberMeManager,
SubjectStore,
NativeSecurityManager,
)
from yosaipy2.web import (
WebSessionStorageEvaluator,
WebSessionManager,
WebSubjectContext,
WebDelegatingSubject,
WebSessionKey,
web_subject_abcs,
)
class WebSecurityManager(NativeSecurityManager):
"""
This is the default ``WebSecurityManager`` implementation used in web-based
applications or any application that requires HTTP connectivity.
- yosai omits any session_mode logic since no wsgi middleware exists (yet)
- yosai uses the native web session manager as default, unlike Shiro,
which uses the middleware version instead
- the yosai attribute is set by WebYosai when the
SecurityManager is passed to the WebYosai
"""
def __init__(self,
yosai,
settings,
realms=None,
cache_handler=None,
serialization_manager=None):
"""
:type realms: tuple
"""
super(WebSecurityManager, self).__init__(
yosai,
settings,
realms=realms,
cache_handler=cache_handler,
serialization_manager=serialization_manager,
session_manager=WebSessionManager(settings),
subject_store=SubjectStore(WebSessionStorageEvaluator()),
remember_me_manager=CookieRememberMeManager(settings)
)
def create_subject_context(self, subject):
web_registry = subject.web_registry
return WebSubjectContext(self.yosai, self, web_registry)
# overridden:
def create_session_context(self, subject_context):
web_registry = subject_context.resolve_web_registry()
session_context = {
'web_registry': web_registry,
'host': getattr(self, 'host', None)
}
return session_context
# overridden
def get_session_key(self, subject_context):
try:
web_registry = subject_context.resolve_web_registry()
subject_context.session_id = web_registry.session_id
session_id = subject_context.session_id
return WebSessionKey(session_id, web_registry=web_registry)
except AttributeError: # not dealing with a WebSubjectContext
return super(WebSecurityManager, self).get_session_key(subject_context)
# overridden
def before_logout(self, subject):
super(WebSecurityManager, self).before_logout(subject)
self.remove_identity(subject)
def remove_identity(self, subject):
try:
del subject.web_registry.remember_me # descriptor sets to None
except AttributeError: # then it's not a WebSubject
pass
# new to yosai, overriding to support CSRF token synchronization
def on_successful_login(self, authc_token, account_id, subject):
# Generating a new session_id at successful login is a recommended
        # countermeasure against session fixation attacks
subject.session = subject.session.recreate_session()
super(WebSecurityManager, self).remember_me_successful_login(authc_token, account_id, subject)
# overridden
def do_create_subject(self, subject_context):
"""
By the time this method is invoked, all possible
``SubjectContext`` data (session, identifiers, et. al.) has been made
accessible using all known heuristics.
:returns: a Subject instance reflecting the data in the specified
SubjectContext data map
"""
if not isinstance(subject_context, web_subject_abcs.WebSubjectContext):
return super(WebSecurityManager, self).do_create_subject(subject_context=subject_context)
security_manager = subject_context.resolve_security_manager()
session = subject_context.resolve_session()
session_creation_enabled = subject_context.session_creation_enabled
        # passing the session arg is new to yosai, eliminating redundant
# get_session calls:
identifiers = subject_context.resolve_identifiers(session)
authenticated = subject_context.resolve_authenticated(session)
host = subject_context.resolve_host(session)
# must run after resolve_identifiers:
remembered = getattr(subject_context, 'remembered', None)
return WebDelegatingSubject(
identifiers=identifiers,
remembered=remembered,
authenticated=authenticated,
host=host,
session=session,
session_creation_enabled=session_creation_enabled,
security_manager=security_manager,
web_registry=subject_context.web_registry
)
class CookieRememberMeManager(AbstractRememberMeManager):
"""
Remembers a Subject's identity by saving the Subject's identifiers to a Cookie
for later retrieval. The Cookie is accessed through the WebRegistry api.
"""
def __init__(self, settings):
super(CookieRememberMeManager, self).__init__(settings)
def remember_encrypted_identity(self, subject, encrypted):
"""
Base64-encodes the specified serialized byte array and sets that
base64-encoded String as the cookie value.
The ``subject`` instance is expected to be a ``WebSubject`` instance
with a web_registry handle so that an HTTP cookie may be set on an
outgoing response. If it is not a ``WebSubject`` or that ``WebSubject``
does not have a web_registry handle, this implementation does
nothing.
:param subject: the Subject for which the identity is being serialized
:param encrypted: the serialized bytes to persist
:type encrypted: bytearray
"""
try:
# base 64 encode it and store as a cookie:
encoded = base64.b64encode(encrypted).decode('utf-8')
subject.web_registry.remember_me = encoded
except AttributeError:
msg = ("Subject argument is not an HTTP-aware instance. This "
"is required to obtain a web registry in order to"
"set the RememberMe cookie. Returning immediately "
"and ignoring RememberMe operation.")
self._logger.debug(msg)
def is_identity_removed(self, subject_context):
try:
registry = subject_context.resolve_web_registry()
return not registry.remember_me
except AttributeError:
return False
def get_remembered_encrypted_identity(self, subject_context):
"""
Returns a previously serialized identity byte array or None if the byte
array could not be acquired.
This implementation retrieves an HTTP cookie, Base64-decodes the cookie
value, and returns the resulting byte array.
The ``subject_context`` instance is expected to be a ``WebSubjectContext``
instance with a web_registry so that an HTTP cookie may be
retrieved from an incoming request. If it is not a ``WebSubjectContext``
or is one yet does not have a web_registry, this implementation returns
None.
:param subject_context: the contextual data used to construct a ``Subject`` instance
:returns: an encrypted, serialized identifier collection
"""
        if self.is_identity_removed(subject_context):
            return None
        if not isinstance(subject_context, web_subject_abcs.WebSubjectContext):
            msg = ("SubjectContext argument is not an HTTP-aware instance. "
                   "This is required to obtain a web registry "
                   "in order to retrieve the RememberMe cookie. Returning "
                   "immediately and ignoring rememberMe operation.")
            self._logger.debug(msg)
            return None
        remember_me = subject_context.web_registry.remember_me
# TBD:
# Browsers do not always remove cookies immediately
# ignore cookies that are scheduled for removal
# if (web_wsgi_abcs.Cookie.DELETED_COOKIE_VALUE.equals(base64)):
# return None
if remember_me:
self._logger.debug("Acquired encoded identity [" + str(remember_me) + "]")
encrypted = base64.b64decode(remember_me)
return encrypted
else:
# no cookie set - new site visitor?
return None
    # Currently, neither subject nor subject_context serves any function here
    # after porting to Python (TBD):
def forget_identity(self, subject=None, subject_context=None):
"""
Removes the 'rememberMe' cookie from the WebRegistry.
:param subject: the subject instance for which identity data should be
forgotten from the underlying persistence
:param subject_context: the contextual data
"""
del subject.web_registry.remember_me # no use of subject data (TBD)
|
|
from functools import partial
from itertools import product
from string import ascii_letters
import warnings
import numpy as np
from pandas import (
Categorical, DataFrame, MultiIndex, Series, TimeGrouper, Timestamp,
date_range, period_range)
import pandas.util.testing as tm
method_blacklist = {
'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean',
'max', 'skew', 'cumprod', 'cummax', 'rank', 'pct_change', 'min',
'var', 'mad', 'describe', 'std', 'quantile'},
'datetime': {'median', 'prod', 'sem', 'cumsum', 'sum', 'mean', 'skew',
'cumprod', 'cummax', 'pct_change', 'var', 'mad', 'describe',
'std'}
}
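# The groupby methods listed above are skipped for the given dtype, presumably
# because they are not meaningful or not supported there (e.g. 'prod' on object
# columns); GroupByMethods.setup raises NotImplementedError for these
# combinations, which asv treats as a skipped benchmark.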
class ApplyDictReturn(object):
def setup(self):
self.labels = np.arange(1000).repeat(10)
self.data = Series(np.random.randn(len(self.labels)))
def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(lambda x: {'first': x.values[0],
'last': x.values[-1]})
class Apply(object):
def setup_cache(self):
N = 10**4
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)
})
return df
def time_scalar_function_multi_col(self, df):
df.groupby(['key', 'key2']).apply(lambda x: 1)
def time_scalar_function_single_col(self, df):
df.groupby('key').apply(lambda x: 1)
@staticmethod
def df_copy_function(g):
# ensure that the group name is available (see GH #15062)
g.name
return g.copy()
def time_copy_function_multi_col(self, df):
df.groupby(['key', 'key2']).apply(self.df_copy_function)
def time_copy_overhead_single_col(self, df):
df.groupby('key').apply(self.df_copy_function)
class Groups(object):
param_names = ['key']
params = ['int64_small', 'int64_large', 'object_small', 'object_large']
def setup_cache(self):
size = 10**6
data = {'int64_small': Series(np.random.randint(0, 100, size=size)),
'int64_large': Series(np.random.randint(0, 10000, size=size)),
'object_small': Series(
tm.makeStringIndex(100).take(
np.random.randint(0, 100, size=size))),
'object_large': Series(
tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=size)))}
return data
def setup(self, data, key):
self.ser = data[key]
def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
class GroupManyLabels(object):
params = [1, 1000]
param_names = ['ncols']
def setup(self, ncols):
N = 1000
data = np.random.randn(N, ncols)
self.labels = np.random.randint(0, 100, size=N)
self.df = DataFrame(data)
def time_sum(self, ncols):
self.df.groupby(self.labels).sum()
class Nth(object):
param_names = ['dtype']
params = ['float32', 'float64', 'datetime', 'object']
def setup(self, dtype):
N = 10**5
# with datetimes (GH7555)
if dtype == 'datetime':
values = date_range('1/1/2011', periods=N, freq='s')
elif dtype == 'object':
values = ['foo'] * N
else:
values = np.arange(N).astype(dtype)
key = np.arange(N)
self.df = DataFrame({'key': key, 'values': values})
self.df.iloc[1, 1] = np.nan # insert missing data
def time_frame_nth_any(self, dtype):
self.df.groupby('key').nth(0, dropna='any')
def time_groupby_nth_all(self, dtype):
self.df.groupby('key').nth(0, dropna='all')
def time_frame_nth(self, dtype):
self.df.groupby('key').nth(0)
def time_series_nth_any(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0, dropna='any')
def time_series_nth_all(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0, dropna='all')
def time_series_nth(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0)
class DateAttributes(object):
def setup(self):
rng = date_range('1/1/2000', '12/31/2005', freq='H')
self.year, self.month, self.day = rng.year, rng.month, rng.day
self.ts = Series(np.random.randn(len(rng)), index=rng)
def time_len_groupby_object(self):
len(self.ts.groupby([self.year, self.month, self.day]))
class Int64(object):
def setup(self):
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
i = np.random.choice(len(arr), len(arr) * 5)
arr = np.vstack((arr, arr[i]))
i = np.random.permutation(len(arr))
arr = arr[i]
self.cols = list('abcde')
self.df = DataFrame(arr, columns=self.cols)
self.df['jim'], self.df['joe'] = np.random.randn(2, len(self.df)) * 10
def time_overflow(self):
self.df.groupby(self.cols).max()
class CountMultiDtype(object):
def setup_cache(self):
n = 10000
offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
dates = np.datetime64('now') + offsets
dates[np.random.rand(n) > 0.5] = np.datetime64('nat')
offsets[np.random.rand(n) > 0.5] = np.timedelta64('nat')
value2 = np.random.randn(n)
value2[np.random.rand(n) > 0.5] = np.nan
obj = np.random.choice(list('ab'), size=n).astype(object)
obj[np.random.randn(n) > 0.5] = np.nan
df = DataFrame({'key1': np.random.randint(0, 500, size=n),
'key2': np.random.randint(0, 100, size=n),
'dates': dates,
'value2': value2,
'value3': np.random.randn(n),
'ints': np.random.randint(0, 1000, size=n),
'obj': obj,
'offsets': offsets})
return df
def time_multi_count(self, df):
df.groupby(['key1', 'key2']).count()
class CountMultiInt(object):
def setup_cache(self):
n = 10000
df = DataFrame({'key1': np.random.randint(0, 500, size=n),
'key2': np.random.randint(0, 100, size=n),
'ints': np.random.randint(0, 1000, size=n),
'ints2': np.random.randint(0, 1000, size=n)})
return df
def time_multi_int_count(self, df):
df.groupby(['key1', 'key2']).count()
def time_multi_int_nunique(self, df):
df.groupby(['key1', 'key2']).nunique()
class AggFunctions(object):
def setup_cache(self):
N = 10**5
fac1 = np.array(['A', 'B', 'C'], dtype='O')
fac2 = np.array(['one', 'two'], dtype='O')
df = DataFrame({'key1': fac1.take(np.random.randint(0, 3, size=N)),
'key2': fac2.take(np.random.randint(0, 2, size=N)),
'value1': np.random.randn(N),
'value2': np.random.randn(N),
'value3': np.random.randn(N)})
return df
def time_different_str_functions(self, df):
df.groupby(['key1', 'key2']).agg({'value1': 'mean',
'value2': 'var',
'value3': 'sum'})
def time_different_numpy_functions(self, df):
df.groupby(['key1', 'key2']).agg({'value1': np.mean,
'value2': np.var,
'value3': np.sum})
def time_different_python_functions_multicol(self, df):
df.groupby(['key1', 'key2']).agg([sum, min, max])
def time_different_python_functions_singlecol(self, df):
df.groupby('key1').agg([sum, min, max])
class GroupStrings(object):
def setup(self):
n = 2 * 10**5
alpha = list(map(''.join, product(ascii_letters, repeat=4)))
data = np.random.choice(alpha, (n // 5, 4), replace=False)
data = np.repeat(data, 5, axis=0)
self.df = DataFrame(data, columns=list('abcd'))
self.df['joe'] = (np.random.randn(len(self.df)) * 10).round(3)
self.df = self.df.sample(frac=1).reset_index(drop=True)
def time_multi_columns(self):
self.df.groupby(list('abcd')).max()
class MultiColumn(object):
def setup_cache(self):
N = 10**5
key1 = np.tile(np.arange(100, dtype=object), 1000)
key2 = key1.copy()
np.random.shuffle(key1)
np.random.shuffle(key2)
df = DataFrame({'key1': key1,
'key2': key2,
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
return df
def time_lambda_sum(self, df):
df.groupby(['key1', 'key2']).agg(lambda x: x.values.sum())
def time_cython_sum(self, df):
df.groupby(['key1', 'key2']).sum()
def time_col_select_lambda_sum(self, df):
df.groupby(['key1', 'key2'])['data1'].agg(lambda x: x.values.sum())
def time_col_select_numpy_sum(self, df):
df.groupby(['key1', 'key2'])['data1'].agg(np.sum)
class Size(object):
def setup(self):
n = 10**5
offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
dates = np.datetime64('now') + offsets
self.df = DataFrame({'key1': np.random.randint(0, 500, size=n),
'key2': np.random.randint(0, 100, size=n),
'value1': np.random.randn(n),
'value2': np.random.randn(n),
'value3': np.random.randn(n),
'dates': dates})
self.draws = Series(np.random.randn(n))
labels = Series(['foo', 'bar', 'baz', 'qux'] * (n // 4))
self.cats = labels.astype('category')
def time_multi_size(self):
self.df.groupby(['key1', 'key2']).size()
def time_dt_timegrouper_size(self):
with warnings.catch_warnings(record=True):
self.df.groupby(TimeGrouper(key='dates', freq='M')).size()
def time_category_size(self):
self.draws.groupby(self.cats).size()
class GroupByMethods(object):
param_names = ['dtype', 'method', 'application']
params = [['int', 'float', 'object', 'datetime'],
['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin',
'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head',
'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique',
'pct_change', 'prod', 'quantile', 'rank', 'sem', 'shift',
'size', 'skew', 'std', 'sum', 'tail', 'unique', 'value_counts',
'var'],
['direct', 'transformation']]
def setup(self, dtype, method, application):
if method in method_blacklist.get(dtype, {}):
raise NotImplementedError # skip benchmark
ngroups = 1000
size = ngroups * 2
rng = np.arange(ngroups)
values = rng.take(np.random.randint(0, ngroups, size=size))
if dtype == 'int':
key = np.random.randint(0, size, size=size)
elif dtype == 'float':
key = np.concatenate([np.random.random(ngroups) * 0.1,
np.random.random(ngroups) * 10.0])
elif dtype == 'object':
key = ['foo'] * size
elif dtype == 'datetime':
key = date_range('1/1/2011', periods=size, freq='s')
df = DataFrame({'values': values, 'key': key})
        if application == 'transformation':
if method == 'describe':
raise NotImplementedError
self.as_group_method = lambda: df.groupby(
'key')['values'].transform(method)
self.as_field_method = lambda: df.groupby(
'values')['key'].transform(method)
else:
self.as_group_method = getattr(df.groupby('key')['values'], method)
self.as_field_method = getattr(df.groupby('values')['key'], method)
def time_dtype_as_group(self, dtype, method, application):
self.as_group_method()
def time_dtype_as_field(self, dtype, method, application):
self.as_field_method()
class RankWithTies(object):
# GH 21237
param_names = ['dtype', 'tie_method']
params = [['float64', 'float32', 'int64', 'datetime64'],
['first', 'average', 'dense', 'min', 'max']]
def setup(self, dtype, tie_method):
N = 10**4
if dtype == 'datetime64':
data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
else:
data = np.array([1] * N, dtype=dtype)
self.df = DataFrame({'values': data, 'key': ['foo'] * N})
def time_rank_ties(self, dtype, tie_method):
self.df.groupby('key').rank(method=tie_method)
class Float32(object):
# GH 13335
def setup(self):
tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
tmp = np.concatenate((tmp1, tmp2))
arr = np.repeat(tmp, 10)
self.df = DataFrame(dict(a=arr, b=arr))
def time_sum(self):
self.df.groupby(['a'])['b'].sum()
class Categories(object):
def setup(self):
N = 10**5
arr = np.random.random(N)
data = {'a': Categorical(np.random.randint(10000, size=N)),
'b': arr}
self.df = DataFrame(data)
data = {'a': Categorical(np.random.randint(10000, size=N),
ordered=True),
'b': arr}
self.df_ordered = DataFrame(data)
data = {'a': Categorical(np.random.randint(100, size=N),
categories=np.arange(10000)),
'b': arr}
self.df_extra_cat = DataFrame(data)
def time_groupby_sort(self):
self.df.groupby('a')['b'].count()
def time_groupby_nosort(self):
self.df.groupby('a', sort=False)['b'].count()
def time_groupby_ordered_sort(self):
self.df_ordered.groupby('a')['b'].count()
def time_groupby_ordered_nosort(self):
self.df_ordered.groupby('a', sort=False)['b'].count()
def time_groupby_extra_cat_sort(self):
self.df_extra_cat.groupby('a')['b'].count()
def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby('a', sort=False)['b'].count()
class Datelike(object):
# GH 14338
params = ['period_range', 'date_range', 'date_range_tz']
param_names = ['grouper']
def setup(self, grouper):
N = 10**4
rng_map = {'period_range': period_range,
'date_range': date_range,
'date_range_tz': partial(date_range, tz='US/Central')}
self.grouper = rng_map[grouper]('1900-01-01', freq='D', periods=N)
self.df = DataFrame(np.random.randn(10**4, 2))
def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
class SumBools(object):
# GH 2692
def setup(self):
N = 500
self.df = DataFrame({'ii': range(N),
'bb': [True] * N})
def time_groupby_sum_booleans(self):
self.df.groupby('ii').sum()
class SumMultiLevel(object):
# GH 9049
timeout = 120.0
def setup(self):
N = 50
self.df = DataFrame({'A': list(range(N)) * 2,
'B': range(N * 2),
'C': 1}).set_index(['A', 'B'])
def time_groupby_sum_multiindex(self):
self.df.groupby(level=[0, 1]).sum()
class Transform(object):
def setup(self):
n1 = 400
n2 = 250
index = MultiIndex(levels=[np.arange(n1), tm.makeStringIndex(n2)],
codes=[np.repeat(range(n1), n2).tolist(),
list(range(n2)) * n1],
names=['lev1', 'lev2'])
arr = np.random.randn(n1 * n2, 3)
arr[::10000, 0] = np.nan
arr[1::10000, 1] = np.nan
arr[2::10000, 2] = np.nan
data = DataFrame(arr, index=index, columns=['col1', 'col20', 'col3'])
self.df = data
n = 20000
self.df1 = DataFrame(np.random.randint(1, n, (n, 3)),
columns=['jim', 'joe', 'jolie'])
self.df2 = self.df1.copy()
self.df2['jim'] = self.df2['joe']
self.df3 = DataFrame(np.random.randint(1, (n / 10), (n, 3)),
columns=['jim', 'joe', 'jolie'])
self.df4 = self.df3.copy()
self.df4['jim'] = self.df4['joe']
def time_transform_lambda_max(self):
self.df.groupby(level='lev1').transform(lambda x: max(x))
def time_transform_ufunc_max(self):
self.df.groupby(level='lev1').transform(np.max)
def time_transform_multi_key1(self):
self.df1.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key2(self):
self.df2.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key3(self):
self.df3.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key4(self):
self.df4.groupby(['jim', 'joe'])['jolie'].transform('max')
class TransformBools(object):
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
transitions = np.zeros(N, dtype=np.bool)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({'signal': np.random.rand(N)})
def time_transform_mean(self):
self.df['signal'].groupby(self.g).transform(np.mean)
class TransformNaN(object):
# GH 12737
def setup(self):
self.df_nans = DataFrame({'key': np.repeat(np.arange(1000), 10),
'B': np.nan,
'C': np.nan})
self.df_nans.loc[4::10, 'B':'C'] = 5
def time_first(self):
self.df_nans.groupby('key').transform('first')
from .pandas_vb_common import setup # noqa: F401
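# asv drives the classes in this module by instantiating them, calling
# setup() with one combination of ``params`` and then timing each ``time_*``
# method.  A minimal sketch of that call sequence done by hand, e.g. for
# profiling one case outside asv (assumes the usual asv conventions and that
# the 'int'/'sum' combination is not in method_blacklist):
def _profile_groupby_sum_direct(number=10):
    import timeit
    bench = GroupByMethods()
    bench.setup('int', 'sum', 'direct')
    return timeit.timeit(
        lambda: bench.time_dtype_as_group('int', 'sum', 'direct'),
        number=number)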
|
|
from .core import position_relationship
from .core import messenger
from .core import console_print
from .core import query_exec
from .core import constants as c
from .core import transaction_factory
from .models import Customer, CustomerTransaction, Assistance, Crowd
from .models import CustomerTable, CustomerTransactionTable
from .intel import recommender as r
from .intel import recommender_test as rt
import datetime
from django.core import serializers
from rest_framework.response import Response
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
try:
    from urllib import quote_plus  # Python 2
except ImportError:
    from urllib.parse import quote_plus  # Python 3
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
#
# /position?
# cusId=bob&
# targetPos=6&
# currentPos=6&
# trxId=1&
# asst=True
# Response:
# [exit: False,
# nearby: True]
@csrf_exempt
def position_update(request):
if request.method == 'GET':
target = request.GET.get('targetPos', None)
current = request.GET.get('currentPos', None)
cusId = request.GET.get('cusId', None)
trxId = request.GET.get('trxId', None)
is_asst_needed = request.GET.get('asst', None)
if (target is None) | (current is None) | (cusId is None) | (trxId is None) | (is_asst_needed is None):
response = JsonResponse({'status': c.VALUE_NULL})
else:
#check position between current position with shop
rel = position_relationship.get_position_relationship(target, current)
is_nearby = (rel==c.POSITION_REL_NEARBY)
is_target = (rel==c.POSITION_REL_TARGET)
#check if exit or not
position_relationship.update_position_status(trxId, current, target)
is_exit = (position_relationship.get_position_status(trxId) == c.POSITION_STATUS_EXIT)
            # 'asst' arrives as the query-string literal 'True' or 'False'
            is_notify = (is_asst_needed == 'True') and (not is_exit) and (is_nearby or is_target)
trans_obj = CustomerTransaction.objects.get(TRANSACTION_ID = trxId)
if is_exit:
is_nearby = False
trans_obj.TIME_OF_EXIT = datetime.datetime.now().strftime(c.DATE_TIME_FMT)
trans_obj.save(update_fields = ['TIME_OF_EXIT'])
if is_target:
trans_obj.TIME_OF_ENTER = datetime.datetime.now().strftime(c.DATE_TIME_FMT)
trans_obj.save(update_fields = ['TIME_OF_ENTER'])
if is_notify:
messenger.notify_assistance(rel, cusId, current, target)
print('Message Sent')
else:
print('NO Messages')
print('trxId=',trxId,'\t current=',current,'\t target=',target,'\t is_exit=',is_exit,'\t STATUS=',position_relationship.get_position_status(trxId))
response = JsonResponse({'exit': is_exit, 'nearby': is_nearby })
return response
else:
raise Http404()
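# A minimal sketch of exercising the endpoint above with Django's test
# client.  The '/position' path and the pre-seeded CustomerTransaction row
# (trxId=1) are assumptions; the URL configuration is not part of this module.
def _example_position_update_call():
    from django.test import Client
    client = Client()
    resp = client.get('/position', {'cusId': 'bob', 'targetPos': '6',
                                    'currentPos': '6', 'trxId': '1',
                                    'asst': 'True'})
    # Expected payload shape: {"exit": false, "nearby": true}
    return resp.content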
# cusId, shopId, shopStar, shopAsstStar, trxId=1
# reviewText="this place is nice"
# Response: # [success:True]
def send_review(request):
if request.method == 'GET':
cusId = request.GET.get('cusId', None)
shopId = request.GET.get('shopId', None)
shopStar = request.GET.get('shopStar', None)
shopAsstStar = request.GET.get('shopAsstStar', None)
trxId = request.GET.get('trxId', None)
reviewText = request.GET.get('reviewText', '')
if (cusId is None) | (shopId is None) | (shopStar is None) | (shopAsstStar is None) | (trxId is None):
response = JsonResponse({'success': False})
else:
#Commit review to DB
status = transaction_factory.update_trans(trxId, cusId, shopId, shopStar, shopAsstStar, reviewText)
response = JsonResponse({'success': status})
return response
else:
raise Http404()
#/get_recommendation_for_shop?
# cusId=bob&
# shopId=1
# Response:
# [shops: [2,3,5]]
def get_recommendation_for_shop(request):
if request.method == 'GET':
cusId = request.GET.get('cusId', None)
shopId = request.GET.get('shopId', None)
if (cusId is None) | (shopId is None):
response = JsonResponse({'status': c.VALUE_NULL})
else:
rec_shops = r.recommendation_by_shop_names(int(shopId))
records = []
rec_len = len(rec_shops)
            for i in range(rec_len):
crowdObj = Crowd.objects.get(SHOP_ID = rec_shops[i])
crowLevel = crowdObj.CROWD_LEVEL
record = {'shop': rec_shops[i] , 'crowdLevel': crowLevel}
records.append(record)
response = JsonResponse(records,safe=False)
return response
else:
raise Http404()
#/get_recommendation_for_shop?
# cusId=bob&
# productCatId=1
# Response:
# [shops: [2,3,5]]
def get_recommendation_for_product(request):
if request.method == 'GET':
cusId = request.GET.get('cusId', None)
productCatId = request.GET.get('productCatId', None)
if (cusId is None) | (productCatId is None):
response = JsonResponse({'status': c.VALUE_NULL})
else:
rec_shops = r.recommendation_by_pdt_cat(int(cusId),int(productCatId))
records = []
rec_len = len(rec_shops)
            for i in range(rec_len):
crowdObj = Crowd.objects.get(SHOP_ID = rec_shops[i])
crowLevel = crowdObj.CROWD_LEVEL
record = {'shop': rec_shops[i] , 'crowdLevel': crowLevel}
records.append(record)
response = JsonResponse(records,safe=False)
return response
else:
raise Http404()
# /init_trip_with_shop?
# cusId=1&
# shopId=1
# Response:
# [transactionId: 1]
def init_trip_with_shop(request):
if request.method == 'GET':
cusId = request.GET.get('cusId', None)
shopId = request.GET.get('shopId', None)
if (cusId is None) | (shopId is None):
response = JsonResponse({'status': c.VALUE_NULL})
else:
trans_id = transaction_factory.create_trans_id(cusId, shopId, None)
response = JsonResponse({'transactionId': trans_id})
return response
else:
raise Http404()
# /init_trip_with_shop_and_product?
# cusId=bob&
# shopId=1&
# productCatId=1
# Response:
# [transactionId: 1]
def init_trip_with_shop_and_product(request):
if request.method == 'GET':
cusId = request.GET.get('cusId', None)
shopId = request.GET.get('shopId', None)
productCatId = request.GET.get('productCatId', None)
if (cusId is None) | (shopId is None) | (productCatId is None) :
response = JsonResponse({'status': c.VALUE_NULL})
else:
trans_id = transaction_factory.create_trans_id(cusId, shopId, productCatId)
response = JsonResponse({'transactionId': trans_id})
return response
else:
raise Http404()
#
# /get_shop_asst?
# cusId=bob&
# trxId=1
# Response:
# [shopAsstName: "Tracy",
# shopAsstDesc:"Tracy sells shoes"]
# shopAsstId:1
def get_shop_asst(request):
if request.method == 'GET':
cusId = request.GET.get('cusId', None)
trxId = request.GET.get('trxId', None)
if (cusId is None) | (trxId is None):
response = JsonResponse({'status': c.VALUE_NULL})
else:
trans_obj = CustomerTransaction.objects.get(TRANSACTION_ID = trxId)
shop_id = trans_obj.SHOP_ID
asst_id = r.recommend_shop_asst(shop_id)
# asst_id = 15 // Chris
asst_obj = Assistance.objects.get(ASST_ID = asst_id)
asst_name = asst_obj.ASST_NAME
asst_desc = "Gender:" + asst_obj.GENDER + ", Main Language:" + asst_obj.PREF_LANG_1
asst_photo_url = asst_obj.PHOTO_URL
if asst_photo_url is None:
asst_photo_url = c.NO_PHOTO_URL
print('shopAsstId',asst_id,'shopAsstName', asst_name, 'shopAsstDesc',asst_desc)
transaction_factory.update_trans_asst_id(trxId, asst_id)
response = JsonResponse({'shopAsstId':int(asst_id),'shopAsstName': asst_name, 'shopAsstDesc':asst_desc, 'photoUrl': asst_photo_url})
return response
else:
raise Http404()
body_text = ""
@csrf_exempt
def api_post(request):
global body_text
if request.method == 'POST':
body_text = str(request.body)
response = JsonResponse({'request.body':body_text})
return response
elif request.method == 'GET':
response = JsonResponse({'request.body':body_text})
return response
else:
raise Http404()
def test(request):
module_name = request.GET.get('module', None)
if (module_name is None):
return JsonResponse({'Result':'NULL module parameter. Add ?module=some_name to URL to test'})
else:
result = rt.test(module_name)
response = JsonResponse({'Result':result})
return response
|
|
#!/usr/bin/env python2.3
# -*- coding: latin-1 -*-
# $Id: edmmotions.py,v 1.1 2006/04/27 14:20:20 twfy-live Exp $
import xml.sax
import datetime
import sys
import urllib
import urlparse
import re
import string
import os
import time
sys.path.append("../pyscraper/")
from resolvemembernames import memberList
class edmList(xml.sax.handler.ContentHandler):
def __init__(self):
self.reloadXML()
def reloadXML(self):
self.edmlookups={}
parser = xml.sax.make_parser()
parser.setContentHandler(self)
parser.parse('edm-links.xml')
def startElement(self, name, attr):
if name == 'memberinfo':
self.edmlookups[attr['edm_ais_url']] = attr['id']
def lookup(self, url):
return self.edmlookups.get(url, None)
edmList = edmList()
edm_dir = "/home/fawkes/pwdata/edms/"
edm_index_url = "http://edm.ais.co.uk/weblink/html/motions.html/EDMI_SES=/order=1/statusdrop=2/start=%s"
edm_index_cached_url = "http://edm.ais.co.uk/cache/motions/list.1.%s.2.html"
def get_motion(session, ref):
sn = sessions[session]
motion = '%s%s/%s.m.html' % (edm_dir, sn, ref)
if os.path.exists(motion):
f = open(motion, 'r')
content = f.read()
f.close()
else:
edmurl = 'http://edm.ais.co.uk/weblink/html/motion.html/EDMI_SES=%s/ref=%s' % (session, ref)
ur = urllib.urlopen(edmurl)
content = ur.read()
ur.close()
print >> sys.stderr, "Fetching %s motion %s text page" % (sn, ref)
m = re.search('<FRAME\s+SRC="(.*?)"\s+NAME="TEXT"', content)
edmurl = urlparse.urljoin(edmurl, m.group(1))
content = ''
timeout = 10
while timeout>0 and (content == '' or re.search('Not Found', content)):
if re.search('Not Found', content):
print "'Not Found' - trying again"
time.sleep(10)
ur = urllib.urlopen(edmurl)
content = ur.read()
ur.close()
timeout -= 1
fout = open(motion, 'w')
fout.write(content)
fout.close()
time.sleep(5)
return content
def get_printable(session, ref):
sn = sessions[session]
printable = '%s%s/%s.p.html' % (edm_dir, sn, ref)
if os.path.exists(printable):
f = open(printable, 'r')
content = f.read()
f.close()
else:
print >> sys.stderr, "Fetching %s motion %s printable page" % (sn, ref)
ur = urllib.urlopen('http://edm.ais.co.uk/weblink/html/printable.html/ref=%s/EDMI_SES=%s' % (ref, session))
content = ur.read()
ur.close()
fout = open(printable, 'w')
fout.write(content)
fout.close()
return content
def get_signers(session, ref):
sn = sessions[session]
signers = '%s%s/%s.s.html' % (edm_dir, sn, ref)
if os.path.exists(signers):
f = open(signers, 'r')
content = f.read()
f.close()
else:
print >> sys.stderr, "Fetching %s motion %s signature page" % (sn, ref)
content = ''
timeout = 10
while timeout>0 and (content == '' or re.search('Not Found', content)):
if re.search('Not Found', content):
print "'Not Found' - trying again"
time.sleep(10)
ur = urllib.urlopen('http://edm.ais.co.uk/weblink/html/motion_s.html/ref=%s/EDMI_SES=%s/order=1/statusdrop=2' % (ref, session))
content = ur.read()
ur.close()
timeout -= 1
fout = open(signers, 'w')
fout.write(content)
fout.close()
time.sleep(5)
return content
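# get_motion() and get_signers() above repeat the same "re-fetch while the
# cache answers 'Not Found'" loop.  A hypothetical helper showing how that
# retry pattern could be factored out (not called by the script as written):
def fetch_with_retry(url, attempts=10, delay=10):
    content = ''
    while attempts > 0 and (content == '' or re.search('Not Found', content)):
        if re.search('Not Found', content):
            print "'Not Found' - trying again"
            time.sleep(delay)
        ur = urllib.urlopen(url)
        content = ur.read()
        ur.close()
        attempts -= 1
    return content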
def get_member(memberurl, pnum, session):
sn = sessions[session]
member = '%s%s/%s.html' % (edm_dir, sn, pnum)
if os.path.exists(member):
f = open(member, 'r')
content = f.read()
f.close()
else:
print >> sys.stderr, "Having to look up %s %s" % (sn, memberurl)
url = '%s/EDMI_SES=%s' % (memberurl, session)
ur = urllib.urlopen(url)
content = ur.read()
ur.close()
m = re.search('<FRAME\s+SRC="(.*?)"\s+NAME="CONTENT"', content)
if m==None:
raise Exception, "Couldn't find content frame: %s" % content
frameurl = urlparse.urljoin(url, m.group(1))
ur = urllib.urlopen(frameurl)
content = ur.read()
ur.close()
fout = open(member, 'w')
fout.write(content)
fout.close()
return content
fixes = [
('VUNERABLE', 'VULNERABLE'), ('AVIATON', 'AVIATION'), ('LEASHOLD', 'LEASEHOLD'), ('WORKERS\(USDAW\)','WORKERS (USDAW)'), ('SEPERATION','SEPARATION'), ('OBECTIVES','OBJECTIVES'), (' AMD ',' AND '), ('ARTIC','ARCTIC')
]
matcher = '<!-- \*\*\* Reference number \*\*\* -->.*?'
matcher += '<font face="arial,helvetica" size=2>(<[BI]>)?(.*?)</FONT>.*?'
matcher += '<!-- \*\*\* Motion title \*\*\* -->.*?'
matcher += '<A HREF="(.*?)" TARGET="_parent">\s*'
matcher += '<font face="arial,helvetica" size=2>(?:<[BI]>)?([^<]*?)</font></A>\s*'
matcher += '</TD>\s*<!-- \*\*\* Signatures -->.*?'
matcher += '(?:<font face="arial,helvetica" size=2>(?:<[BI]>)?(\d+) </font>\s*)?'
matcher += '</TD>\s*<!-- \*\*\* Motion date \*\*\* -->.*?'
matcher += '<font face="arial,helvetica" size=2>(?:<[BI]>)?(\d\d)\.(\d\d)\.(\d\d)</FONT>'
matcher += '(?s)'
sessions = {'05':'2005', '':'2004', '04':'2004', '03':'2003', '02':'2002', '01':'2001', '00':'2000', '99':'1999', '98':'1998', '97':'1997'}
signers = {}
edms = {}
sigs = {}
primary = {}
session = sys.argv[1]
for memberurl in edmList.edmlookups:
pid = memberList.membertoperson(edmList.lookup(memberurl))
m = re.search('=(.*?)SlAsHcOdEsTrInG(.*)', memberurl)
lastname = urllib.unquote(m.group(1))
firstname = urllib.unquote(m.group(2))
pnum = int(re.sub('uk.org.publicwhip/person/','',pid))
# print >> sys.stderr, "Member:%s, ID:%s, session:%s" % (memberurl,pid,sessions[session])
content = get_member(memberurl, pnum, session)
if re.search('no EDMs', content):
        continue
for fix in fixes:
content = re.sub(fix[0], fix[1], content)
m = re.search('ound (\d+) EDMs? signed', content)
total = int(m.group(1))
matches = re.findall(matcher, content)
count = 0
for (type, ref, url, title, num, day, month, year) in matches:
id = "%s.%s" % (sessions[session], ref)
title = string.capwords(title)
url = urlparse.urljoin(memberurl, url)
year = sessions[year]
date = "%s-%s-%s" % (year, month, day)
if id not in edms:
edms[id] = {'session':sessions[session], 'ref':ref, 'title':title, 'url':url, 'num':num, 'status':'Open'}
content = get_motion(session, ref)
# print >> sys.stderr, "Adding EDM %s, title %s" % (ref, title)
m = re.search('<TD>(?:<font face="arial,helvetica" size=2>|<FONT SIZE="-1"><font face="arial,helvetica" size=2><B>)\s*(.*?)(?:</font>|</B></font></FONT>)</TD>', content)
if m:
motiontext = m.group(1)
edms[id]['text'] = motiontext
else:
m = re.search('<FONT SIZE="-1"><font face="arial,helvetica" size=2><B>The status of this EDM is (CLOSED|SUSPENDED). Reason: (.*?).</B></font>', content)
edms[id]['status'] = string.capwords(m.group(1))
edms[id]['closed'] = m.group(2)
if ref not in sigs:
# print >> sys.stderr, "Adding signatures, ref %s" % ref
s = get_signers(session,ref)
m = re.findall('(?:<FONT SIZE="-1">|<font face="arial,helvetica" size=2><(?:B|I)>)([^<]*?)/([^<]*?)(?:</(?:B|I)></font>|</FONT>)', s)
pos = 0
sigs[ref] = {}
for (last, first) in m:
pos += 1
sigs[ref][(last, first)] = pos
pos = sigs[ref][(lastname, firstname)]
curr = edms[id]
if curr['title']!=title or curr['url']!=url:
print >> sys.stderr, "EDM data doesn't match: %s:%s %s:%s" % (curr['title'],title,curr['url'],url)
if curr['num']!=num:
if num and not curr['num']:
edms[id]['num'] = num
elif not num and curr['num']:
pass
else:
raise Exception, "EDM number doesn't match: %s vs %s" % (curr['num'], num)
if type=='<B>':
type = 'Primary'
primary[id] = 1
if 'date' not in edms[id]:
edms[id]['date'] = date
else:
if curr['date'] != edms[id]['date']:
raise Exception, "EDM date doesn't match: %s:%s" % (curr['date'], edms[id]['date'])
elif type=='<I>':
type = 'Sponsor'
else:
type = 'Supporter'
signers.setdefault(id,[]).append( (pid, type, date, pos) )
count += 1
assert total == count
keys = edms.keys()
keys.sort()
for id in keys:
if id not in primary:
print >> sys.stderr, "%s doesn't have a primary sponsor" % id
print ' <edm id="%s" session="%s" ref="%s" title="%s" url="%s" num="%s" date="%s" closed="%s">' % (id, edms[id]['session'], edms[id]['ref'], edms[id]['title'], edms[id]['url'], edms[id]['num'], 'date' in edms[id] and edms[id]['date'] or 'Unknown', 'closed' in edms[id] and edms[id]['closed'] or '')
if 'text' in edms[id]:
print ' <text>%s</text>' % edms[id]['text']
for s in signers[id]:
print ' <signature id="%s" type="%s" date="%s" pos="%s" />' % (s[0], s[1], s[2], s[3])
print ' </edm>'
assert False
matcher = '<!-- Ref -->\s*<TD WIDTH=14>[^C]*?(Closed)?[^C]*?</TD>\s*'
matcher += '<TD ALIGN="CENTER" VALIGN="TOP">\s*<font face="arial,helvetica" size=2><FONT SIZE="-1">\s*<B>(.*?)</B>\s*</FONT>\s*</TD>\s*'
matcher += '<!-- Motion Title -->\s*<TD ALIGN="LEFT" VALIGN="TOP">\s*<font face="arial,helvetica" size=2><FONT COLOR="#0000DD">\s*<A HREF="(/weblink/html/motion.html/ref=.*?)" TARGET="_top">\s*(.*?)</A>\s*</FONT>\s*</TD>\s*'
matcher += '<!-- Sponsor -->\s*<TD ALIGN="LEFT" VALIGN="TOP">\s*<A HREF="/weblink/html/member.html/mem=.*?" TARGET="_top" >\s*<font face="arial,helvetica" size=2>.*?/.*?</A>\s*</TD>\s*'
matcher += '<!-- Count of signatures -->\s*<TD ALIGN="RIGHT" VALIGN="TOP">\s*<font face="arial,helvetica" size=2><FONT SIZE="-1">(\d+)</FONT> \s*</TD>'
print '''<?xml version="1.0" encoding="ISO-8859-1"?>
<publicwhip>'''
start = 1
edms = 0
while (start==1 or start < edms):
url = edm_index_cached_url % (start)
ur = urllib.urlopen(url)
content = ur.read()
ur.close()
if re.search("Not Found(?i)", content):
url = edm_index_url % (start)
ur = urllib.urlopen(url)
content = ur.read()
ur.close()
m = re.search('<FRAME SRC="(.*?)" NAME="CONTENT"', content)
url = urlparse.urljoin(url, m.group(1))
ur = urllib.urlopen(url)
content = ur.read()
ur.close()
if re.search("Not Found(?i)", content):
raise Exception, "Failed to get content in url %s" % url
if not edms:
m = re.search('<FONT SIZE=-1>(\d+) EDMs and Amendments',content)
edms = int(m.group(1))
matches = re.findall(matcher, content)
for (closed, ref, title_url, title, num) in matches:
content = get_printable(session, ref)
m = re.search('<TD COLSPAN="2"><font face="arial,helvetica" size=2>\s*(.*?)</TD>', content)
motiontext = m.group(1)
print ' <edm ref="%s" title="%s" number="%s" url="%s" closed="%s">' % (ref, title, num, title_url, closed)
print ' <text>%s</text>' % motiontext
print ' </edm>'
start += 50
assert False
sys.stdout.flush()
print '</publicwhip>'
|
|
import base64
import logging
import quopri
import re
import urllib2
from email import message_from_string
from email.utils import parseaddr
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import translation
from email_reply_parser import EmailReplyParser
import mkt
from mkt.access import acl
from mkt.access.models import Group
from mkt.comm.models import CommunicationThreadToken, user_has_perm_thread
from mkt.constants import comm
from mkt.extensions.models import Extension
from mkt.site.helpers import absolutify
from mkt.site.mail import send_mail_jinja
from mkt.translations.utils import to_language
from mkt.webapps.models import Webapp
log = logging.getLogger('z.comm')
def send_mail_comm(note):
"""
Email utility used globally by the Communication Dashboard to send emails.
Given a note (its actions and permissions), recipients are determined and
emails are sent to appropriate people.
"""
log.info(u'Sending emails for %s' % note.thread.obj)
if note.note_type in comm.EMAIL_SENIOR_REVIEWERS_AND_DEV:
# Email senior reviewers (such as for escalations).
rev_template = comm.EMAIL_SENIOR_REVIEWERS_AND_DEV[note.note_type][
'reviewer']
email_recipients(get_senior_reviewers(), note, template=rev_template)
# Email developers (such as for escalations).
dev_template = comm.EMAIL_SENIOR_REVIEWERS_AND_DEV[note.note_type][
'developer']
email_recipients(get_developers(note), note, template=dev_template)
else:
email_recipients(get_recipients(note), note)
# Also send mail to the fallback emailing list.
if note.note_type == comm.DEVELOPER_COMMENT:
subject = '%s: %s' % (unicode(comm.NOTE_TYPES[note.note_type]),
note.thread.obj.name)
mail_template = comm.COMM_MAIL_MAP.get(note.note_type, 'generic')
send_mail_jinja(subject, 'comm/emails/%s.html' % mail_template,
get_mail_context(note),
recipient_list=[settings.MKT_REVIEWS_EMAIL],
from_email=settings.MKT_REVIEWERS_EMAIL,
perm_setting='app_reviewed')
def get_recipients(note):
"""
Determine email recipients mainly based on CommunicationThreadCC.
Returns user_id/user_email tuples.
"""
if note.note_type in comm.EMAIL_SENIOR_REVIEWERS:
return get_senior_reviewers()
thread = note.thread
recipients = thread.thread_cc.values_list('user__id', 'user__email')
excludes = []
if not note.read_permission_developer:
# Exclude developer.
excludes += get_developers(note)
if note.author:
# Exclude note author.
excludes.append((note.author.id, note.author.email))
return [r for r in set(recipients) if r not in excludes]
def tokenize_recipients(recipients, thread):
"""[(user_id, user_email)] -> [(user_email, token)]."""
tokenized_recipients = []
for user_id, user_email in recipients:
if not user_id:
tokenized_recipients.append((user_email, None))
else:
tok = get_reply_token(thread, user_id)
tokenized_recipients.append((user_email, tok.uuid))
return tokenized_recipients
def email_recipients(recipients, note, template=None, extra_context=None):
"""
Given a list of tuple of user_id/user_email, email bunch of people.
note -- commbadge note, the note type determines which email to use.
template -- override which template we use.
"""
subject = '%s: %s' % (unicode(comm.NOTE_TYPES[note.note_type]),
note.thread.obj.name)
for email, tok in tokenize_recipients(recipients, note.thread):
headers = {}
if tok:
headers['Reply-To'] = '{0}{1}@{2}'.format(
comm.REPLY_TO_PREFIX, tok, settings.POSTFIX_DOMAIN)
# Get the appropriate mail template.
mail_template = template or comm.COMM_MAIL_MAP.get(note.note_type,
'generic')
# Send mail.
context = get_mail_context(note)
context.update(extra_context or {})
send_mail_jinja(subject, 'comm/emails/%s.html' % mail_template,
context, recipient_list=[email],
from_email=settings.MKT_REVIEWERS_EMAIL,
perm_setting='app_reviewed', headers=headers)
def get_mail_context(note):
"""
Get context data for comm emails, specifically for review action emails.
"""
obj = note.thread.obj
# grep: comm-content-type.
if obj.name and obj.__class__ == Webapp:
# We need to display the name in some language that is relevant to the
# recipient(s) instead of using the reviewer's. addon.default_locale
# should work.
lang = to_language(obj.default_locale)
with translation.override(lang):
obj = Webapp.with_deleted.get(id=obj.id)
elif not obj.name:
# For deleted objects.
obj.name = obj.app_slug if hasattr(obj, 'app_slug') else obj.slug
# grep: comm-content-type.
manage_url = ''
obj_type = ''
review_url = ''
if obj.__class__ == Webapp:
manage_url = absolutify(obj.get_dev_url('versions'))
obj_type = 'app'
review_url = absolutify(reverse('reviewers.apps.review',
args=[obj.app_slug]))
elif obj.__class__ == Extension:
manage_url = absolutify(reverse('commonplace.content.addon_manage',
args=[obj.slug]))
# Not "Firefox OS add-on" for a/an consistency with "app".
obj_type = 'add-on'
review_url = absolutify(reverse('commonplace.content.addon_review',
args=[obj.slug]))
return {
'mkt': mkt,
'comm': comm,
'is_app': obj.__class__ == Webapp,
'is_extension': obj.__class__ == Extension,
'manage_url': manage_url,
'note': note,
'obj': obj,
'obj_type': obj_type,
'review_url': review_url,
'settings': settings
}
class CommEmailParser(object):
"""Utility to parse email replies."""
address_prefix = comm.REPLY_TO_PREFIX
def __init__(self, email_text):
"""Decode base64 email and turn it into a Django email object."""
try:
email_text = base64.standard_b64decode(
urllib2.unquote(email_text.rstrip()))
except TypeError:
# Corrupt or invalid base 64.
self.decode_error = True
log.info('Decoding error for CommEmailParser')
return
self.email = message_from_string(email_text)
payload = self.email.get_payload()
if isinstance(payload, list):
# If multipart, get the plain text part.
for part in payload:
# Nested multipart. Go deeper.
if part.get_content_type() == 'multipart/alternative':
payload = part.get_payload()
for part in payload:
if part.get_content_type() == 'text/plain':
# Found the plain text part.
payload = part.get_payload()
break
if part.get_content_type() == 'text/plain':
# Found the plain text part.
payload = part.get_payload()
break
# Decode quoted-printable data and remove non-breaking spaces.
payload = (quopri.decodestring(payload)
.replace('\xc2\xa0', ' '))
payload = self.extra_email_reply_parse(payload)
self.reply_text = EmailReplyParser.read(payload).reply
def extra_email_reply_parse(self, email):
"""
Adds an extra case to the email reply parser where the reply is
followed by headers like "From: appreviews@lists.mozilla.org" and
strips that part out.
"""
email_header_re = re.compile('From: [^@]+@[^@]+\.[^@]+')
split_email = email_header_re.split(email)
if split_email[0].startswith('From: '):
# In case, it's a bottom reply, return everything.
return email
else:
# Else just return the email reply portion.
return split_email[0]
def _get_address_line(self):
        return parseaddr(self.email['to']) or parseaddr(self.email['reply'])
def get_uuid(self):
name, addr = self._get_address_line()
if addr.startswith(self.address_prefix):
# Strip everything between "commreply+" and the "@" sign.
uuid = addr[len(self.address_prefix):].split('@')[0]
else:
log.info('TO: address missing or not related to comm. (%s)'
% unicode(self.email).strip())
return False
return uuid
def get_body(self):
return self.reply_text
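# A minimal sketch of the Reply-To round trip handled above.  The uuid and
# domain values are illustrative only; comm.REPLY_TO_PREFIX is whatever prefix
# the constants module defines (the parser relies on it via address_prefix).
def _example_reply_to_round_trip(uuid='0123456789abcdef',
                                 domain='marketplace.example.com'):
    # Outgoing mail (email_recipients) builds the address from the token...
    reply_to = '{0}{1}@{2}'.format(comm.REPLY_TO_PREFIX, uuid, domain)
    # ...and get_uuid() recovers the token by stripping the prefix and
    # everything after the '@'.
    name, addr = parseaddr('App Reviewer <%s>' % reply_to)
    assert addr.startswith(comm.REPLY_TO_PREFIX)
    assert addr[len(comm.REPLY_TO_PREFIX):].split('@')[0] == uuid
    return addr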
def save_from_email_reply(reply_text):
from mkt.comm.utils import create_comm_note
log.debug("Saving from email reply")
parser = CommEmailParser(reply_text)
if hasattr(parser, 'decode_error'):
return False
uuid = parser.get_uuid()
if not uuid:
return False
try:
tok = CommunicationThreadToken.objects.get(uuid=uuid)
except CommunicationThreadToken.DoesNotExist:
log.error('An email was skipped with non-existing uuid %s.' % uuid)
return False
thread = tok.thread
if user_has_perm_thread(thread, tok.user) and tok.is_valid():
# Deduce an appropriate note type.
note_type = comm.NO_ACTION
# grep: comm-content-type.
if (thread.obj.__class__ == Webapp and
tok.user.addonuser_set.filter(addon=thread.obj).exists()):
note_type = comm.DEVELOPER_COMMENT
elif (thread.obj.__class__ == Extension and
tok.user.extension_set.filter(id=thread.obj.id).exists()):
note_type = comm.DEVELOPER_COMMENT
elif (acl.action_allowed_user(tok.user, 'Apps', 'Review') or
acl.action_allowed_user(tok.user, 'Firefox OS Add-ons',
'Review')):
note_type = comm.REVIEWER_COMMENT
t, note = create_comm_note(tok.thread.obj, tok.thread.version,
tok.user, parser.get_body(),
note_type=note_type)
log.info('A new note has been created (from %s using tokenid %s).'
% (tok.user.id, uuid))
return note
elif tok.is_valid():
log.error('%s did not have perms to reply to comm email thread %s.'
% (tok.user.email, tok.thread.id))
else:
log.error('%s tried to use an invalid comm token for thread %s.'
% (tok.user.email, tok.thread.id))
return False
def get_reply_token(thread, user_id):
tok, created = CommunicationThreadToken.objects.get_or_create(
thread=thread, user_id=user_id)
# We expire a token after it has been used for a maximum number of times.
# This is usually to prevent overusing a single token to spam to threads.
# Since we're re-using tokens, we need to make sure they are valid for
# replying to new notes so we reset their `use_count`.
if not created:
tok.update(use_count=0)
else:
log.info('Created token with UUID %s for user_id: %s.' %
(tok.uuid, user_id))
return tok
def get_developers(note):
return list(note.thread.obj.authors.values_list('id', 'email'))
def get_senior_reviewers():
try:
return list(Group.objects.get(name='Senior App Reviewers')
.users.values_list('id', 'email'))
except Group.DoesNotExist:
return []
|
|
from datetime import datetime
import os
try:
import unittest2 as unittest
except:
import unittest
from prisoner.gateway import *
from prisoner import SocialObjects
from prisoner.workflow import PolicyProcessor
from prisoner.workflow.Exceptions import *
"""
This test suite ensures:
- the PolicyProcessor validates XML files correctly
- requests for objects are validated and sanitised appropriately
"""
class BasePolicyProcessorTestCase(unittest.TestCase):
def setUp(self):
self.policy_processor = PolicyProcessor.PolicyProcessor()
dir = os.path.dirname(__file__)
self.good_policy = os.path.join(dir, "good-policy.xml")
self.bad_policy = os.path.join(dir, "bad-policy.xml")
self.disallow_policy = os.path.join(dir, "disallow-policy.xml")
self.good_response = None
self.good_response_bad_headers = None
self.bad_response_no_headers = None
self.bad_response_bad_headers = None
""" Returns a Policy Processor with a valid policy which approves the
requests in the test suite"""
def get_good_processor(self):
return PolicyProcessor.PolicyProcessor(self.good_policy)
""" Returns a Policy Processor with a valid policy which disallows the
requests in the test suite"""
def get_disallow_processor(self):
return PolicyProcessor.PolicyProcessor(self.disallow_policy)
""" Returns a Policy Processor with an invalid policy. This should raise
an Exception on instantiation"""
def get_bad_processor(self):
return PolicyProcessor.PolicyProcessor(self.bad_policy)
class ValidatePolicyTestCase(BasePolicyProcessorTestCase):
def test_good_policy(self):
self.policy_processor = PolicyProcessor.PolicyProcessor()
policy = self.policy_processor.validate_policy(self.good_policy)
self.assertTrue("privacy-policy" in policy.getroot().tag)
def test_bad_policy(self):
self.policy_processor = PolicyProcessor.PolicyProcessor()
with self.assertRaises(Exception) as exp:
is_valid = self.policy_processor.validate_policy(self.bad_policy)
def test_no_policy(self):
self.policy_processor = PolicyProcessor.PolicyProcessor()
with self.assertRaises(IOError) as exp:
is_valid = self.policy_processor.validate_policy(None)
class InferObjectTestCase(BasePolicyProcessorTestCase):
def test_good_literal(self):
policy_proc = self.get_good_processor()
obj = policy_proc._infer_object("literal:word")
self.assertEqual(obj,"word")
def test_invalid_literal(self):
policy_proc = self.get_good_processor()
with self.assertRaises(RuntimePrivacyPolicyParserError):
obj = policy_proc._infer_object("literal:")
def test_valid_social_gateway(self):
policy_proc = self.get_good_processor()
obj = policy_proc._infer_object("Lastfm:Image")
# self.assertTrue(isinstance(obj,LastfmServiceGateway.Image))
# TODO: Assert social gateway objects
# This is going to be refactored soon (so instances of gateways
# aren't constantly being generated, so hold off with tests for
# now
def test_valid_base(self):
policy_proc = self.get_good_processor()
obj = policy_proc._infer_object("base:Image")()
self.assertTrue(isinstance(obj, SocialObjects.Image))
def test_invalid_base(self):
policy_proc = self.get_good_processor()
with self.assertRaises(SocialObjectNotSupportedError):
obj = policy_proc._infer_object("base:NotAObject")
def test_missing_base(self):
policy_proc = self.get_good_processor()
with self.assertRaises(RuntimePrivacyPolicyParserError):
obj = policy_proc._infer_object("base:")
def test_invalid_social_gateway(self):
policy_proc = self.get_good_processor()
with self.assertRaises(ServiceGatewayNotFoundError):
obj = policy_proc._infer_object("blah:bleh")
class InferAttributesTestCase(BasePolicyProcessorTestCase):
def test_good_obj(self):
policy_proc = self.get_good_processor()
person = SocialObjects.Person()
person.id = "me"
obj = policy_proc._infer_attributes("id",person)
self.assertEqual(obj,"me")
def test_bad_obj(self):
policy_proc = self.get_good_processor()
person = SocialObjects.Person()
#with self.assertRaises(AttributeError):
obj = policy_proc._infer_attributes("id",person)
self.assertEqual(obj,None)
def test_bad_attribute(self):
policy_proc = self.get_good_processor()
person = SocialObjects.Person()
person.id = "me"
with self.assertRaises(AttributeError):
obj = policy_proc._infer_attributes("blah",person)
def test_bad_format(self):
policy_proc = self.get_good_processor()
with self.assertRaises(AttributeError):
obj = policy_proc._infer_attributes("blah",None)
def test_good_nested_obj(self):
policy_proc = self.get_good_processor()
test_obj = SocialObjects.Person()
        test_obj.updated = datetime.fromtimestamp(0)
obj = policy_proc._infer_attributes("updated.year",test_obj)
self.assertEqual(obj,1970)
def test_bad_nested_obj(self):
policy_proc = self.get_good_processor()
test_obj = SocialObjects.Person()
        test_obj.updated = datetime.fromtimestamp(0)
with self.assertRaises(AttributeError):
obj = policy_proc._infer_attributes("updated.blah", test_obj)
class SanitiseObjectRequestTestCase(BasePolicyProcessorTestCase):
def test_good_response(self):
policy_proc = self.get_good_processor()
def test_malformed_headers(self):
pass
def test_missing_headers(self):
pass
def test_malformed_response(self):
pass
def test_no_allow_attribute(self):
pass
def test_logic_failOnAnd(self):
pass
def test_logic_failOnOr(self):
pass
def test_logic_failOnNested(self):
pass
def test_logic_failOnImplicitAnd(self):
pass
class ValidateObjectRequestTestCase(BasePolicyProcessorTestCase):
def test_good_validation(self):
policy_proc = self.get_good_processor()
test_person = SocialObjects.Person()
test_person.id = "lukeweb"
request = policy_proc._validate_object_request("GET", "Lastfm",
"Image", test_person)
self.assertTrue(request)
def test_fail_validation(self):
policy_proc = self.get_disallow_processor()
test_person = SocialObjects.Person()
test_person.id = "lukeweb"
with self.assertRaises(DisallowedByPrivacyPolicyError) as exp:
request = policy_proc._validate_object_request("GET", "Lastfm",
"Image", test_person)
def test_bad_request_badOperation(self):
policy_proc = self.get_good_processor()
test_person = SocialObjects.Person()
test_person.id = "lukeweb"
with self.assertRaises(OperationNotImplementedError) as exp:
request = policy_proc._validate_object_request("BLAH",
"Lastfm", "Image", test_person)
"""
def test_bad_request_badProvider(self):
policy_proc = self.get_good_processor()
test_person = SocialObjects.Person()
test_person.id = "lukeweb"
with self.assertRaises(ServiceGatewayNotFoundError) as exp:
request = policy_proc._validate_object_request("GET",
"NotAProvider", "Image", test_person)
"""
def test_bad_request_badObject(self):
policy_proc = self.get_good_processor()
test_person = SocialObjects.Person()
test_person.id = "lukeweb"
with self.assertRaises(SocialObjectNotSupportedError) as exp:
request = policy_proc._validate_object_request("GET",
"Lastfm", "NotAObject", test_person)
"""
def test_bad_request_badPayload(self):
policy_proc = self.get_good_processor()
test_person = SocialObjects.Person()
with self.assertRaises(DisallowedByPrivacyPolicyError) as exp:
request = policy_proc._validate_object_request("GET",
"Lastfm", "Image", test_person)
"""
|
|
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
from collections import defaultdict
import numpy as np
from ..externals import six
from ..utils import extmath, check_random_state, gen_batches
from ..utils.validation import check_is_fitted
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set that parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iterations=300):
"""Perform mean shift clustering of data using a flat kernel.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features]
        Points used as initial kernel locations.
bin_seeding : boolean
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    max_iterations : int, default 300
        Maximum number of iterations, per seed point, before the mean shift
        loop terminates for that seed point if it has not yet converged.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iterations
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
            # If converged or at max_iterations, add the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iterations):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.cast[np.int32](point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = np.asarray(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
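# A minimal usage sketch of the functions and estimator defined in this
# module; the toy two-blob data below is illustrative only and not part of
# scikit-learn itself.
def _example_mean_shift_usage():
    rng = np.random.RandomState(0)
    # Two well-separated Gaussian blobs.
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + [5, 5]])
    # Estimate a bandwidth, then cluster with binned seeding for speed.
    bandwidth = estimate_bandwidth(X, quantile=0.3)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)
    return ms.cluster_centers_, ms.labels_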
|
|
'''
Created on Jul 1, 2009
This module contains tests for the face recognition algorithms.
@author: bolme
'''
import unittest
import pyvision as pv
import numpy as np
#from optic_flow import *
#from distance import *
#import cv
import os.path
class _TestNormalize(unittest.TestCase):
def setUp(self):
# Eye coordinates generated automatically
#leye = pv.Point(250.336538,174.074519)
#reye = pv.Point(343.828125,180.042067)
fname = os.path.join(pv.__path__[0],'data','misc','lena.jpg')
im = pv.Image(fname,bw_annotate=True)
#affine = pv.AffineFromPoints(leye,reye,pv.Point(48.0,64.0),pv.Point(144.0,64.0),(192,192))
self.tile = im
def test_1_meanStd(self):
'''meanStd Normalization: norm.mean() = 0.0 and norm.std() = 1.0....'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.meanStd(self.tile)
if ilog != None:
ilog.log(norm,label="meanStd_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,places=3)
self.assertAlmostEqual(mat.std(),1.0,places=3)
def test_2_meanUnit(self):
'''meanUnit Normalization: norm.mean() = 0.0 and ||norm|| = 1.0....'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.meanUnit(self.tile)
if ilog != None:
ilog.log(norm,label="meanUnit_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0)
length = np.sqrt((mat**2).sum())
self.assertAlmostEqual(length,1.0,places=4)
def test_3_unit(self):
'''unit Normalization: ||norm|| = 1.0 and dot(norm,im)/||im|| = 1.0.'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.unit(self.tile)
if ilog != None:
ilog.log(norm,label="unit_Normalization")
mat = norm.asMatrix2D()
length = np.sqrt((mat**2).sum())
self.assertAlmostEqual(length,1.0,places=3)
mat = norm.asMatrix2D()
mat = mat.flatten()
im = self.tile.asMatrix2D().flatten()
proj = np.dot(mat,im)
length = np.sqrt((im**2).sum())
self.assertAlmostEqual(proj/length,1.0,places=3)
def test_4_bandPass(self):
'''bandPassFilter Normalization: ...................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.bandPassFilter(self.tile,10.0,4.0)
if ilog != None:
ilog.log(norm,label="bandPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,places=4)
self.assertAlmostEqual(mat.std(),12.090113839874826,delta=0.01)
def test_5_lowPass(self):
'''lowPassFilter Normalization: ....................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.lowPassFilter(self.tile,10.0)
if ilog != None:
ilog.log(norm,label="lowPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),123.69997406005859,delta=0.01)
self.assertAlmostEqual(mat.std(),36.886999835117216,delta=0.01)
def test_6_highPass(self):
'''highPassFilter Normalization: ...................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.highPassFilter(self.tile,10.0)
if ilog != None:
ilog.log(norm,label="highPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,delta=0.001)
self.assertAlmostEqual(mat.std(),22.936873341661158,delta=0.01)
def test_7_veryHighPass(self):
'''highPassFilter Normalization: sigma = 1.5........................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
        # This setting corresponds to the default gaussian in selfQuotient
norm = pv.highPassFilter(self.tile,1.5)
if ilog != None:
ilog.log(norm,label="veryHighPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,delta=4)
self.assertAlmostEqual(mat.std(),8.0027218003238687,delta=0.01)
def test_8_selfQuotient(self):
'''selfQuotient Normalization: .....................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.selfQuotientImage(self.tile)
if ilog != None:
ilog.log(norm,label="selfQuotient_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.98861616849899292,delta=0.001)
self.assertAlmostEqual(mat.std(),0.1647989569275968,delta=0.001)
class _TestSURF(unittest.TestCase):
def test_1_SURF(self):
'''SURF Lena: ......................................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
filename = os.path.join(pv.__path__[0],'data','misc','lena.jpg')
im = pv.Image(filename)
timer = pv.Timer()
keypoints,descriptors = pv.surf(im)
timer.mark("LenaSurf")
if ilog != None:
ilog(timer,"SURFLena")
for each in keypoints:
im.annotateCircle(pv.Point(each[0][0],each[0][1]), each[2])
if ilog != None:
ilog(im,'SurfKeypoints')
self.assertEqual(len(keypoints),len(descriptors))
self.assertEqual(len(keypoints),774)
#print descriptors
def test_2_SURF(self):
'''SURF Taz: .......................................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
filename = os.path.join(pv.__path__[0],'data','test','TAZ_0010.jpg')
im = pv.Image(filename)
timer = pv.Timer()
keypoints,descriptors = pv.surf(im)
timer.mark("TazSurf")
if ilog != None:
ilog(timer,"SURFTaz")
for each in keypoints:
im.annotateCircle(pv.Point(each[0][0],each[0][1]), each[2])
if ilog != None:
ilog(im,'SurfKeypoints')
self.assertEqual(len(keypoints),len(descriptors))
self.assertEqual(len(keypoints),367)
class _TestDistance(unittest.TestCase):
def setUp(self):
'''Initialize the tests'''
def test_1_bool2Ubyte(self):
'''distance::boolToUbyte ...........................................'''
a = np.random.randint(2,size=16) > 0
b = pv.boolToUbyte(a)
c = pv.ubyteToBool(b)
d = pv.boolToUbyte(c)
self.assert_((a == c).sum() == 16)
self.assert_((b == d).sum() == 2)
a = np.random.randint(2,size=5000) > 0
b = pv.boolToUbyte(a)
c = pv.ubyteToBool(b)
d = pv.boolToUbyte(c)
self.assert_((a == c).sum() == 5000)
self.assert_((b == d).sum() == 625)
def test_2_hamming(self):
'''distance::hamming 1..............................................'''
a = np.random.randint(2,size=16) > 0
b = np.random.randint(2,size=16) > 0
bin_hamming = pv.hamming(a,b)
a = pv.boolToUbyte(a)
b = pv.boolToUbyte(b)
byte_hamming = pv.hamming(a,b)
self.assertEquals(bin_hamming,byte_hamming)
def test_3_hamming(self):
'''distance::hamming 2..............................................'''
a = np.random.randint(2,size=1769472) > 0
b = np.random.randint(2,size=1769472) > 0
bin_hamming = pv.hamming(a,b)
a = pv.boolToUbyte(a)
b = pv.boolToUbyte(b)
byte_hamming = pv.hamming(a,b)
self.assertEquals(bin_hamming,byte_hamming)
def test():
'''Run the face test suite.'''
pv.disableCommercialUseWarnings()
normalize_suite = unittest.TestLoader().loadTestsFromTestCase(_TestNormalize)
surf_suite = unittest.TestLoader().loadTestsFromTestCase(_TestSURF)
dist_suite = unittest.TestLoader().loadTestsFromTestCase(_TestDistance)
test_suites = [
normalize_suite,
surf_suite,
dist_suite
]
pyvision_suite = unittest.TestSuite(test_suites)
unittest.TextTestRunner(verbosity=2).run(pyvision_suite)
if __name__ == '__main__':
# By default run the test suite
unittest.main(testRunner = unittest.TextTestRunner(verbosity=2))
|
|
import os
import unittest
from decimal import Decimal
from django.utils.copycompat import copy
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.tests.utils import mysql
from django.contrib.gis.utils.layermapping import LayerMapping, LayerMapError, InvalidDecimal, MissingForeignKey
from models import City, County, CountyFeat, Interstate, ICity1, ICity2, State, city_mapping, co_mapping, cofeat_mapping, inter_mapping
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
# Dictionaries to hold what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
class LayerMapTest(unittest.TestCase):
def test01_init(self):
"Testing LayerMapping initialization."
# Model field that does not exist.
bad1 = copy(city_mapping)
bad1['foobar'] = 'FooField'
# Shapefile field that does not exist.
bad2 = copy(city_mapping)
bad2['name'] = 'Nombre'
# Nonexistent geographic field type.
bad3 = copy(city_mapping)
bad3['point'] = 'CURVE'
# Incrementing through the bad mapping dictionaries and
# ensuring that a LayerMapError is raised.
for bad_map in (bad1, bad2, bad3):
try:
lm = LayerMapping(City, city_shp, bad_map)
except LayerMapError:
pass
else:
self.fail('Expected a LayerMapError.')
# A LookupError should be thrown for bogus encodings.
try:
lm = LayerMapping(City, city_shp, city_mapping, encoding='foobar')
except LookupError:
pass
else:
self.fail('Expected a LookupError')
def test02_simple_layermap(self):
"Test LayerMapping import of a simple point shapefile."
# Setting up for the LayerMapping.
lm = LayerMapping(City, city_shp, city_mapping)
lm.save()
# There should be three cities in the shape file.
self.assertEqual(3, City.objects.count())
# Opening up the shapefile, and verifying the values in each
# of the features made it to the model.
ds = DataSource(city_shp)
layer = ds[0]
for feat in layer:
city = City.objects.get(name=feat['Name'].value)
self.assertEqual(feat['Population'].value, city.population)
self.assertEqual(Decimal(str(feat['Density'])), city.density)
self.assertEqual(feat['Created'].value, city.dt)
# Comparing the geometries.
pnt1, pnt2 = feat.geom, city.point
self.assertAlmostEqual(pnt1.x, pnt2.x, 6)
self.assertAlmostEqual(pnt1.y, pnt2.y, 6)
def test03_layermap_strict(self):
"Testing the `strict` keyword, and import of a LineString shapefile."
# When the `strict` keyword is set an error encountered will force
# the importation to stop.
try:
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True, strict=True)
except InvalidDecimal:
# No transactions for geoms on MySQL; delete added features.
if mysql: Interstate.objects.all().delete()
else:
self.fail('Should have failed on strict import with invalid decimal values.')
# This LayerMapping should work b/c `strict` is not set.
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True)
# Two interstates should have been imported correctly.
self.assertEqual(2, Interstate.objects.count())
# Verifying the values in the layer w/the model.
ds = DataSource(inter_shp)
# Only the first two features of this shapefile are valid.
valid_feats = ds[0][:2]
for feat in valid_feats:
istate = Interstate.objects.get(name=feat['Name'].value)
if feat.fid == 0:
self.assertEqual(Decimal(str(feat['Length'])), istate.length)
elif feat.fid == 1:
# Everything beyond the first two decimal places was truncated,
# because the Interstate model's `length` field has decimal_places=2.
self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)
for p1, p2 in zip(feat.geom, istate.path):
self.assertAlmostEqual(p1[0], p2[0], 6)
self.assertAlmostEqual(p1[1], p2[1], 6)
def county_helper(self, county_feat=True):
"Helper function for ensuring the integrity of the mapped County models."
for name, n, st in zip(NAMES, NUMS, STATES):
# Should only be one record b/c of `unique` keyword.
c = County.objects.get(name=name)
self.assertEqual(n, len(c.mpoly))
self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.
# Multiple records because `unique` was not set.
if county_feat:
qs = CountyFeat.objects.filter(name=name)
self.assertEqual(n, qs.count())
def test04_layermap_unique_multigeometry_fk(self):
"Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
# All the following should work.
try:
# Telling LayerMapping that we want no transformations performed on the data.
lm = LayerMapping(County, co_shp, co_mapping, transform=False)
# Specifying the source spatial reference system via the `source_srs` keyword.
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
# Unique may take tuple or string parameters.
for arg in ('name', ('name', 'mpoly')):
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
except:
self.fail('No exception should be raised for proper use of keywords.')
# Testing invalid params for the `unique` keyword.
for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg)
# No source reference system defined in the shapefile, should raise an error.
if not mysql:
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping)
# Passing in invalid ForeignKey mapping parameters -- must be a dictionary
# mapping for the model the ForeignKey points to.
bad_fk_map1 = copy(co_mapping); bad_fk_map1['state'] = 'name'
bad_fk_map2 = copy(co_mapping); bad_fk_map2['state'] = {'nombre' : 'State'}
self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False)
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False)
# There exist no State models for the ForeignKey mapping to work -- should raise
# a MissingForeignKey exception (this error would be ignored if the `strict`
# keyword is not set).
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)
# Now creating the state models so the ForeignKey mapping may work.
co, hi, tx = State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
co.save(), hi.save(), tx.save()
# If a mapping is specified as a collection, all OGR fields that
# are not collections will be converted into them. For example,
# a Point column would be converted to MultiPoint. Other things being done
# w/the keyword args:
# `transform=False`: Specifies that no transform is to be done; this
# has the effect of ignoring the spatial reference check (because the
# county shapefile does not have implicit spatial reference info).
#
# `unique='name'`: Creates models on the condition that they have
# unique county names; geometries from each feature however will be
# appended to the geometry collection of the unique model. Thus,
# all of the various islands in Honolulu county will be in one
# database record with a MULTIPOLYGON type.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
lm.save(silent=True, strict=True)
# A reference that doesn't use the unique keyword; a new database record will
# be created for each polygon.
lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
lm.save(silent=True, strict=True)
# The county helper is called to ensure integrity of County models.
self.county_helper()
def test05_test_fid_range_step(self):
"Tests the `fid_range` keyword and the `step` keyword of .save()."
# Function for clearing out all the counties before testing.
def clear_counties(): County.objects.all().delete()
# Initializing the LayerMapping object to use in these tests.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
# Bad feature id ranges should raise a type error.
clear_counties()
bad_ranges = (5.0, 'foo', co_shp)
for bad in bad_ranges:
self.assertRaises(TypeError, lm.save, fid_range=bad)
# Step keyword should not be allowed w/`fid_range`.
fr = (3, 5) # layer[3:5]
self.assertRaises(LayerMapError, lm.save, fid_range=fr, step=10)
lm.save(fid_range=fr)
# Feature IDs 3 & 4 are for Galveston County, Texas -- only
# one model is returned because the `unique` keyword was set.
qs = County.objects.all()
self.assertEqual(1, qs.count())
self.assertEqual('Galveston', qs[0].name)
# Feature IDs 5 and beyond are for Honolulu County, Hawaii, and
# FID 0 is for Pueblo County, Colorado.
clear_counties()
lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:]
lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1]
# Only Pueblo & Honolulu counties should be present because of
# the `unique` keyword. Have to set `order_by` on this QuerySet
# or else MySQL will return a different ordering than the other dbs.
qs = County.objects.order_by('name')
self.assertEqual(2, qs.count())
hi, co = tuple(qs)
hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
self.assertEqual('Pueblo', co.name); self.assertEqual(NUMS[co_idx], len(co.mpoly))
self.assertEqual('Honolulu', hi.name); self.assertEqual(NUMS[hi_idx], len(hi.mpoly))
# Testing the `step` keyword -- should get the same counties
# regardless of whether we use a step that divides equally, that is odd,
# or that is larger than the dataset.
for st in (4,7,1000):
clear_counties()
lm.save(step=st, strict=True)
self.county_helper(county_feat=False)
def test06_model_inheritance(self):
"Tests LayerMapping on inherited models. See #12093."
icity_mapping = {'name' : 'Name',
'population' : 'Population',
'density' : 'Density',
'point' : 'POINT',
'dt' : 'Created',
}
# Parent model has geometry field.
lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
lm1.save()
# Grandparent has geometry field.
lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
lm2.save()
self.assertEqual(6, ICity1.objects.count())
self.assertEqual(3, ICity2.objects.count())
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(LayerMapTest))
return s
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim.nets import inception
slim = tf.contrib.slim
class InceptionV3Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
final_endpoint, end_points = inception.inception_v3_base(inputs)
self.assertTrue(final_endpoint.op.name.startswith(
'InceptionV3/Mixed_7c'))
self.assertListEqual(final_endpoint.get_shape().as_list(),
[batch_size, 8, 8, 2048])
expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v3_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV3/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3_base(
inputs, final_endpoint='Mixed_7c')
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
'Mixed_5b': [batch_size, 35, 35, 256],
'Mixed_5c': [batch_size, 35, 35, 288],
'Mixed_5d': [batch_size, 35, 35, 288],
'Mixed_6a': [batch_size, 17, 17, 768],
'Mixed_6b': [batch_size, 17, 17, 768],
'Mixed_6c': [batch_size, 17, 17, 768],
'Mixed_6d': [batch_size, 17, 17, 768],
'Mixed_6e': [batch_size, 17, 17, 768],
'Mixed_7a': [batch_size, 8, 8, 1280],
'Mixed_7b': [batch_size, 8, 8, 2048],
'Mixed_7c': [batch_size, 8, 8, 2048]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v3_arg_scope()):
inception.inception_v3_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(21802784, total_params)
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Mixed_7c' in end_points)
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 2048])
self.assertTrue('PreLogits' in end_points)
pre_logits = end_points['PreLogits']
self.assertListEqual(pre_logits.get_shape().as_list(),
[batch_size, 1, 1, 2048])
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
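# Illustrative note (not in the original test): with depth_multiplier=0.5 every
# Conv/Mixed endpoint keeps half its channels, e.g. Mixed_7c goes from depth 2048
# (see the shape table in testBuildAndCheckAllEndPointsUptoMixed7c) to 1024.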
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 2048])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 299, 299
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v3(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False, reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 299, 299, 3])
logits, _ = inception.inception_v3(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/python
import pygame
import sys
from gi.repository import Gtk
from entity import Entity
from bucket import Bucket
from grape import Grape
from background import Background
import random
class grapes:
def __init__(self, debug):
# Set up a clock for managing the frame rate.
self.clock = pygame.time.Clock()
self.state = 'START'
# Setup game variables
# self.background = Background(0, 0)
# self.bucket = Bucket(-100, 100)
self.grapes = []
self.spawnCount = 0
self.changeGoalCount = 0
self.paused = False
self.debug = debug
# Begin button variables
self.startButtonX = 0
self.startButtonY = 0
self.startButtonSurface = None
self.startButtonWidth = 0
self.startButtonHeight = 0
# Setup current level variables
self.level = 0
self.score = 0
self.totalScore = 0
self.goalScore = 0
self.spawnTime = 0
self.goalResetTime = 0
self.grapeVelocity = 0
self.maxGrapesPerTick = 0
# Setup goal variables
self.currentVerts = -1
self.currentDisplayGrape = None
def set_paused(self, paused):
self.paused = paused
# Called to save the state of the game to the Journal.
def write_file(self, file_path):
pass
# Called to load the state of the game from the Journal.
def read_file(self, file_path):
pass
# Takes the player to the next level
def nextLevel(self):
# Increment total score
self.totalScore += self.score
# Increment the level and reset the level score
self.level += 1
self.score = 0
# Calculate the goal score
self.goalScore = self.level * self.level * 40
# Determine level index
index = ((self.level - 1) % Background.TOTAL_LEVELS) + 1
# Calculate spawn rate, goal change rate, and fall speed
maxCalcLevel = 15
calcLevel = self.level - 1
if calcLevel > maxCalcLevel:
calcLevel = maxCalcLevel
self.spawnTime = 45 - int((calcLevel * 3.5))
self.goalResetTime = 270 - int((calcLevel * 2))
self.grapeVelocity = 5 + int((calcLevel * 1.5))
self.maxGrapesPerTick = 1 + int((calcLevel / 1.5))
if self.spawnTime < 10:
self.spawnTime = 10
if self.goalResetTime < 30:
self.goalResetTime = 30
if self.grapeVelocity > 17:
self.grapeVelocity = 17
if self.maxGrapesPerTick > 5:
self.maxGrapesPerTick = 5
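# Illustrative values implied by the formulas above (not part of the original source):
# level 1 (calcLevel=0) gives spawnTime=45, goalResetTime=270, grapeVelocity=5,
# maxGrapesPerTick=1 and goalScore=40; from level 16 on (calcLevel capped at 15) the
# clamps hold spawnTime at 10, grapeVelocity at 17 and maxGrapesPerTick at 5.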
# Start the music
pygame.mixer.music.stop()
pygame.mixer.music.load("assets/levels/" + str(index) + "/music.ogg")
pygame.mixer.music.play(-1) # Loop the music
# Generate first goal
self.generateNewGoal()
# Generate a new goal for the player
def generateNewGoal(self):
self.currentVerts = random.randint(Grape.MIN_VERTS, Grape.MAX_VERTS)
self.currentDisplayGrape = Grape(40, 10 + 26 + 80, self.currentVerts, 0)
self.currentDisplayGrape.color = (25, 252, 0)
self.randMod = random.randint(1, 5)
# Spawns a grape
def spawnGrape(self, width, offsetIndex):
# Don't spawn grapes off the edge of the screen
self.grapes.append(Grape(random.randrange(Grape.DEFAULT_RADIUS, width - Grape.DEFAULT_RADIUS), -Grape.DEFAULT_RADIUS * (offsetIndex + 1), random.randint(Grape.MIN_VERTS, Grape.MAX_VERTS), self.grapeVelocity))
# The main game loop.
def run(self):
self.running = True
screen = pygame.display.get_surface()
# These needed to be moved for the activity to work
self.background = Background(0, 0)
self.bucket = Bucket(-100, 100)
# Load the font
self.font = pygame.font.SysFont("monospace", 33)
self.juiceFont = pygame.font.SysFont("monospace", 30)
self.titleFont = pygame.font.SysFont("monospace", 120)
# Mixer setup
pygame.mixer.init()
# Sound setup
self.squishEffect = pygame.mixer.Sound('assets/squish.wav')
self.incorrectEffect = pygame.mixer.Sound('assets/incorrect.wav')
# Start the first level
self.nextLevel()
while self.running:
# Pump GTK messages.
while Gtk.events_pending():
Gtk.main_iteration()
pos = pygame.mouse.get_pos()
# Pump PyGame messages.
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.VIDEORESIZE:
pygame.display.set_mode(event.size, pygame.RESIZABLE)
elif event.type == pygame.MOUSEMOTION and self.state == 'GAME':
x, y = pos
# Center the bucket
x -= self.bucket.sprite.get_width() / 2
self.bucket.setPos(x, screen.get_height() * 0.8)
elif event.type == pygame.KEYDOWN: # Shortcut to next level
if self.debug and event.key == pygame.K_n:
self.nextLevel()
elif event.key == pygame.K_p: # Toggle pause status
self.set_paused(not self.paused)
elif event.type == pygame.MOUSEBUTTONDOWN and self.state == 'START':
x, y = pos
width, height = self.titleFont.size("Begin")
if x > self.startButtonX and x < self.startButtonX + self.startButtonWidth and y > self.startButtonY and y < self.startButtonY + self.startButtonHeight:
self.state = 'GAME'
if self.state == 'START':
self.background.draw(1, screen, False)
titleText = "Grapes of Math"
(titleWidth, titleHeight) = self.titleFont.size(titleText)
title = self.titleFont.render(titleText, 1, (200, 200, 200))
screen.blit(title, (screen.get_width() / 2 - (titleWidth / 2), 50))
startText = "Begin"
(self.startButtonWidth, self.startButtonHeight) = self.titleFont.size(startText)
# Only generate this the first draw
if self.startButtonX == 0:
overlayColor = (0, 0, 0, 127)
overlayRect = pygame.Rect(0, 0, self.startButtonWidth, self.startButtonHeight)
overlaySurface = pygame.Surface((300, 160), pygame.SRCALPHA)
overlaySurface.fill(overlayColor, overlayRect)
self.startButtonX = (screen.get_width() / 2 - (self.startButtonWidth / 2))
self.startButtonY = 200
screen.blit(overlaySurface, (self.startButtonX, self.startButtonY))
startButton = self.titleFont.render(startText, 1, (200, 200, 200))
screen.blit(startButton, (self.startButtonX, self.startButtonY))
elif self.state == 'GAME':
if not self.paused:
# Spawn Grapes
if self.spawnCount > random.randrange(self.spawnTime - 5, self.spawnTime):
for i in range(0, random.randint(1, self.maxGrapesPerTick)):
self.spawnGrape(screen.get_width(), i)
self.spawnCount = 0
self.spawnCount += 1
# Change goal
if self.changeGoalCount > random.randrange(self.goalResetTime - 7, self.goalResetTime):
self.generateNewGoal()
self.changeGoalCount = 0
self.changeGoalCount += 1
# Clear Display
screen.fill((255, 255, 255)) # 255 for white
# Draw the background
self.background.draw(self.level, screen, True)
# Draw paused text if paused
if self.paused:
pauseText = "Paused"
(pauseWidth, pauseHeight) = self.titleFont.size(pauseText)
pauseLabel = self.titleFont.render(pauseText, 1, (255, 255, 255))
pauseX = (screen.get_width() / 2) - (pauseWidth / 2)
pauseY = (screen.get_height() / 2) - (pauseHeight / 2)
screen.blit(pauseLabel, (pauseX, pauseY))
# Draw the bucket
self.bucket.draw(screen)
clone = list(self.grapes)
for i, g in enumerate(clone):
if not self.paused:
g.falling = True
g.update()
g.draw(screen)
if self.bucket.catchGrape(g.x, g.y, g.r):
# Delete the grape
del self.grapes[i]
# Check if the grape is correct
if g.numVerts == self.currentVerts:
self.score += int(g.value * 1.5)
self.squishEffect.play()
if self.score >= self.goalScore:
self.nextLevel()
else:
self.score -= g.value / 3
if self.score < 0:
self.score = 0
self.incorrectEffect.play()
else:
g.draw(screen)
# Text drawing
textX = 16
textY = 16
# Draw the current level text
label = self.font.render("Level " + str(self.level), 1, (176, 229, 255))
screen.blit(label, (textX, textY))
textY += 26
# Draw the score
label = self.juiceFont.render("Grape Juice: " + str(self.score) + " / " + str(self.goalScore), 1, (219, 140, 213))
screen.blit(label, (textX, textY))
textY += 26
# Draw the current goal
levelText = "Collect grapes with " + str(self.currentVerts) + " sides"
if self.level == 4:
levelText = "Collect grapes with " + str(self.currentVerts + self.randMod) + ' - ' + str(self.randMod) + " sides"
label = self.juiceFont.render(levelText, 1, (162, 252, 151))
screen.blit(label, (textX, textY))
# Only draw on level one
if self.level == 1:
# Draw the current goal
self.currentDisplayGrape.draw(screen)
# Flip Display
pygame.display.flip()
# Try to stay at 30 FPS
self.clock.tick(30)
# This function is called when the game is run directly from the command line:
# ./TestGame.py
def main():
# Initialize pygame
pygame.init()
# This is the resolution of the XO
xo_screen_width = 1200
xo_screen_height = 900
# XO Mode will make the screen a fixed size
# so the background fills up the screen
xo_mode = True
# Is debugging enabled
debug = False
# Check for low resolution mode (good for testing)
if len(sys.argv) > 1 and sys.argv[1] == "-lowres":
pygame.display.set_mode((800, 600), pygame.RESIZABLE)
debug = True
elif xo_mode:
pygame.display.set_mode((xo_screen_width, xo_screen_height), pygame.RESIZABLE)
else:
pygame.display.set_mode((0, 0), pygame.RESIZABLE)
# Set the window title
pygame.display.set_caption("Grapes of Math")
# Create an instance of the game
game = grapes(debug)
# Start the game
game.run()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# Copyright 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
import yaml
from ros_buildfarm.argument import add_argument_output_dir
from ros_buildfarm.catkin_workspace import call_catkin_make_isolated
from ros_buildfarm.catkin_workspace import clean_workspace
from ros_buildfarm.catkin_workspace import ensure_workspace_exists
from ros_buildfarm.common import Scope
from ros_buildfarm.rosdoc_index import RosdocIndex
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Invoke 'rosdoc_lite' on each package of a workspace")
parser.add_argument(
'--rosdistro-name',
required=True,
help='The name of the ROS distro to identify the setup file to be '
'sourced (if available)')
parser.add_argument(
'--os-code-name',
required=True,
help="The OS code name (e.g. 'trusty')")
parser.add_argument(
'--arch',
required=True,
help="The architecture (e.g. 'amd64')")
parser.add_argument(
'--workspace-root',
required=True,
help='The root path of the workspace to compile')
parser.add_argument(
'--rosdoc-lite-dir',
required=True,
help='The root path of the rosdoc_lite repository')
parser.add_argument(
'--catkin-sphinx-dir',
required=True,
help='The root path of the catkin-sphinx repository')
parser.add_argument(
'--rosdoc-index-dir',
required=True,
help='The root path of the rosdoc_index folder')
parser.add_argument(
'--canonical-base-url',
help='The canonical base URL to add to all generated HTML files')
parser.add_argument(
'pkg_tuples',
nargs='*',
help='A list of package tuples in topological order, each containing '
'the name, the relative path and optionally the package-relative '
'path of the rosdoc config file separated by a colon')
add_argument_output_dir(parser, required=True)
args = parser.parse_args(argv)
ensure_workspace_exists(args.workspace_root)
clean_workspace(args.workspace_root)
with Scope('SUBSECTION', 'build workspace in isolation and install'):
rc = call_catkin_make_isolated(
args.rosdistro_name, args.workspace_root,
['--install', '--cmake-args', '-DCATKIN_SKIP_TESTING=1',
'--catkin-make-args', '-j1'])
# TODO a compile error should still allow generating docs from the static parts
if rc:
return rc
rosdoc_index = RosdocIndex([
os.path.join(args.output_dir, args.rosdistro_name),
os.path.join(args.rosdoc_index_dir, args.rosdistro_name)])
source_space = os.path.join(args.workspace_root, 'src')
for pkg_tuple in args.pkg_tuples:
pkg_name, pkg_subfolder, pkg_rosdoc_config = pkg_tuple.split(':', 2)
with Scope('SUBSECTION', 'rosdoc_lite - %s' % pkg_name):
pkg_path = os.path.join(source_space, pkg_subfolder)
pkg_doc_path = os.path.join(
args.output_dir, 'api_rosdoc', pkg_name)
pkg_tag_path = os.path.join(
args.output_dir, 'symbols', '%s.tag' % pkg_name)
source_cmd = [
'.', os.path.join(
args.workspace_root, 'install_isolated', 'setup.sh'),
]
# for workspaces with only plain cmake packages the setup files
# generated by cmi won't implicitly source the underlays
setup_file = '/opt/ros/%s/setup.sh' % args.rosdistro_name
if os.path.exists(setup_file):
source_cmd = ['.', setup_file, '&&'] + source_cmd
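# Illustrative shell prefix built here (placeholder paths, not from the original source):
# ". /opt/ros/<rosdistro>/setup.sh && . <workspace_root>/install_isolated/setup.sh"
# which is later joined with the PYTHONPATH prefix and the rosdoc_lite command below.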
rosdoc_lite_cmd = [
os.path.join(args.rosdoc_lite_dir, 'scripts', 'rosdoc_lite'),
pkg_path,
'-o', pkg_doc_path,
'-g', pkg_tag_path,
'-t', os.path.join(
args.output_dir, 'rosdoc_tags', '%s.yaml' % pkg_name),
]
print("Invoking `rosdoc_lite` for package '%s': %s" %
(pkg_name, ' '.join(rosdoc_lite_cmd)))
pkg_rc = subprocess.call(
[
'sh', '-c',
' '.join(source_cmd) +
' && ' +
'PYTHONPATH=%s/src:%s/src:$PYTHONPATH ' % (
args.rosdoc_lite_dir, args.catkin_sphinx_dir) +
' '.join(rosdoc_lite_cmd)
], stderr=subprocess.STDOUT, cwd=pkg_path)
if pkg_rc:
rc = pkg_rc
# only if the rosdoc run generates a symbol file do we
# create the corresponding location file
if os.path.exists(pkg_tag_path):
data = {
'docs_url': '../../../api/%s/html' % pkg_name,
'location': '%s/symbols/%s.tag' %
(args.rosdistro_name, pkg_name),
'package': pkg_name,
}
# fetch generator specific output folders from rosdoc_lite
if pkg_rosdoc_config:
output_folders = get_generator_output_folders(
pkg_rosdoc_config, pkg_name)
for generator, output_folder in output_folders.items():
data['%s_output_folder' % generator] = output_folder
rosdoc_index.locations[pkg_name] = [data]
if args.canonical_base_url:
add_canonical_link(
pkg_doc_path, '%s/%s/api/%s' %
(args.canonical_base_url, args.rosdistro_name, pkg_name))
# merge manifest.yaml files
rosdoc_manifest_yaml_file = os.path.join(
pkg_doc_path, 'manifest.yaml')
job_manifest_yaml_file = os.path.join(
args.output_dir, 'manifests', pkg_name, 'manifest.yaml')
if os.path.exists(rosdoc_manifest_yaml_file):
with open(rosdoc_manifest_yaml_file, 'r') as h:
rosdoc_data = yaml.load(h)
else:
# if rosdoc_lite failed to generate the file
rosdoc_data = {}
with open(job_manifest_yaml_file, 'r') as h:
job_data = yaml.load(h)
rosdoc_data.update(job_data)
with open(rosdoc_manifest_yaml_file, 'w') as h:
yaml.safe_dump(rosdoc_data, h, default_flow_style=False)
rosdoc_index.write_modified_data(
args.output_dir, ['locations'])
return rc
# this is reimplemented here since rosdoc_lite can not be used with Python 3
def get_generator_output_folders(pkg_rosdoc_config_file, pkg_name):
output_folders = {}
if pkg_rosdoc_config_file:
with open(pkg_rosdoc_config_file, 'r') as h:
content = h.read()
try:
data = yaml.load(content)
except Exception as e:
print("WARNING: package '%s' has an invalid rosdoc config: %s" %
(pkg_name, e), file=sys.stderr)
else:
if not isinstance(data, list):
print("WARNING: package '%s' has an invalid rosdoc config" %
pkg_name, file=sys.stderr)
else:
for item in data:
if 'builder' not in item:
print("WARNING: package '%s' has an invalid rosdoc config "
"- missing builder key" % pkg_name, file=sys.stderr)
continue
if item.get('output_dir'):
output_folders[item['builder']] = item['output_dir']
return output_folders
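# Illustrative sketch (hypothetical config, not from the original source): a rosdoc
# config of [{'builder': 'doxygen', 'output_dir': 'html'}, {'builder': 'sphinx'}]
# yields {'doxygen': 'html'}; entries without an 'output_dir' are skipped.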
def add_canonical_link(base_path, base_link):
print("add canonical link '%s' to all html files under '%s'" %
(base_link, base_path))
for path, dirs, files in os.walk(base_path):
for filename in [f for f in files if f.endswith('.html')]:
filepath = os.path.join(path, filename)
try:
with open(filepath, 'rb') as h:
data = h.read()
except Exception:
print("error reading file '%s'" % filepath)
raise
if data.find(b'rel="canonical"') != -1:
continue
rel_path = os.path.relpath(filepath, base_path)
link = os.path.join(base_link, rel_path)
data = data.replace(
b'</head>', b'<link rel="canonical" href="' + link.encode() +
b'" />\n</head>', 1)
with open(filepath, 'wb') as h:
h.write(data)
if __name__ == '__main__':
sys.exit(main())
|
|
# -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import inspect
import logging
import re
import string
from datetime import date
from datetime import datetime
from itertools import chain
from weakref import WeakKeyDictionary
from ._compat import int_to_byte
from ._compat import integer_types
from ._compat import iter_bytes
from ._compat import range_type
from ._compat import text_type
_logger = None
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
_legal_cookie_chars = (
string.ascii_letters + string.digits + u"/=!#$%&'*+-.^_`|~:"
).encode("ascii")
_cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"}
for _i in chain(range_type(32), range_type(127, 256)):
_cookie_quoting_map[int_to_byte(_i)] = ("\\%03o" % _i).encode("latin1")
_octal_re = re.compile(br"\\[0-3][0-7][0-7]")
_quote_re = re.compile(br"[\\].")
_legal_cookie_chars_re = br"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_cookie_re = re.compile(
br"""
(?P<key>[^=;]+)
(?:\s*=\s*
(?P<val>
"(?:[^\\"]|\\.)*" |
(?:.*?)
)
)?
\s*;
""",
flags=re.VERBOSE,
)
class _Missing(object):
def __repr__(self):
return "no value"
def __reduce__(self):
return "_missing"
_missing = _Missing()
def _get_environ(obj):
env = getattr(obj, "environ", obj)
assert isinstance(env, dict), (
"%r is not a WSGI environment (has to be a dict)" % type(obj).__name__
)
return env
def _has_level_handler(logger):
"""Check if there is a handler in the logging chain that will handle
the given logger's effective level.
"""
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent
return False
def _log(type, message, *args, **kwargs):
"""Log a message to the 'werkzeug' logger.
The logger is created the first time it is needed. If there is no
level set, it is set to :data:`logging.INFO`. If there is no handler
for the logger's effective level, a :class:`logging.StreamHandler`
is added.
"""
global _logger
if _logger is None:
_logger = logging.getLogger("werkzeug")
if _logger.level == logging.NOTSET:
_logger.setLevel(logging.INFO)
if not _has_level_handler(_logger):
_logger.addHandler(logging.StreamHandler())
getattr(_logger, type)(message.rstrip(), *args, **kwargs)
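# Illustrative usage (assumed call, not part of the original module):
# _log("info", "request took %.2fs", 0.5)
# lazily creates the "werkzeug" logger, sets it to INFO if its level is unset,
# attaches a StreamHandler when nothing in the chain would handle INFO, then logs.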
def _parse_signature(func):
"""Return a signature object for the function."""
if hasattr(func, "im_func"):
func = func.im_func
# if we have a cached validator for this function, return it
parse = _signature_cache.get(func)
if parse is not None:
return parse
# inspect the function signature and collect all the information
if hasattr(inspect, "getfullargspec"):
tup = inspect.getfullargspec(func)
else:
tup = inspect.getargspec(func)
positional, vararg_var, kwarg_var, defaults = tup[:4]
defaults = defaults or ()
arg_count = len(positional)
arguments = []
for idx, name in enumerate(positional):
if isinstance(name, list):
raise TypeError(
"cannot parse functions that unpack tuples in the function signature"
)
try:
default = defaults[idx - arg_count]
except IndexError:
param = (name, False, None)
else:
param = (name, True, default)
arguments.append(param)
arguments = tuple(arguments)
def parse(args, kwargs):
new_args = []
missing = []
extra = {}
# consume as many arguments as positional as possible
for idx, (name, has_default, default) in enumerate(arguments):
try:
new_args.append(args[idx])
except IndexError:
try:
new_args.append(kwargs.pop(name))
except KeyError:
if has_default:
new_args.append(default)
else:
missing.append(name)
else:
if name in kwargs:
extra[name] = kwargs.pop(name)
# handle extra arguments
extra_positional = args[arg_count:]
if vararg_var is not None:
new_args.extend(extra_positional)
extra_positional = ()
if kwargs and kwarg_var is None:
extra.update(kwargs)
kwargs = {}
return (
new_args,
kwargs,
missing,
extra,
extra_positional,
arguments,
vararg_var,
kwarg_var,
)
_signature_cache[func] = parse
return parse
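# Illustrative sketch (hypothetical function, not in the original module): for
# def f(a, b=2): ...
# _parse_signature(f)((1,), {}) returns ([1, 2], {}, [], {}, (), ...), i.e. the
# positional values filled from defaults with nothing missing or extra.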
def _date_to_unix(arg):
"""Converts a timetuple, integer or datetime object into the seconds from
epoch in utc.
"""
if isinstance(arg, datetime):
arg = arg.utctimetuple()
elif isinstance(arg, integer_types + (float,)):
return int(arg)
year, month, day, hour, minute, second = arg[:6]
days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
hours = days * 24 + hour
minutes = hours * 60 + minute
seconds = minutes * 60 + second
return seconds
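# e.g. (illustrative) _date_to_unix(datetime(1970, 1, 2)) == 86400, one day after the epoch.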
class _DictAccessorProperty(object):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(
self,
name,
default=None,
load_func=None,
dump_func=None,
read_only=None,
doc=None,
):
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def __get__(self, obj, type=None):
if obj is None:
return self
storage = self.lookup(obj)
if self.name not in storage:
return self.default
rv = storage[self.name]
if self.load_func is not None:
try:
rv = self.load_func(rv)
except (ValueError, TypeError):
rv = self.default
return rv
def __set__(self, obj, value):
if self.read_only:
raise AttributeError("read only property")
if self.dump_func is not None:
value = self.dump_func(value)
self.lookup(obj)[self.name] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError("read only property")
self.lookup(obj).pop(self.name, None)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.name)
def _cookie_quote(b):
buf = bytearray()
all_legal = True
_lookup = _cookie_quoting_map.get
_push = buf.extend
for char in iter_bytes(b):
if char not in _legal_cookie_chars:
all_legal = False
char = _lookup(char, char)
_push(char)
if all_legal:
return bytes(buf)
return bytes(b'"' + buf + b'"')
def _cookie_unquote(b):
if len(b) < 2:
return b
if b[:1] != b'"' or b[-1:] != b'"':
return b
b = b[1:-1]
i = 0
n = len(b)
rv = bytearray()
_push = rv.extend
while 0 <= i < n:
o_match = _octal_re.search(b, i)
q_match = _quote_re.search(b, i)
if not o_match and not q_match:
rv.extend(b[i:])
break
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j):
_push(b[i:k])
_push(b[k + 1 : k + 2])
i = k + 2
else:
_push(b[i:j])
rv.append(int(b[j + 1 : j + 4], 8))
i = j + 4
return bytes(rv)
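# Round-trip sketch (illustrative, not part of the original module):
# _cookie_quote(b"a,b") == b'"a\\054b"' and _cookie_unquote(b'"a\\054b"') == b"a,b";
# the octal escape comes from _cookie_quoting_map defined above.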
def _cookie_parse_impl(b):
"""Lowlevel cookie parsing facility that operates on bytes."""
i = 0
n = len(b)
while i < n:
match = _cookie_re.search(b + b";", i)
if not match:
break
key = match.group("key").strip()
value = match.group("val") or b""
i = match.end(0)
yield _cookie_unquote(key), _cookie_unquote(value)
def _encode_idna(domain):
# If we're given bytes, make sure they fit into ASCII
if not isinstance(domain, text_type):
domain.decode("ascii")
return domain
# Otherwise check if it's already ascii, then return
try:
return domain.encode("ascii")
except UnicodeError:
pass
# Otherwise encode each part separately
parts = domain.split(".")
for idx, part in enumerate(parts):
parts[idx] = part.encode("idna")
return b".".join(parts)
def _decode_idna(domain):
# If the input is a string, try to encode it to ascii to
# do the idna decoding. If that fails because of a
# unicode error, then we already have a decoded idna domain
if isinstance(domain, text_type):
try:
domain = domain.encode("ascii")
except UnicodeError:
return domain
# Decode each part separately. If a part fails, try to
# decode it with ascii and silently ignore errors. This makes
# most sense because the idna codec does not have error handling
parts = domain.split(b".")
for idx, part in enumerate(parts):
try:
parts[idx] = part.decode("idna")
except UnicodeError:
parts[idx] = part.decode("ascii", "ignore")
return ".".join(parts)
def _make_cookie_domain(domain):
if domain is None:
return None
domain = _encode_idna(domain)
if b":" in domain:
domain = domain.split(b":", 1)[0]
if b"." in domain:
return domain
raise ValueError(
"Setting 'domain' for a cookie on a server running locally (ex: "
"localhost) is not supported by complying browsers. You should "
"have something like: '127.0.0.1 localhost dev.localhost' on "
"your hosts file and then point your server to run on "
"'dev.localhost' and also set 'domain' for 'dev.localhost'"
)
def _easteregg(app=None):
"""Like the name says. But who knows how it works?"""
def bzzzzzzz(gyver):
import base64
import zlib
return zlib.decompress(base64.b64decode(gyver)).decode("ascii")
gyver = u"\n".join(
[
x + (77 - len(x)) * u" "
for x in bzzzzzzz(
b"""
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t"""
).splitlines()
]
)
def easteregged(environ, start_response):
def injecting_start_response(status, headers, exc_info=None):
headers.append(("X-Powered-By", "Werkzeug"))
return start_response(status, headers, exc_info)
if app is not None and environ.get("QUERY_STRING") != "macgybarchakku":
return app(environ, injecting_start_response)
injecting_start_response("200 OK", [("Content-Type", "text/html")])
return [
(
u"""
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body { font: 15px Georgia, serif; text-align: center; }
a { color: #333; text-decoration: none; }
h1 { font-size: 30px; margin: 20px 0 10px 0; }
p { margin: 0 0 30px 0; }
pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>"""
% gyver
).encode("latin1")
]
return easteregged
|
|
# -*- coding: utf-8 -*-
"""
.. module:: debooubuntu.py
:platform: Unix
:synopsis: Script for creating bootable debian based systems
"""
import os
import fabric.contrib.files
from fabric.api import task, execute, env, run, sudo
from fabric.utils import puts, warn, error
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
from fabric.context_managers import settings, cd
from contextlib import contextmanager
@contextmanager
def shell_env(**env_vars):
orig_shell = env['shell']
env_vars_str = ' '.join('{0}={1}'.format(key, value)
for key, value in env_vars.items())
env['shell']='{0} {1}'.format(env_vars_str, orig_shell)
yield
env['shell']= orig_shell
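# Illustrative usage (assumes fabric's default shell of "/bin/bash -l -c"):
# with shell_env(DEBIAN_FRONTEND="noninteractive"):
#     run("apt-get install -y foo")
# runs roughly: DEBIAN_FRONTEND=noninteractive /bin/bash -l -c "apt-get install -y foo"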
def chroot(cmd):
return sudo("chroot mnt/ %s" %cmd)
def chins(cmd):
return sudo("chroot mnt/ apt-get install -y %s" %cmd)
def chbash(cmd):
return sudo("echo '%s' | sudo bash" %cmd)
def upload_template(filename, dest):
return fabric.contrib.files.upload_template(filename, dest,
use_jinja=True, template_dir="templates",
backup=False, use_sudo=True)
def root():
if not env.get("noroot"):
root= env.get("root") or "ubuntu"
if not exists(root): run("mkdir -p %s" %root)
env.noroot= True
return cd(root)
return cd(".")
@task
def prepare( size=2000 ):
"""
Prepares virtual disk images
:param size: Size of the image in megabytes
:type size: int
"""
with root():
if exists("root.img"):
if not confirm("Do you want to create new image?"):
return
execute(unmount)
run("dd if=/dev/zero of=root.img bs=1024k count=%d"% size)
run("mkfs.ext4 -F -L root root.img")
if exists("mnt"):
run("mkdir -p mnt")
@task
def resize( new_size=1800 ):
"""
Resizes virtual disk image
:param new_size: New size of the image in megabytes
:type new_size: int
"""
with root():
# mount image without devices, create temp image and copy data
mount(False)
run("dd if=/dev/zero of=tmp.img bs=1024k count=%d"% new_size)
run("mkfs.ext4 -F -L ubuntu tmp.img")
run("mkdir -p tmp")
sudo("mount -o loop tmp.img tmp/")
sudo("cp -rv mnt/* ./tmp/")
# unmount, remove the old image and rename the temp image
execute(unmount)
run("rm root.img")
sudo("umount tmp.img")
run("mv tmp.img root.img")
@task
def mount(devices=True):
"""
Mounts virtual disk image and required devices
:param devices: Should we mount devices
:type devices: boolean
"""
with root():
if not exists("root.img"):
if confirm("Root image does not seem to exist, create one?"):
execute(prepare)
run("mkdir -p mnt")
execute(unmount)
run("e2fsck -p root.img")
sudo("mount -o loop root.img mnt/")
if devices:
sudo("mkdir -p mnt/proc")
sudo("mount -t proc proc mnt/proc")
sudo("mkdir -p mnt/dev")
sudo("mount --bind /dev mnt/dev")
sudo("mkdir -p mnt/sys")
sudo("mount -t sysfs sysfs mnt/sys")
sudo("mount -t devpts /dev/pts mnt/dev/pts")
@task
def unmount():
"""
Unmounts virtual disk image and devices
"""
with root():
with settings(warn_only=True):
sudo("sudo lsof -t mnt/ | sudo xargs -r kill")
sudo("sudo chroot mnt/ /etc/init.d/udev stop")
sudo("sudo chroot mnt/ /etc/init.d/cron stop")
sudo("umount mnt/proc")
sudo("umount mnt/sys")
sudo("umount mnt/dev/pts")
sudo("umount mnt/dev")
sudo("umount mnt/")
@task
def debootstrap(release= None, mirror= None, target_arch= None):
"""
Debootstraps debian based image
:param release: [Debian](http://www.debian.org/releases/)/[Ubuntu](https://wiki.ubuntu.com/DevelopmentCodeNames) release name
:type release: str
:param mirror: URL of the mirror (default http://de.archive.ubuntu.com/ubuntu/)
:type mirror: str
:param target_arch: architecture name like x86 or amd64
:type target_arch: str
"""
opts = dict(
release= release or env.get("release") or "oneiric",
mirror= mirror or env.get("mirror") or "http://de.archive.ubuntu.com/ubuntu/",
target_arch= target_arch or env.get("target_arch") or "amd64"
)
with root():
opts["target"]= "debootstrap/%(release)s_%(target_arch)s" % opts
if not exists(opts["target"]):
run("mkdir -p %s" %opts["target"])
puts("""Debootstraping release=%(release)s
target=%(target)s mirror=%(mirror)s
target_arch=%(target_arch)s to %(target)s""" % opts)
sudo("debootstrap --arch %(target_arch)s %(release)s %(target)s %(mirror)s" % opts)
@task
def install(password= None, start_ssh=True, release= None, target_arch= None,
install_packages= True):
"""
Creates bootable debian based systems
:param password: Password to set (default root)
:type password: str
:param start_ssh: Should ssh be started on boot (default True)
:type start_ssh: boolean
:param release: [Debian](http://www.debian.org/releases/)/[Ubuntu](https://wiki.ubuntu.com/DevelopmentCodeNames) release name
:type release: str
:param mirror: URL of the mirror (default http://de.archive.ubuntu.com/ubuntu/)
:type mirror: str
:param target_arch: architecture name like x86 or amd64
:type target_arch: str
:param install_packages: Should additional packages be installed (default True)
:type install_packages: boolean
"""
opts = dict(
release= release or env.get("release") or "oneiric",
target_arch= target_arch or env.get("target_arch") or "amd64",
password= password or env.get("password") or "root",
start_ssh= start_ssh or env.get("start_ssh"),
)
with root():
puts("Mounting onyl devices")
execute(unmount)
execute(mount,False)
opts["target"]= "debootstrap/%(release)s_%(target_arch)s" % opts
if not exists(opts["target"]):
execute(debootstrap, release=opts["release"], target_arch=opts["target_arch"])
sudo("cp -rp %(target)s/* ./mnt/" %opts)
execute(mount)
puts("Configuring...")
if not os.path.exists("templates/sources.list"):
chbash("""cat >> mnt/etc/apt/sources.list <<EOF
deb http://archive.ubuntu.com/ubuntu $(lsb_release -cs) main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu $(lsb_release -cs)-security main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu $(lsb_release -cs)-updates main restricted universe multiverse
deb http://archive.canonical.com/ubuntu $(lsb_release -cs) partner
EOF\n
""")
else:
upload_template("sources.list", "mnt/etc/apt/sources.list")
if not os.path.exists("templates/interfaces"):
pass
else:
upload_template("intefaces", "mnt/etc/network/interfaces")
sudo("cp /etc/mtab mnt/etc/mtab")
chbash("""cat >> mnt/etc/apt/apt.conf.d/10periodic <<EOF
APT::Periodic::Enable "1";
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
APT::Periodic::AutocleanInterval "5";
APT::Periodic::Unattended-Upgrade "1";
APT::Periodic::RandomSleep "1800";
EOF\n
""")
chroot("passwd << EOF\n%(password)s\n%(password)s\nEOF\n" % opts)
if install_packages:
with shell_env(DEBIAN_FRONTEND="noninteractive"):
puts("Installing packages...")
chroot("apt-get update -y")
chins("grub-pc")
chins("linux-image")
chins("udev")
chbash("echo \"none /dev/pts devpts defaults 0 0\" >> mnt/etc/fstab")
chbash("echo \"none /proc proc defaults\" >> mnt/etc/fstab")
chins("sudo python-software-properties vim nano joe screen \
unattended-upgrades smartmontools ntp ssh openssh-server")
sudo("sudo lsof -t mnt/ | sudo xargs -r kill")
if opts["start_ssh"]:
chbash("sed -i \"s/Port 22/Port 23/g\" mnt/etc/ssh/sshd_config")
chroot("/etc/init.d/ssh start")
@task
def flash(fsroot= None, swap= None, home= None):
"""
Flashes created debian based image on flash or any disk drive
:param fsroot: Root device (default /dev/sdb1)
:type fsroot: str
:param swap: Swap device (default /dev/sdb2)
:type swap: str
:param home: Home device (optional)
:type home: str
"""
opts = dict(
root= fsroot or env.get("root") or "/dev/sdb1",
swap= swap or env.get("swap") or "/dev/sdb2",
home= home or env.get("home") or None
)
with root():
if not exists("mnt/dev"):
if not exists("root.img"):
error("Your image does not seem to exist...")
warn("Your image does not seem to be mounted...")
if confirm("Should i mount it?"):
execute(mount)
puts("Wrinting image: rootfs=%(root)s, swap=%(swap)s, home=%(home)s" %opts)
if opts["home"]:
fstab="""cat > mnt/etc/fstab <<EOF
# device mount type options freq passno
UUID=$(blkid -o value -s UUID root.img) / ext4 errors=remount-ro,user_xattr 0 1
UUID=$(blkid -o value -s UUID %(swap)s) none swap sw 0 0
UUID=$(blkid -o value -s UUID %(home)s) /home ext4 defaults 0 0
EOF\n
"""
else:
fstab="""cat > mnt/etc/fstab <<EOF
# device mount type options freq passno
UUID=$(blkid -o value -s UUID root.img) / ext4 errors=remount-ro,user_xattr 0 1
UUID=$(blkid -o value -s UUID %(swap)s) none swap sw 0 0
EOF\n
"""
puts("fstab:\n"+fstab)
chbash(fstab %opts)
puts("Writing image to flash drive...")
sudo("dd if=root.img of=%(root)s" %opts)
puts("Installing grub...")
chroot("grub-install %s" %opts["root"][:-1])
chroot("update grub")
execute(unmount)
puts("Image created please dd it to your device")
|
|
#
# pdis.xpath.syntax
#
# Copyright 2004 Helsinki Institute for Information Technology (HIIT)
# and the authors. All rights reserved.
#
# Authors: Ken Rimey <rimey@hiit.fi>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
XPath syntax nodes
"""
from edgy.xml.xpath.atoms import *
from edgy.xml.xpath.evaluate import to_number, to_boolean, compare, do_step, do_function_call
from edgy.xml.xpath.data_model import is_node_set, join_node_sets
from edgy.xml.xpath.xpath_exceptions import XPathNotImplementedError, XPathEvaluationError
#
# Expression nodes
#
class UnaryOp:
"""
Unary expression node
op -- operator (a string).
right -- child node.
The operator is actually always "-".
"""
def __init__(self, op, right):
self.op = op
self.right = right
def __str__(self):
return "(%s %s)" % (self.op, self.right)
def evaluate(self, context):
assert self.op == '-'
return - to_number(self.right.evaluate(context))
class BinaryOp:
"""
Binary expression node
op -- operator (a string).
left -- left-hand child node.
right -- right-hand child node.
"""
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def __str__(self):
return "(%s %s %s)" % (self.left, self.op, self.right)
def evaluate(self, context):
if self.op == 'or':
if to_boolean(self.left.evaluate(context)):
return True
return to_boolean(self.right.evaluate(context))
elif self.op == 'and':
if not to_boolean(self.left.evaluate(context)):
return False
return to_boolean(self.right.evaluate(context))
elif self.op in ['=', '!=', '<', '>', '<=', '>=']:
return compare(self.op, self.left.evaluate(context),
self.right.evaluate(context))
elif self.op in ['+', '-', '*', 'div', 'mod']:
x = to_number(self.left.evaluate(context))
y = to_number(self.right.evaluate(context))
if self.op == '+':
return x + y
elif self.op == '-':
return x - y
elif self.op == '*':
return x * y
elif self.op == 'div':
return x / y
elif self.op == 'mod':
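                # XPath 1.0 'mod' truncates toward zero: the result takes the
                # sign of the dividend (e.g. -7 mod 3 = -1, 7 mod -3 = 1),
                # unlike Python's %, whose result follows the divisor.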
z = abs(x) % abs(y)
if x >= 0:
return z
else:
return -z
else:
assert False
elif self.op == '|':
x = self.left.evaluate(context)
y = self.right.evaluate(context)
if is_node_set(x) and is_node_set(y):
# XXX This is incorrect, because it neither preserves
# document order nor removes duplicates.
return join_node_sets(x, y)
else:
raise XPathEvaluationError, "Operands of '|' must be node sets."
else:
assert False
class FunctionCall:
"""
Function call node
function -- FunctionName.
argument_list -- list of zero or more nodes.
"""
def __init__(self, function, argument_list):
self.function = function
self.argument_list = argument_list
def __str__(self):
return "%s(%s)" % (self.function, ", ".join(map(str, self.argument_list)))
def evaluate(self, context):
if self.function.prefix:
raise XPathNotImplementedError, \
"Namespace prefixes for function names not implemented."
name = self.function.local_part
args = [arg.evaluate(context) for arg in self.argument_list]
return do_function_call(name, args, context)
#
# Location path nodes
#
class Root:
"""
Node representing the head of an absolute location path
"""
def __init__(self):
pass
def __str__(self):
return "/"
def evaluate(self, context):
return [context.get_root()]
class LocationStep:
"""
Node representing a step in a location path
prefix -- preceding LocationStep, Root, None, or some other node.
axis -- axis name (a string).
node_test -- NameTest, NodeType, or Literal.
predicate_list -- list of zero or more nodes.
A value of None for the prefix indicates that this is the head
of a relative location path. A Literal value for the node
test represents a parameterized processing-instruction test.
"""
def __init__(self, prefix, axis, node_test, predicate_list):
self.prefix = prefix
self.axis = axis
self.node_test = node_test
self.predicate_list = predicate_list
def __str__(self):
parts = []
if self.prefix is None:
pass
elif isinstance(self.prefix, Root):
parts.append("/")
else:
parts.append("%s/" % self.prefix)
parts.append("%s::" % self.axis)
if isinstance(self.node_test, NodeType):
parts.append("%s()" % self.node_test)
elif isinstance(self.node_test, Literal):
parts.append("processing-instruction(%s)" % self.node_test)
else:
parts.append("%s" % self.node_test)
for predicate in self.predicate_list:
parts.append("[%s]" % predicate)
return "".join(parts)
def evaluate(self, context):
if self.prefix == None:
node_set = [context.node]
else:
node_set = self.prefix.evaluate(context)
assert is_node_set(node_set)
return do_step(node_set, self.axis, self.node_test, self.predicate_list, context)
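
# Hedged sketch of how these nodes compose; the literal node below is a
# hypothetical stand-in (real literal/number nodes come from
# edgy.xml.xpath.atoms, whose constructors are not shown in this module):
#
#   class _Num(object):
#       def __init__(self, v): self.v = v
#       def __str__(self): return str(self.v)
#       def evaluate(self, context): return self.v
#
#   expr = BinaryOp('*', BinaryOp('+', _Num(1), _Num(2)), _Num(4))
#   str(expr)            # "((1 + 2) * 4)"
#   expr.evaluate(None)  # 12 as an XPath number (the context is unused here)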
|
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.util import (display_for_field, flatten_fieldsets,
label_for_field, lookup_field, NestedObjects)
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
from django.contrib.sites.models import Site
from django.db import models, DEFAULT_DB_ALIAS
from django import forms
from django.test import SimpleTestCase, TestCase
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from django.utils import six
from .models import Article, Count, Event, Location, EventGuide
class NestedObjectsTests(TestCase):
"""
    Tests for the ``NestedObjects`` utility collection.
"""
def setUp(self):
self.n = NestedObjects(using=DEFAULT_DB_ALIAS)
self.objs = [Count.objects.create(num=i) for i in range(5)]
def _check(self, target):
self.assertEqual(self.n.nested(lambda obj: obj.num), target)
def _connect(self, i, j):
self.objs[i].parent = self.objs[j]
self.objs[i].save()
def _collect(self, *indices):
self.n.collect([self.objs[i] for i in indices])
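    # The helpers above build a small parent/child tree out of Count objects:
    # _connect(i, j) makes objs[i] a child of objs[j], _collect() feeds the
    # chosen roots to NestedObjects.collect(), and _check() compares
    # n.nested(lambda obj: obj.num) against a nested list such as
    # [0, [1, 2]], meaning objects 1 and 2 sit under object 0.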
def test_unrelated_roots(self):
self._connect(2, 1)
self._collect(0)
self._collect(1)
self._check([0, 1, [2]])
def test_siblings(self):
self._connect(1, 0)
self._connect(2, 0)
self._collect(0)
self._check([0, [1, 2]])
def test_non_added_parent(self):
self._connect(0, 1)
self._collect(0)
self._check([0])
def test_cyclic(self):
self._connect(0, 2)
self._connect(1, 0)
self._connect(2, 1)
self._collect(0)
self._check([0, [1, [2]]])
def test_queries(self):
self._connect(1, 0)
self._connect(2, 0)
# 1 query to fetch all children of 0 (1 and 2)
# 1 query to fetch all children of 1 and 2 (none)
# Should not require additional queries to populate the nested graph.
self.assertNumQueries(2, self._collect, 0)
def test_on_delete_do_nothing(self):
"""
Check that the nested collector doesn't query for DO_NOTHING objects.
"""
n = NestedObjects(using=DEFAULT_DB_ALIAS)
objs = [Event.objects.create()]
EventGuide.objects.create(event=objs[0])
with self.assertNumQueries(2):
# One for Location, one for Guest, and no query for EventGuide
n.collect(objs)
class UtilTests(SimpleTestCase):
def test_values_from_lookup_field(self):
"""
Regression test for #12654: lookup_field
"""
SITE_NAME = 'example.com'
TITLE_TEXT = 'Some title'
CREATED_DATE = datetime.min
ADMIN_METHOD = 'admin method'
SIMPLE_FUNCTION = 'function'
INSTANCE_ATTRIBUTE = 'attr'
class MockModelAdmin(object):
def get_admin_value(self, obj):
return ADMIN_METHOD
simple_function = lambda obj: SIMPLE_FUNCTION
article = Article(
site=Site(domain=SITE_NAME),
title=TITLE_TEXT,
created=CREATED_DATE,
)
article.non_field = INSTANCE_ATTRIBUTE
verifications = (
('site', SITE_NAME),
('created', localize(CREATED_DATE)),
('title', TITLE_TEXT),
('get_admin_value', ADMIN_METHOD),
(simple_function, SIMPLE_FUNCTION),
('test_from_model', article.test_from_model()),
('non_field', INSTANCE_ATTRIBUTE)
)
mock_admin = MockModelAdmin()
for name, value in verifications:
field, attr, resolved_value = lookup_field(name, article, mock_admin)
if field is not None:
resolved_value = display_for_field(resolved_value, field)
self.assertEqual(value, resolved_value)
def test_null_display_for_field(self):
"""
Regression test for #12550: display_for_field should handle None
value.
"""
display_value = display_for_field(None, models.CharField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.CharField(
choices=(
(None, "test_none"),
)
))
self.assertEqual(display_value, "test_none")
display_value = display_for_field(None, models.DateField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.TimeField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
# Regression test for #13071: NullBooleanField has special
# handling.
display_value = display_for_field(None, models.NullBooleanField())
expected = '<img src="%sadmin/img/icon-unknown.gif" alt="None" />' % settings.STATIC_URL
self.assertHTMLEqual(display_value, expected)
display_value = display_for_field(None, models.DecimalField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.FloatField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
def test_label_for_field(self):
"""
Tests for label_for_field
"""
self.assertEqual(
label_for_field("title", Article),
"title"
)
self.assertEqual(
label_for_field("title2", Article),
"another name"
)
self.assertEqual(
label_for_field("title2", Article, return_attr=True),
("another name", None)
)
self.assertEqual(
label_for_field("__unicode__", Article),
"article"
)
self.assertEqual(
label_for_field("__str__", Article),
str("article")
)
self.assertRaises(
AttributeError,
lambda: label_for_field("unknown", Article)
)
def test_callable(obj):
return "nothing"
self.assertEqual(
label_for_field(test_callable, Article),
"Test callable"
)
self.assertEqual(
label_for_field(test_callable, Article, return_attr=True),
("Test callable", test_callable)
)
self.assertEqual(
label_for_field("test_from_model", Article),
"Test from model"
)
self.assertEqual(
label_for_field("test_from_model", Article, return_attr=True),
("Test from model", Article.test_from_model)
)
self.assertEqual(
label_for_field("test_from_model_with_override", Article),
"not What you Expect"
)
self.assertEqual(
label_for_field(lambda x: "nothing", Article),
"--"
)
class MockModelAdmin(object):
def test_from_model(self, obj):
return "nothing"
test_from_model.short_description = "not Really the Model"
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
"not Really the Model"
)
self.assertEqual(
label_for_field("test_from_model", Article,
model_admin = MockModelAdmin,
return_attr = True
),
("not Really the Model", MockModelAdmin.test_from_model)
)
def test_label_for_property(self):
# NOTE: cannot use @property decorator, because of
# AttributeError: 'property' object has no attribute 'short_description'
class MockModelAdmin(object):
def my_property(self):
return "this if from property"
my_property.short_description = 'property short description'
test_from_property = property(my_property)
self.assertEqual(
label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
'property short description'
)
def test_related_name(self):
"""
Regression test for #13963
"""
self.assertEqual(
label_for_field('location', Event, return_attr=True),
('location', None),
)
self.assertEqual(
label_for_field('event', Location, return_attr=True),
('awesome event', None),
)
self.assertEqual(
label_for_field('guest', Event, return_attr=True),
('awesome guest', None),
)
def test_logentry_unicode(self):
"""
Regression test for #15661
"""
log_entry = admin.models.LogEntry()
log_entry.action_flag = admin.models.ADDITION
self.assertTrue(
six.text_type(log_entry).startswith('Added ')
)
log_entry.action_flag = admin.models.CHANGE
self.assertTrue(
six.text_type(log_entry).startswith('Changed ')
)
log_entry.action_flag = admin.models.DELETION
self.assertTrue(
six.text_type(log_entry).startswith('Deleted ')
)
        # Make sure custom action_flag values work
log_entry.action_flag = 4
self.assertEqual(six.text_type(log_entry), 'LogEntry Object')
def test_safestring_in_field_label(self):
# safestring should not be escaped
class MyForm(forms.Form):
text = forms.CharField(label=mark_safe('<i>text</i>'))
cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline"><i>text</i>:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i>:</label>')
        # normal strings need to be escaped
class MyForm(forms.Form):
text = forms.CharField(label='&text')
cb = forms.BooleanField(label='&cb')
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline">&text:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">&cb:</label>')
def test_flatten_fieldsets(self):
"""
Regression test for #18051
"""
fieldsets = (
(None, {
'fields': ('url', 'title', ('content', 'sites'))
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
fieldsets = (
(None, {
'fields': ('url', 'title', ['content', 'sites'])
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 10 14:41:26 2016
@author: Stuart
"""
import sys
import os
import os.path
import datetime
import traceback
import Tkinter as tk
import tkFileDialog
import tkMessageBox
from ..update import update
from ..share.share import ShareXPortfolio
from ..share.share_factory import ShareAnalysisFactory
from ..share.share_matrix import ShareMatrix
from ..core import benchmark
from ..core import analysis as core_analysis
from ..configuration.preferences_configuration import Preferences
from ..configuration.benchmark_configuration import BenchmarkConfiguration
from ..configuration.analysis_configuration import AnalysisConfiguration
from ..configuration.portfolio_configuration import PortfolioConfiguration
from ..exceptions.handling import ExceptionHandler
from ..core.status import Status
import version as ver
import base_dialog
import analysis
import portfolio
from preferences import PreferencesDialog
from visualisation import VisualisationDialogFactory
class ExportDataSetDialog(base_dialog.BaseDialog):
def __init__(self, master):
self.cleanDataset = True
self.allDatasets = False
self.calibrationDatasets = False
base_dialog.BaseDialog.__init__(self, master)
def validate(self):
valid = any(self.get_selections())
if valid:
return 1
else:
return 0
def body(self, master):
spacer = tk.Label(master, text=" " * 30)
spacer.grid(row=self.row,
column=self.titleColumn,
columnspan=2)
spacer = tk.Label(master, text=" " * 30)
spacer.grid(row=self.row,
column=self.secondButtonColumn, columnspan=2)
self.row += 1
clean_dataset = self.cleanDataset
allDatasets = self.allDatasets
calibrationDatasets = self.calibrationDatasets
self.cleanDataset = self.addCheckBox(master,
"Clean Combined Dataset:",
clean_dataset)
spacer = tk.Label(master, text="Extra Time Series:")
spacer.grid(row=self.row,
column=self.titleColumn,
columnspan=2)
self.row += 1
self.allDatasets = self.addCheckBox(master,
" Filtered Individual Datasets:",
allDatasets)
self.calibrationDatasets = self.addCheckBox(master,
" Calibration Datasets:",
calibrationDatasets)
def get_selections(self):
return (bool(self.cleanDataset.get()),
bool(self.allDatasets.get()),
bool(self.calibrationDatasets.get()))
def apply(self):
return self.get_selections()
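    # get_selections() returns a 3-tuple of booleans: (clean combined dataset,
    # filtered individual datasets, calibration datasets). apply() is presumably
    # invoked by BaseDialog when the dialog is accepted; ExportTimeSeries below
    # unpacks the same tuple via selections.get_selections().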
class UserInterface:
def __init__(self, preferences):
ExceptionHandler.initialize_handler(self.add_exception)
Status.initialize_status(self.add_message, self.set_portfolio_status, preferences.verbosity)
self.analysis = None
self.analysisConfiguration = None
self.portfolioConfiguration = None
self.root = tk.Tk()
screen_width = self.root.winfo_screenwidth()
screen_height = self.root.winfo_screenheight()
if screen_width > 1100 and screen_height > 500:
self.root.geometry("1100x500")
else:
self.root.geometry("860x400")
self.root.title("PCWG")
try:
self.root.iconbitmap(os.path.join("Resources", "logo.ico"))
except:
Status.add("Can't set icon")
self.verbosity = Preferences.get().verbosity
console_frame = tk.Frame(self.root)
command_frame = tk.Frame(self.root)
# analyse
analyse_group = tk.LabelFrame(command_frame,
text="Analysis",
padx=5,
pady=5)
analyse_group_top = tk.Frame(analyse_group)
analyse_group_bottom = tk.Frame(analyse_group)
load_button = tk.Button(analyse_group_bottom,
text="Load",
command=self.LoadAnalysis)
edit_button = tk.Button(analyse_group_bottom,
text="Edit",
command=self.EditAnalysis)
new_button = tk.Button(analyse_group_bottom,
text="New",
command=self.NewAnalysis)
calculate_button = tk.Button(analyse_group_top,
text="Calculate",
command=self.Calculate)
export_report_button = tk.Button(analyse_group_top,
text="Export Report",
command=self.ExportReport)
export_time_series_button = tk.Button(analyse_group_top,
text="Export Time Series",
command=self.ExportTimeSeries)
export_training_data_button = tk.Button(analyse_group_top,
text="Export Training Data",
command=self.ExportTrainingData)
export_pdm_button = tk.Button(analyse_group_top,
text="Export PDM",
command=self.ExportPDM)
visualise_button = tk.Button(analyse_group_top,
text="Visulalise",
command=self.visualise)
self.visualisation = tk.StringVar(analyse_group_top, "Power Curve")
visualisation_options = ['Power Curve',
'Turbulence by Direction',
'Turbulence by Speed',
'Turbulence by Shear',
'Shear by Direction',
'Shear by Speed',
'Power Coefficient by Speed']
        self.visualisation_menu = tk.OptionMenu(analyse_group_top,
                                                self.visualisation,
                                                *visualisation_options)
load_button.pack(side=tk.RIGHT, padx=5, pady=5)
edit_button.pack(side=tk.RIGHT, padx=5, pady=5)
new_button.pack(side=tk.RIGHT, padx=5, pady=5)
calculate_button.pack(side=tk.LEFT, padx=5, pady=5)
export_report_button.pack(side=tk.LEFT, padx=5, pady=5)
export_time_series_button.pack(side=tk.LEFT, padx=5, pady=5)
export_training_data_button.pack(side=tk.LEFT, padx=5, pady=5)
export_pdm_button.pack(side=tk.LEFT, padx=5, pady=5)
visualise_button.pack(side=tk.LEFT, padx=5, pady=5)
        self.visualisation_menu.pack(side=tk.LEFT, padx=5, pady=5)
self.analysisFilePathLabel = tk.Label(analyse_group_bottom,
text="Analysis File")
self.analysisFilePathTextBox = tk.Entry(analyse_group_bottom)
self.analysisFilePathTextBox.config(state=tk.DISABLED)
self.analysisFilePathLabel.pack(side=tk.LEFT,
anchor=tk.NW,
padx=5,
pady=5)
self.analysisFilePathTextBox.pack(side=tk.RIGHT,
anchor=tk.NW,
fill=tk.X,
expand=1,
padx=5,
pady=5)
analyse_group_bottom.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=1)
analyse_group_top.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
analyse_group.pack(side=tk.TOP,
padx=10,
pady=5,
anchor=tk.NW,
fill=tk.X,
expand=1)
# portfolio
portfolio_group = tk.LabelFrame(command_frame,
text="PCWG-Share-X",
padx=5,
pady=5)
portfolio_group_top = tk.Frame(portfolio_group)
portfolio_group_bottom = tk.Frame(portfolio_group)
run_portfolio_button = tk.Button(portfolio_group_top,
text="PCWG-Share-1.0",
command=self.PCWG_Share_1_Portfolio)
run_portfolio_button.pack(side=tk.LEFT, padx=5, pady=5)
run_portfolio_button = tk.Button(portfolio_group_top,
text="PCWG-Share-1.1",
command=self.PCWG_Share_1_dot_1_Portfolio)
run_portfolio_button.pack(side=tk.LEFT, padx=5, pady=5)
run_portfolio_button = tk.Button(portfolio_group_top,
text="PCWG-Share-2.0",
command=self.PCWG_Share_2_Portfolio)
run_portfolio_button.pack(side=tk.LEFT, padx=5, pady=5)
run_portfolio_button = tk.Button(portfolio_group_top,
text="PCWG-Share-3.0",
command=self.PCWG_Share_3_Portfolio)
run_portfolio_button.pack(side=tk.LEFT, padx=5, pady=5)
run_portfolio_button = tk.Button(portfolio_group_top,
text="Share Matrix",
command=self.Share_Matrix)
run_portfolio_button.pack(side=tk.LEFT, padx=5, pady=5)
self.portfolio_status = tk.StringVar()
portfolio_status_label = tk.Label(portfolio_group_top,
font = "Verdana 10 bold",
textvariable=self.portfolio_status,
fg = "blue")
portfolio_status_label.pack(side=tk.RIGHT, padx=5, pady=5)
load_portfolio_button = tk.Button(portfolio_group_bottom,
text="Load",
command=self.load_portfolio)
edit_portfolio_button = tk.Button(portfolio_group_bottom,
text="Edit",
command=self.edit_portfolio)
new_portfolio_button = tk.Button(portfolio_group_bottom,
text="New",
command=self.new_portfolio)
load_portfolio_button.pack(side=tk.RIGHT, padx=5, pady=5)
edit_portfolio_button.pack(side=tk.RIGHT, padx=5, pady=5)
new_portfolio_button.pack(side=tk.RIGHT, padx=5, pady=5)
self.portfolioFilePathLabel = tk.Label(portfolio_group_bottom,
text="Portfolio File")
self.portfolioFilePathTextBox = tk.Entry(portfolio_group_bottom)
self.portfolioFilePathTextBox.config(state=tk.DISABLED)
self.portfolioFilePathLabel.pack(side=tk.LEFT,
anchor=tk.NW,
padx=5,
pady=5)
self.portfolioFilePathTextBox.pack(side=tk.RIGHT,
anchor=tk.NW,
fill=tk.X,
expand=1,
padx=5,
pady=5)
portfolio_group_bottom.pack(side=tk.BOTTOM,
fill=tk.BOTH,
expand=1)
portfolio_group_top.pack(side=tk.TOP,
fill=tk.BOTH,
expand=1)
portfolio_group.pack(side=tk.LEFT,
padx=10,
pady=5,
fill=tk.X,
expand=1)
# misc
misc_group = tk.LabelFrame(command_frame,
text="Miscellaneous",
padx=5,
pady=5)
misc_group_top = tk.Frame(misc_group)
        misc_group_bottom = tk.Frame(misc_group)
benchmark_button = tk.Button(misc_group_top,
text="Benchmark",
command=self.RunBenchmark)
clear_console_button = tk.Button(misc_group_top,
text="Clear Console",
command=self.ClearConsole)
        about_button = tk.Button(misc_group_bottom,
text="About",
command=self.About)
        preferences_button = tk.Button(misc_group_bottom,
text="Preferences",
command=self.preferences)
benchmark_button.pack(side=tk.LEFT, padx=5, pady=5)
clear_console_button.pack(side=tk.LEFT, padx=5, pady=5)
about_button.pack(side=tk.LEFT, padx=5, pady=5)
preferences_button.pack(side=tk.LEFT, padx=5, pady=5)
        misc_group_bottom.pack(side=tk.BOTTOM)
misc_group_top.pack(side=tk.TOP)
misc_group.pack(side=tk.RIGHT, padx=10, pady=5)
# console
scrollbar = tk.Scrollbar(console_frame,
orient=tk.VERTICAL)
self.listbox = tk.Listbox(console_frame,
yscrollcommand=scrollbar.set,
selectmode=tk.EXTENDED)
scrollbar.configure(command=self.listbox.yview)
self.listbox.grid(column=0, row=0, sticky='nsew')
scrollbar.grid(column=1, row=0, sticky='ns')
console_frame.grid_columnconfigure(0, weight=1)
console_frame.grid_columnconfigure(1, weight=0)
console_frame.grid_rowconfigure(0, weight=1)
command_frame.grid(row=0, column=0, sticky=tk.W+tk.E+tk.N+tk.S)
console_frame.grid(row=1, column=0, sticky=tk.W+tk.E+tk.N+tk.S)
self.root.grid_columnconfigure(0, weight=1)
self.root.grid_rowconfigure(0, weight=0)
self.root.grid_rowconfigure(1, weight=1)
preferences = Preferences.get()
if len(preferences.analysisLastOpened) > 0:
try:
Status.add("Loading last analysis opened")
self.LoadAnalysisFromPath(preferences.analysisLastOpened)
except IOError:
Status.add("Couldn't load last analysis: File could not be found.")
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Couldn't load last analysis")
if len(preferences.portfolioLastOpened) > 0 and os.path.isfile(preferences.portfolioLastOpened):
try:
Status.add("Loading last portfolio opened")
self.LoadPortfolioFromPath(preferences.portfolioLastOpened)
except IOError:
Status.add("Couldn't load last portfolio: File could not be found.")
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Couldn't load last portfolio")
self.update()
self.root.mainloop()
def update(self):
updator = update.Updator()
if updator.is_update_available:
if tkMessageBox.askyesno("New Version Available",
"A new version is available (current version {0}), do you want to upgrade to {1} (restart required)?".format(updator.current_version, updator.latest_version)):
try:
updator.download_latest_version()
except ExceptionHandler.ExceptionType as e:
Status.add("Failed to download latest version: {0}".format(e), red=True)
return
try:
updator.start_extractor()
except ExceptionHandler.ExceptionType as e:
Status.add("Cannot start extractor: {0}".format(e), red=True)
return
Status.add("Exiting")
sys.exit(0)
else:
Status.add("No updates available")
def RunBenchmark(self):
preferences = Preferences.get()
self.ClearConsole()
# read the benchmark config xml
path = tkFileDialog.askopenfilename(parent=self.root,
title="Select Benchmark Configuration",
initialdir=preferences.benchmark_last_opened_dir(),
initialfile=preferences.benchmark_last_opened_file())
if len(path) > 0:
try:
preferences.benchmarkLastOpened = path
preferences.save()
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Cannot save preferences")
Status.add("Loading benchmark configuration file: %s" % path)
benchmarkConfig = BenchmarkConfiguration(path)
Status.add("Loaded benchmark configuration: %s" % benchmarkConfig.name)
Status.add("")
benchmarkPassed = True
totalTime = 0.0
failures = []
for i in range(len(benchmarkConfig.benchmarks)):
benchmark = benchmarkConfig.benchmarks[i]
Status.add("Executing Benchmark %d of %d" % (i + 1, len(benchmarkConfig.benchmarks)))
benchmarkResults, time_taken = self.BenchmarkAnalysis(benchmark.absolute_path, benchmarkConfig.tolerance, benchmark.base_line_mode, benchmark.expectedResults)
if not benchmarkResults:
failures.append(benchmark.absolute_path)
benchmarkPassed = benchmarkPassed & benchmarkResults
totalTime += time_taken
if benchmarkPassed:
Status.add("All benchmarks passed")
else:
Status.add("There are {0} failing benchmark(s):".format(len(failures)), red=True)
for failure in failures:
Status.add("- {0}".format(failure, red=True))
Status.add("Total Time Taken: %fs" % totalTime)
else:
Status.add("No benchmark loaded", red=True)
def BenchmarkAnalysis(self, path, tolerance, base_line_mode, dictExpectedResults):
Status.add("Calculating %s (please wait)..." % path)
Status.add("Benchmark Tolerance: %s" % self.formatPercentTwoDP(tolerance))
benchmarkPassed = True
start = datetime.datetime.now()
try:
analysis = benchmark.BenchmarkAnalysis(AnalysisConfiguration(path), base_line_mode)
except ExceptionHandler.ExceptionType as e:
analysis = None
Status.add("ERROR: {0}".format(e))
benchmarkPassed = False
if analysis is not None:
for (field, value) in dictExpectedResults.iteritems():
try:
benchmarkPassed = benchmarkPassed & self.compareBenchmark(field, value, float(eval("analysis.%s" % field)), tolerance)
except Exception as e:
Status.add("Evaluation of analysis.{f} has failed, does this property exist? {e}".format(f=field, e=e))
benchmarkPassed = False
if benchmarkPassed:
Status.add("Benchmark Passed")
else:
Status.add("Benchmark Failed", red=True)
end = datetime.datetime.now()
timeTaken = (end - start).total_seconds()
Status.add("Time Taken: %fs" % timeTaken)
Status.add("")
return (benchmarkPassed, timeTaken)
def formatPercentTwoDP(self, value):
return "%0.2f%%" % (value * 100.0)
def compareBenchmark(self, title, expected, actual, tolerance):
diff = abs(expected - actual)
passed = (diff <= tolerance)
text = "{title}: {expec:0.10} (expected) vs {act:0.10} (actual) =>".format(title=title, expec=expected, act=actual)
if passed:
Status.add("%s passed" % text)
else:
Status.add("%s failed" % text, red=True)
return passed
def EditAnalysis(self):
if self.analysisConfiguration is None:
Status.add("ERROR: Analysis not loaded", red=True)
return
analysis.AnalysisConfigurationDialog(self.root,
self.LoadAnalysisFromPath,
self.analysisConfiguration)
def NewAnalysis(self):
conf = AnalysisConfiguration()
analysis.AnalysisConfigurationDialog(self.root,
self.LoadAnalysisFromPath, conf)
def LoadAnalysis(self):
preferences = Preferences.get()
fileName = tkFileDialog.askopenfilename(parent=self.root,
initialdir=preferences.analysis_last_opened_dir(),
defaultextension=".xml")
if len(fileName) < 1:
return
self.LoadAnalysisFromPath(fileName)
def LoadAnalysisFromPath(self, fileName):
try:
preferences = Preferences.get()
preferences.analysisLastOpened = fileName
preferences.save()
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Cannot save preferences")
self.analysisFilePathTextBox.config(state=tk.NORMAL)
self.analysisFilePathTextBox.delete(0, tk.END)
self.analysisFilePathTextBox.insert(0, fileName)
self.analysisFilePathTextBox.config(state=tk.DISABLED)
self.analysis = None
self.analysisConfiguration = None
if len(fileName) > 0:
try:
self.analysisConfiguration = AnalysisConfiguration(fileName)
Status.add("Analysis config loaded: %s" % fileName)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR loading config")
def LoadPortfolioFromPath(self, fileName):
try:
preferences = Preferences.get()
preferences.portfolioLastOpened = fileName
preferences.save()
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Cannot save preferences")
self.portfolioFilePathTextBox.config(state=tk.NORMAL)
self.portfolioFilePathTextBox.delete(0, tk.END)
self.portfolioFilePathTextBox.insert(0, fileName)
self.portfolioFilePathTextBox.config(state=tk.DISABLED)
self.portfolioConfiguration = None
if len(fileName) > 0 and os.path.isfile(fileName):
try:
self.portfolioConfiguration = PortfolioConfiguration(fileName)
Status.add("Portfolio config loaded: %s" % fileName)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR loading config")
else:
self.portfolioConfiguration = None
def ExportReport(self):
preferences = Preferences.get()
if self.analysis is None:
Status.add("ERROR: Analysis not yet calculated", red=True)
return
try:
fileName = tkFileDialog.asksaveasfilename(parent=self.root,
defaultextension=".xls",
initialfile="report.xls",
title="Save Report",
initialdir=preferences.analysis_last_opened_dir())
self.analysis.report(fileName)
Status.add("Report written to %s" % fileName)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR Exporting Report")
def ExportPDM(self):
preferences = Preferences.get()
if self.analysis is None:
Status.add("ERROR: Analysis not yet calculated", red=True)
return
try:
fileName = tkFileDialog.asksaveasfilename(parent=self.root,
defaultextension=".xml",
initialfile="power_deviation_matrix.xml",
title="Save Report",
initialdir=preferences.analysis_last_opened_dir())
self.analysis.report_pdm(fileName)
Status.add("Power Deviation Matrix written to %s" % fileName)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR Exporting Report")
def visualise(self):
if self.analysis is None:
Status.add("ERROR: Analysis not yet calculated", red=True)
return
try:
VisualisationDialogFactory(self.analysis).new_visualisaton(self.visualisation.get())
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR Visualising")
def Share_Matrix(self):
try:
ShareMatrix(self.portfolioConfiguration)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e)
def PCWG_Share_X_Portfolio(self, share_name):
if self.portfolioConfiguration is None:
Status.add("ERROR: Portfolio not loaded", red=True)
return
try:
ShareXPortfolio(self.portfolioConfiguration, ShareAnalysisFactory(share_name))
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e)
def PCWG_Share_1_Portfolio(self):
self.PCWG_Share_X_Portfolio("Share01")
def PCWG_Share_1_dot_1_Portfolio(self):
self.PCWG_Share_X_Portfolio("Share01.1")
def PCWG_Share_2_Portfolio(self):
self.PCWG_Share_X_Portfolio("Share02")
def PCWG_Share_3_Portfolio(self):
self.PCWG_Share_X_Portfolio("Share03")
def new_portfolio(self):
try:
portfolioConfiguration = PortfolioConfiguration()
portfolio.PortfolioDialog(self.root,
self.LoadPortfolioFromPath,
portfolioConfiguration)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e)
def edit_portfolio(self):
if self.portfolioConfiguration is None:
Status.add("ERROR: Portfolio not loaded", red=True)
return
try:
portfolio.PortfolioDialog(self.root,
self.LoadPortfolioFromPath,
self.portfolioConfiguration)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e)
def load_portfolio(self):
try:
preferences = Preferences.get()
initial_dir = preferences.portfolio_last_opened_dir()
initial_file = preferences.portfolio_last_opened_file()
# read the benchmark config xml
portfolio_path = tkFileDialog.askopenfilename(parent=self.root,
title="Select Portfolio Configuration",
initialfile=initial_file,
initialdir=initial_dir)
self.LoadPortfolioFromPath(portfolio_path)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e)
def ExportTimeSeries(self):
if self.analysis is None:
Status.add("ERROR: Analysis not yet calculated", red=True)
return
try:
preferences = Preferences.get()
selections = ExportDataSetDialog(self.root)
clean, full, calibration = selections.get_selections()
file_name = tkFileDialog.asksaveasfilename(parent=self.root,
defaultextension=".csv",
initialfile="timeseries.csv",
title="Save Time Series",
initialdir=preferences.analysis_last_opened_dir())
full_df_output_dir = "TimeSeriesData"
self.analysis.export_time_series(file_name, clean, full, calibration, full_df_output_dir=full_df_output_dir)
if clean:
Status.add("Time series written to %s" % file_name)
if any((full, calibration)):
Status.add("Extra time series have been written to %s" % os.path.join(os.path.dirname(file_name),
full_df_output_dir))
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR Exporting Time Series")
def ExportTrainingData(self):
if self.analysis is None:
Status.add("ERROR: Analysis not yet calculated", red=True)
return
try:
preferences = Preferences.get()
fileName = tkFileDialog.asksaveasfilename(parent=self.root,
defaultextension=".csv",
initialfile="training_data.csv",
title="Save Training Data",
initialdir=preferences.analysis_last_opened_dir())
self.analysis.export_training_data(fileName)
Status.add("Time series written to %s" % fileName)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR Exporting Time Series")
def Calculate(self):
if self.analysisConfiguration is None:
Status.add("ERROR: Analysis Config file not specified", red=True)
return
try:
self.analysis = core_analysis.Analysis(self.analysisConfiguration)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR Calculating Analysis")
def ClearConsole(self):
self.listbox.delete(0, tk.END)
self.root.update()
def About(self):
tkMessageBox.showinfo("PCWG-Tool About", "Version: {vers} \nVisit http://www.pcwg.org for more info".format(vers=ver.version))
def preferences(self):
try:
PreferencesDialog(self.root)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e)
def add_message(self, message, red=False, orange=False, verbosity=1):
try:
self.listbox.insert(tk.END, message)
if red:
self.listbox.itemconfig(tk.END, {'bg': 'red', 'foreground': 'white'})
elif orange:
self.listbox.itemconfig(tk.END, {'bg': 'orange', 'foreground': 'white'})
self.listbox.see(tk.END)
self.root.update()
except:
print "Can't write message: {0}".format(message)
def set_portfolio_status(self, completed, total, finished):
if finished:
self.portfolio_status.set("{0}/{1} Successful".format(completed, total))
else:
self.portfolio_status.set("{0}/{1} In Progress".format(completed, total))
self.root.update()
def add_exception(self, exception, custom_message=None):
try:
if custom_message is not None:
message = "{0}: {1}".format(custom_message, exception)
else:
message = "{0}".format(exception)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# write full traceback
tb = traceback.extract_tb(exc_tb)
tb_list = traceback.format_list(tb)
for line in tb_list:
self.add_message(line, red=True)
self.add_message("Exception Type {0} in {1} line {2}.".format(exc_type.__name__, fname, exc_tb.tb_lineno), red=True)
self.add_message(message, red=True)
except:
self.add_message("Can't write exception")
|
|
from __future__ import division
import math
import os
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib import backend_bases
from matplotlib.backend_bases import IdleEvent, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
import matplotlib.widgets
from PySide import QtCore, QtGui
backend_version = "0.0.1"
DEBUG = False
cursord = {
cursors.MOVE : QtCore.Qt.SizeAllCursor,
cursors.HAND : QtCore.Qt.PointingHandCursor,
cursors.POINTER : QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION : QtCore.Qt.CrossCursor,
}
#Use subclasses that inherit from object because PySide is unstable when
#subclassing with old-style classes
class SubplotTool(matplotlib.widgets.SubplotTool, object):
pass
class GraphicsContextBase(backend_bases.GraphicsContextBase, object):
pass
class FigureManagerBase(backend_bases.FigureManagerBase, object):
pass
class FigureCanvasBase(backend_bases.FigureCanvasBase, object):
pass
class NavigationToolbar2(backend_bases.NavigationToolbar2, object):
pass
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw_idle()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
if QtGui.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = QtGui.QApplication( [" "] )
QtCore.QObject.connect( qApp, QtCore.SIGNAL( "lastWindowClosed()" ),
qApp, QtCore.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
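# Default set at import time: _create_qApp() flips this to True when the
# backend creates the QApplication itself, and show() checks it to decide
# whether to enter qApp.exec_().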
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
qApp.exec_()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
thisFig = Figure( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( QtGui.QWidget, FigureCanvasBase ):
keyvald = { QtCore.Qt.Key_Control : 'control',
QtCore.Qt.Key_Shift : 'shift',
QtCore.Qt.Key_Alt : 'alt',
}
    # map Qt button codes (Left=1, Right=2, Mid=4) to matplotlib's left=1, middle=2, right=3
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure, parent=None):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
if parent is not None:
QtGui.QWidget.__init__(self, parent)
else:
QtGui.QWidget.__init__(self, parent)
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
self._idle = True
# hide until we can test and fix
#self.startTimer(backend_IdleEvent.milliseconds)
w,h = self.get_width_height()
self.resize( w, h )
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
#if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def wheelEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
        # QWheelEvent::delta() is in eighths of a degree; one standard
        # wheel notch is 15 degrees, i.e. delta == 120 per scroll step.
steps = event.delta()/120
if (event.orientation() == QtCore.Qt.Vertical):
FigureCanvasBase.scroll_event( self, x, y, steps)
if DEBUG: print 'scroll event : delta = %i, steps = %i ' % (event.delta(),steps)
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resize(self, w, h):
QtGui.QWidget.resize(self, w, h)
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch , forward=True)
self.draw()
self.update()
QtGui.QWidget.resizeEvent(self, event)
def sizeHint( self ):
w, h = self.get_width_height()
return QtCore.QSize( w, h )
    def minimumSizeHint( self ):
return QtCore.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = str(event.text())
elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def draw_idle(self):
'update drawing area only if idle'
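        # Coalesce repeated draw requests: only the first call while idle
        # schedules a real draw via a 0 ms single-shot timer; later calls are
        # no-ops until idle_draw runs and resets the flag.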
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: QtCore.QTimer.singleShot(0, idle_draw)
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = QtGui.QMainWindow()
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
self.window.setWindowIcon(QtGui.QIcon( image ))
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
QtCore.QObject.connect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.window.addToolBar(self.toolbar)
QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"),
self.window.statusBar().showMessage)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
QtCore.QObject.disconnect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close()
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT( NavigationToolbar2, QtGui.QToolBar ):
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.coordinates = coordinates
QtGui.QToolBar.__init__( self, parent )
NavigationToolbar2.__init__( self, canvas )
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
a = self.addAction(self._icon('home.svg'), 'Home', self.home)
a.setToolTip('Reset original view')
a = self.addAction(self._icon('back.svg'), 'Back', self.back)
a.setToolTip('Back to previous view')
a = self.addAction(self._icon('forward.svg'), 'Forward', self.forward)
a.setToolTip('Forward to next view')
self.addSeparator()
a = self.addAction(self._icon('move.svg'), 'Pan', self.pan)
a.setToolTip('Pan axes with left mouse, zoom with right')
a = self.addAction(self._icon('zoom_to_rect.svg'), 'Zoom', self.zoom)
a.setToolTip('Zoom to rectangle')
self.addSeparator()
a = self.addAction(self._icon('subplots.png'), 'Subplots',
self.configure_subplots)
a.setToolTip('Configure subplots')
a = self.addAction(self._icon('filesave.svg'), 'Save',
self.save_figure)
a.setToolTip('Save the figure')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtGui.QLabel( "", self )
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop )
self.locLabel.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.emit(QtCore.SIGNAL("message"), s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = QtGui.QMainWindow()
win = self.adj_window
win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
win.setWindowTitle("Subplot Configuration Tool")
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
win.setWindowIcon(QtGui.QIcon( image ))
tool = SubplotToolQt(self.canvas.figure, win)
win.setCentralWidget(tool)
win.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = QtGui.QFileDialog.getSaveFileName(
self, "Choose a filename to save to", start, filters, selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
QtGui.QMessageBox.critical(
self, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
class SubplotToolQt( SubplotTool, QtGui.QWidget ):
def __init__(self, targetfig, parent):
QtGui.QWidget.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical)
# constraints
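        # Each slider's valueChanged drives the opposing slider's bound
        # (left raises right's minimum, right lowers left's maximum, and
        # likewise for bottom/top), so the margins can never cross.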
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderright.setMinimum )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderleft.setMaximum )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.slidertop.setMinimum )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderbottom.setMaximum )
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
adjustments = ('left:', 'bottom:', 'right:', 'top:', 'wspace:', 'hspace:')
for slider, adjustment in zip(sliders, adjustments):
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
layout = QtGui.QGridLayout()
leftlabel = QtGui.QLabel('left')
layout.addWidget(leftlabel, 2, 0)
layout.addWidget(self.sliderleft, 2, 1)
toplabel = QtGui.QLabel('top')
layout.addWidget(toplabel, 0, 2)
layout.addWidget(self.slidertop, 1, 2)
layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter)
bottomlabel = QtGui.QLabel('bottom')
        layout.addWidget(bottomlabel, 4, 2)
layout.addWidget(self.sliderbottom, 3, 2)
layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter)
rightlabel = QtGui.QLabel('right')
layout.addWidget(rightlabel, 2, 4)
layout.addWidget(self.sliderright, 2, 3)
hspacelabel = QtGui.QLabel('hspace')
layout.addWidget(hspacelabel, 0, 6)
layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderhspace, 1, 6)
layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter)
wspacelabel = QtGui.QLabel('wspace')
layout.addWidget(wspacelabel, 4, 6)
layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderwspace, 3, 6)
layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom)
layout.setRowStretch(1,1)
layout.setRowStretch(3,1)
layout.setColumnStretch(1,1)
layout.setColumnStretch(3,1)
layout.setColumnStretch(6,1)
self.setLayout(layout)
self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000))
self.sliderbottom.setSliderPosition(\
int(targetfig.subplotpars.bottom*1000))
self.sliderright.setSliderPosition(\
int(targetfig.subplotpars.right*1000))
self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000))
self.sliderwspace.setSliderPosition(\
int(targetfig.subplotpars.wspace*1000))
self.sliderhspace.setSliderPosition(\
int(targetfig.subplotpars.hspace*1000))
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcleft )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcbottom )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcright )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.functop )
QtCore.QObject.connect( self.sliderwspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcwspace )
QtCore.QObject.connect( self.sliderhspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funchspace )
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
self.targetfig.subplots_adjust(left=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
self.targetfig.subplots_adjust(right=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
self.targetfig.subplots_adjust(bottom=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
self.targetfig.subplots_adjust(top=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
QtGui.QMessageBox.warning( None, "Matplotlib", msg, QtGui.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
|
|
# -*- encoding: utf-8 -*-
"""
Disassembly support.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from opcode import * # an undocumented builtin module
import inspect
from h2o.utils.compatibility import *
from .expr import ExprNode, ASTId
from . import h2o
BYTECODE_INSTRS = {
"BINARY_SUBSCR": "cols", # column slice; could be row slice?
"UNARY_POSITIVE": "+",
"UNARY_NEGATIVE": "-",
"UNARY_NOT": "!",
"BINARY_POWER": "**",
"BINARY_MULTIPLY": "*",
"BINARY_FLOOR_DIVIDE": "//",
"BINARY_TRUE_DIVIDE": "/",
"BINARY_DIVIDE": "/",
"BINARY_MODULO": "%",
"BINARY_ADD": "+",
"BINARY_SUBTRACT": "-",
"BINARY_AND": "&",
"BINARY_OR": "|",
"COMPARE_OP": "", # some cmp_op
"CALL_FUNCTION": "", # some function call, have nargs in ops list...
}
def is_bytecode_instruction(instr):
return instr in BYTECODE_INSTRS
def is_comp(instr):
return "COMPARE" in instr
def is_binary(instr):
return "BINARY" in instr
def is_unary(instr):
return "UNARY" in instr
def is_func(instr):
return "CALL_FUNCTION" == instr
def is_load_fast(instr):
return "LOAD_FAST" == instr
def is_attr(instr):
return "LOAD_ATTR" == instr
def is_load_global(instr):
return "LOAD_GLOBAL" == instr
def is_return(instr):
return "RETURN_VALUE" == instr
def _bytecode_decompile_lambda(co):
code = co.co_code
n = len(code)
i = 0
ops = []
while i < n:
op = code[i]
if PY2:
op = ord(op)
args = []
i += 1
if op >= HAVE_ARGUMENT:
            if PY2:
                oparg = ord(code[i]) + ord(code[i + 1]) * 256
            else:
                oparg = code[i] + code[i + 1] * 256
i += 2
if op in hasconst:
args.append(co.co_consts[oparg]) # LOAD_CONST
elif op in hasname:
                args.append(co.co_names[oparg])  # LOAD_GLOBAL / LOAD_ATTR
elif op in hasjrel:
raise ValueError("unimpl: op in hasjrel")
elif op in haslocal:
args.append(co.co_varnames[oparg]) # LOAD_FAST
elif op in hascompare:
args.append(cmp_op[oparg]) # COMPARE_OP
elif is_func(opname[op]):
args.append(oparg) # oparg == nargs(fcn)
ops.append([opname[op], args])
return _lambda_bytecode_to_ast(co, ops)
def _lambda_bytecode_to_ast(co, ops):
# have a stack of ops, read from R->L to get correct oops
s = len(ops) - 1
keys = [o[0] for o in ops]
result = [ASTId("{")] + [ASTId(arg) for arg in co.co_varnames] + [ASTId(".")]
instr = keys[s]
if is_return(instr):
s -= 1
instr = keys[s]
if is_bytecode_instruction(instr) or is_load_fast(instr) or is_load_global(instr):
body, s = _opcode_read_arg(s, ops, keys)
else:
raise ValueError("unimpl bytecode instr: " + instr)
if s > 0:
print("Dumping disassembled code: ")
for i in range(len(ops)):
if i == s:
print(i, " --> " + str(ops[i]))
else:
print(i, str(ops[i]).rjust(5))
raise ValueError("Unexpected bytecode disassembly @ " + str(s))
result += [body] + [ASTId("}")]
return result
def _opcode_read_arg(start_index, ops, keys):
instr = keys[start_index]
return_idx = start_index - 1
if is_bytecode_instruction(instr):
if is_binary(instr):
return _binop_bc(BYTECODE_INSTRS[instr], return_idx, ops, keys)
elif is_comp(instr):
return _binop_bc(ops[start_index][1][0], return_idx, ops, keys)
elif is_unary(instr):
return _unop_bc(BYTECODE_INSTRS[instr], return_idx, ops, keys)
elif is_func(instr):
return _func_bc(ops[start_index][1][0], return_idx, ops, keys)
else:
raise ValueError("unimpl bytecode op: " + instr)
elif is_load_fast(instr):
return [_load_fast(ops[start_index][1][0]), return_idx]
elif is_load_global(instr):
return [_load_global(ops[start_index][1][0]), return_idx]
return [ops[start_index][1][0], return_idx]
def _binop_bc(op, idx, ops, keys):
rite, idx = _opcode_read_arg(idx, ops, keys)
left, idx = _opcode_read_arg(idx, ops, keys)
return [ExprNode(op, left, rite), idx]
def _unop_bc(op, idx, ops, keys):
arg, idx = _opcode_read_arg(idx, ops, keys)
return [ExprNode(op, arg), idx]
def _func_bc(nargs, idx, ops, keys):
named_args = {}
unnamed_args = []
args = []
while nargs > 0:
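        # Pre-3.6 CALL_FUNCTION encoding: the low byte of oparg counts
        # positional args and the high byte counts keyword args, so
        # nargs >= 256 means a keyword (name, value) pair is still on the
        # stack; each pair handled here decrements nargs by 256.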
if nargs >= 256: # named args ( foo(50,True,x=10) ) read first ( right -> left )
arg, idx = _opcode_read_arg(idx, ops, keys)
named_args[ops[idx][1][0]] = arg
idx -= 1 # skip the LOAD_CONST for the named args
nargs -= 256 # drop 256
else:
arg, idx = _opcode_read_arg(idx, ops, keys)
unnamed_args.insert(0, arg)
nargs -= 1
op = ops[idx][1][0]
frcls = h2o.H2OFrame
if not hasattr(frcls, op):
raise ValueError("Unimplemented: op <%s> not bound in H2OFrame" % op)
if is_attr(ops[idx][0]):
if PY2:
argspec = inspect.getargspec(getattr(frcls, op))
argnames = argspec.args[1:]
argdefs = list(argspec.defaults or [])
else:
argnames = []
argdefs = []
for name, param in inspect.signature(getattr(frcls, op)).parameters.items():
if name == "self": continue
if param.kind == inspect._VAR_KEYWORD: continue
argnames.append(name)
argdefs.append(param.default)
args = unnamed_args + argdefs[len(unnamed_args):]
for a in named_args: args[argnames.index(a)] = named_args[a]
if op == "ceil": op = "ceiling"
if op == "sum" and len(args) > 0 and args[0]: op = "sumNA"
if op == "min" and len(args) > 0 and args[0]: op = "minNA"
if op == "max" and len(args) > 0 and args[0]: op = "maxNA"
idx -= 1
if is_bytecode_instruction(ops[idx][0]):
arg, idx = _opcode_read_arg(idx, ops, keys)
args.insert(0, arg)
elif is_load_fast(ops[idx][0]):
args.insert(0, _load_fast(ops[idx][1][0]))
idx -= 1
return [ExprNode(op, *args), idx]
def _load_fast(x):
return ASTId(x)
def _load_global(x):
if x == 'True':
return True
elif x == 'False':
return False
return x
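# Illustrative walkthrough (a sketch, not part of the decompiler): how a simple
# lambda maps onto the Rapids AST built above. Note that this decompiler reads
# the pre-3.6 bytecode layout (one-byte opcode plus two-byte oparg), so the
# exact disassembly below assumes such an interpreter.
#
#   ast = _bytecode_decompile_lambda((lambda x: x + 1).__code__)
#
# For `lambda x: x + 1` the relevant instructions are roughly
#   LOAD_FAST x, LOAD_CONST 1, BINARY_ADD, RETURN_VALUE
# and _lambda_bytecode_to_ast folds them right-to-left into
#   [ASTId("{"), ASTId("x"), ASTId("."), ExprNode("+", ASTId("x"), 1), ASTId("}")]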
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import imp
import sys
import threading
import types
import unittest
import weakref
import gast
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.converters import arg_defaults
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import directives
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.converters import side_effect_guards
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrapping
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.core import unsupported_features_checker
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.util import tf_inspect
class _ConvertedEntityFactoryInfo(
collections.namedtuple(
'_ConvertedEntityFactoryInfo',
('module_name', 'converted_name', 'factory_factory_name', 'source_map'))
):
"""Holds metadata about a converted entity stored as a dynamic factory.
The dynamic factory is assumed to be created by _wrap_into_dynamic_factory,
  be named `factory_factory_name` and located inside the module named
`module_name`.
Attributes:
module_name: Text, the name of the module containing the entity.
converted_name: Text, the name of the converted entity.
factory_factory_name: Text, the name of the dynamic factory.
source_map: Dict.
"""
def __str__(self):
return '_ConvertedEntityFactoryInfo({} in {})'.format(
self.converted_name, self.module_name)
def get_module(self):
return sys.modules[self.module_name]
def get_factory(self):
assert self.module_name in sys.modules
factory_factory = getattr(sys.modules[self.module_name],
self.factory_factory_name)
return factory_factory()
# TODO(mdan): Add a garbage collection hook for cleaning up modules.
class _ConversionCache(object):
"""A hierarchical cache that uses the converted entity as weak key.
  The keys are weakly referenced (i.e. an entry is discarded when its key is
  destroyed). The subkeys are normal hashable values.
This class is generic - see the call site for how the keys and values are
defined.
"""
def __init__(self):
self._cache = weakref.WeakKeyDictionary()
def has(self, key, subkey):
if key not in self._cache:
return False
return subkey in self._cache[key]
def __getitem__(self, key):
if key not in self._cache:
# The bucket needs to be initialized to support this usage:
# cache[key][subkey] = value
self._cache[key] = {}
return self._cache[key]
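# Usage sketch for _ConversionCache (illustrative names only): the first-level
# key is held weakly, the subkey is any hashable value.
#
#   cache = _ConversionCache()
#   def fn():
#     pass
#   if not cache.has(fn, 'subkey'):
#     cache[fn]['subkey'] = 'converted'   # bucket auto-created by __getitem__
#   assert cache.has(fn, 'subkey')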
# Using a re-entrant lock to guard against the unlikely possibility that the
# conversion process triggers additional code execution.
_CACHE_LOCK = threading.RLock()
_CACHE = _ConversionCache()
# Note: strictly speaking, a simple factory might have been sufficient for
# functions. But the double factory approach allows us to control the closure
# and globals of the converted code in a cleaner fashion.
# TODO(mdan): A simple factory may be sufficient.
def _wrap_into_dynamic_factory(nodes, entity_name, factory_factory_name,
factory_name, closure_vars, future_features):
"""Wraps an AST into the body of a dynamic factory.
This uses the dynamic factory (factory of factory) pattern to achieve the
following:
1. The inner factory, dynamically creates the entity represented by nodes.
2. The entity is parametrized by `ag__`, the internal AutoGraph module.
3. The outer factory creates the inner factory with a lexical scope
in which `closure_vars` are bound local variables. This in turn allows the
caller to control the exact closure (i.e. non-global free variables) for
the inner factory.
The AST is expected to define some symbol named by `entity_name`.
Args:
nodes: ast.AST
entity_name: Union[Text, ast.AST]
factory_factory_name: Text
factory_name: Text
closure_vars: Iterable[Text]
future_features: Iterable[Text], see EntityInfo.future_features.
Returns:
ast.AST
"""
if not isinstance(nodes, (list, tuple)):
nodes = (nodes,)
dummy_closure_defs = []
for var_name in closure_vars:
template = """
var_name = None
"""
dummy_closure_defs.extend(templates.replace(template, var_name=var_name))
if future_features:
future_imports = gast.ImportFrom(
module='__future__',
names=[gast.alias(name=name, asname=None) for name in future_features],
level=0)
else:
future_imports = []
  # These dummy symbol declarations create local variables in a function scope,
# so that the Python parser correctly marks them as free non-global variables
# upon load (that is, it creates cell slots for each symbol). Their values are
# not used, as the cells are swapped with the original entity's cells after
# the code has been loaded.
template = """
future_imports
def factory_factory_name():
dummy_closure_defs
def factory_name(ag__, ag_source_map__, ag_module__):
entity_defs
entity_name.ag_source_map = ag_source_map__
entity_name.ag_module = ag_module__
entity_name.autograph_info__ = {}
return entity_name
return factory_name
"""
return templates.replace(
template,
future_imports=future_imports,
factory_factory_name=factory_factory_name,
factory_name=factory_name,
dummy_closure_defs=dummy_closure_defs,
entity_defs=nodes,
entity_name=entity_name)
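# Conceptual sketch of the resulting factory-of-factory structure (plain
# Python, names invented for illustration): the outer factory pins down the
# closure cells via the dummy definitions, the inner factory builds the entity
# and parametrizes it with `ag__`.
#
#   def create_converted_entity_factory():
#     captured_var = None            # dummy def; its cell is swapped in later
#     def create_converted_entity(ag__, ag_source_map__, ag_module__):
#       def tf__f(x):
#         return x + captured_var    # body generated from the converted AST
#       tf__f.ag_source_map = ag_source_map__
#       tf__f.ag_module = ag_module__
#       tf__f.autograph_info__ = {}
#       return tf__f
#     return create_converted_entity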
def _convert_with_cache(entity, program_ctx, free_nonglobal_var_names):
"""Returns a (possibly cached) factory for the converted result of entity."""
# The cache key is the entity's code object if it defined one, otherwise it's
# the entity itself. Keying by the code object allows caching of functions
# that are dynamically created e.g. in a loop.
if hasattr(entity, '__code__'):
key = entity.__code__
else:
key = entity
  # The cache subkey encompasses any conversion options on which the generated
# code may depend.
# The cached factory includes the necessary definitions to distinguish
# between the global and non-global free variables. For this reason, the
# cache subkey includes the names of the free non-globals.
subkey = (program_ctx.options, frozenset(free_nonglobal_var_names))
with _CACHE_LOCK:
# The cache values are _ConvertedEntityFactoryInfo objects.
if _CACHE.has(key, subkey):
# TODO(mdan): Check whether the module is still loaded.
converted_entity_info = _CACHE[key][subkey]
logging.log(3, 'Cache hit for entity %s key %s subkey %s: %s', entity,
key, subkey, converted_entity_info)
return converted_entity_info
logging.log(1, 'Entity %s is not cached for key %s subkey %s', entity, key,
subkey)
nodes, converted_name, entity_info = convert_entity_to_ast(
entity, program_ctx)
namer = naming.Namer(entity_info.namespace)
factory_factory_name = namer.new_symbol('create_converted_entity_factory',
())
factory_name = namer.new_symbol('create_converted_entity', ())
nodes = _wrap_into_dynamic_factory(nodes, converted_name,
factory_factory_name, factory_name,
free_nonglobal_var_names,
entity_info.future_features)
module, _, source_map = compiler.ast_to_object(
nodes, include_source_map=True)
module_name = module.__name__
converted_entity_info = _ConvertedEntityFactoryInfo(
module_name=module_name,
converted_name=converted_name,
factory_factory_name=factory_factory_name,
source_map=source_map)
_CACHE[key][subkey] = converted_entity_info
return converted_entity_info
def _instantiate(entity, converted_entity_info, free_nonglobal_var_names):
"""Creates a converted instance and binds it to match original entity."""
factory = converted_entity_info.get_factory()
# `factory` is currently bound to the empty module it was loaded from.
# It must instead be bound to the globals and closure from the original
# entity.
if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
entity_globals = entity.__globals__
entity_closure = entity.__closure__ or ()
elif hasattr(entity, '__module__'):
entity_globals = sys.modules[entity.__module__].__dict__
entity_closure = ()
assert len(entity_closure) == len(free_nonglobal_var_names)
# Fit the original entity's cells to match the order of factory's cells.
original_names_and_cells = dict(zip(free_nonglobal_var_names, entity_closure))
new_factory_cells = tuple(
original_names_and_cells[name] for name in factory.__code__.co_freevars)
bound_factory = types.FunctionType(
code=factory.__code__,
globals=entity_globals,
name=factory.__name__,
argdefs=(),
closure=new_factory_cells)
# Two other free vars: the internal "ag__" module and the source
# map. These are wired via the parameters of the factory.
converted_entity = bound_factory( # pylint:disable=not-callable
ag_internal, converted_entity_info.source_map,
converted_entity_info.get_module())
if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
# Attach the default argument to the converted function.
converted_entity.__defaults__ = entity.__defaults__
if hasattr(entity, '__kwdefaults__'):
converted_entity.__kwdefaults__ = entity.__kwdefaults__
return converted_entity
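# Conceptual sketch of the cell rebinding performed above (plain Python):
# types.FunctionType can rebind an existing code object to new globals and new
# closure cells, as long as the cells line up with __code__.co_freevars.
#
#   def outer():
#     y = 2
#     def f(x):
#       return x + y
#     return f
#   f = outer()
#   g = types.FunctionType(f.__code__, f.__globals__, f.__name__,
#                          f.__defaults__, f.__closure__)
#   assert g(1) == 3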
def convert(entity, program_ctx):
"""Converts an entity into an equivalent entity."""
if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
free_nonglobal_var_names = entity.__code__.co_freevars
else:
free_nonglobal_var_names = ()
for i, name in enumerate(free_nonglobal_var_names):
if (name == 'ag__' and
entity.__closure__[i].cell_contents is not ag_internal):
raise ValueError('entity {} uses the reserved symbol "{}"'.format(
entity, name))
# TODO(mdan): In extreme cases, other ag__ symbols may also be clobbered.
converted_entity_info = _convert_with_cache(entity, program_ctx,
free_nonglobal_var_names)
return _instantiate(entity, converted_entity_info, free_nonglobal_var_names)
def is_whitelisted_for_graph(o, check_call_override=True):
"""Checks whether an entity is whitelisted for use in graph mode.
Examples of whitelisted entities include all members of the tensorflow
package.
Args:
o: A Python entity.
check_call_override: Reserved for internal use. When set to `False`, it
disables the rule according to which classes are whitelisted if their
__call__ method is whitelisted.
Returns:
Boolean
"""
# TODO(b/120224672): Fix this.
if isinstance(o, functools.partial):
# tf_inspect.getmodule(functools.partial(...)) otherwise returns None since
# functools.partial objects do not have a __module__ attribute.
m = functools
else:
m = tf_inspect.getmodule(o)
# Examples of callables that lack a __module__ property include builtins.
if hasattr(m, '__name__'):
for rule in config.CONVERSION_RULES:
action = rule.get_action(m)
if action == config.Action.CONVERT:
logging.log(2, 'Not whitelisted: %s: %s', o, rule)
return False
elif action == config.Action.DO_NOT_CONVERT:
logging.log(2, 'Whitelisted: %s: %s', o, rule)
return True
if tf_inspect.isgeneratorfunction(o):
logging.warn(
'Entity %s appears to be a generator function. It will not be converted'
' by AutoGraph.', o)
logging.log(2, 'Whitelisted: %s: generator functions are not converted', o)
return True
if check_call_override and hasattr(o, '__call__'):
# Callable objects: whitelisted if their __call__ method is.
# The type check avoids infinite recursion around the __call__ method
# of function objects.
if (type(o) != type(o.__call__)) and is_whitelisted_for_graph(o.__call__): # pylint: disable=unidiomatic-typecheck
logging.log(2, 'Whitelisted: %s: object __call__ whitelisted', o)
return True
owner_class = None
if tf_inspect.ismethod(o):
# Methods of whitelisted classes are also whitelisted, even if they are
# bound via user subclasses.
#
# For example, suppose `tf.Foo` has a method called `bar`, and `baz` is
# defined as below. `tf.Foo` is whitelisted. Then `baz.bar` is also
# whitelisted.
#
# class Custom(tf.Foo):
# pass
#
# baz = Custom()
#
# For the example above, if `Custom` did overload `bar`, then it would no
# longer be whitelisted.
owner_class = inspect_utils.getmethodclass(o)
if owner_class is not None:
if issubclass(owner_class, unittest.TestCase):
logging.log(2, 'Whitelisted: %s: method of TestCase subclass', o)
return True
owner_class = inspect_utils.getdefiningclass(o, owner_class)
is_call_override = (o.__name__ == '__call__')
if is_whitelisted_for_graph(
owner_class, check_call_override=not is_call_override):
logging.log(2, 'Whitelisted: %s: owner is whitelisted %s', o,
owner_class)
return True
if inspect_utils.isnamedtuple(o):
# Due to the way they're constructed, namedtuple types cannot be converted
# because they don't expose source code. But we assume they are safe for
# graph mode since they are just containers.
logging.log(2, 'Whitelisted: %s: named tuple', o)
return True
logging.log(2, 'Not whitelisted: %s: default rule', o)
return False
# TODO(mdan): Rename to convert_*_node to avoid confusion with convert.
def convert_entity_to_ast(o, program_ctx):
"""Compile a Python entity into equivalent TensorFlow.
Args:
o: A Python entity.
program_ctx: A ProgramContext object.
Returns:
A tuple (ast, new_name, namespace):
* ast: An AST representing an entity with interface equivalent to `o`,
      but which, when executed, creates a TF graph.
* new_name: The symbol name under which the new entity can be found.
* namespace: A dict mapping all symbols visible to the converted entity,
keyed by their symbol name.
Raises:
ValueError: if the entity type is not supported.
"""
logging.log(1, 'Converting %s', o)
if tf_inspect.isclass(o):
nodes, name, entity_info = convert_class_to_ast(o, program_ctx)
elif tf_inspect.isfunction(o):
nodes, name, entity_info = convert_func_to_ast(o, program_ctx)
elif tf_inspect.ismethod(o):
nodes, name, entity_info = convert_func_to_ast(o, program_ctx)
elif hasattr(o, '__class__'):
# Note: this should only be raised when attempting to convert the object
# directly. converted_call should still support it.
raise NotImplementedError(
'cannot convert entity "{}": object conversion is not yet'
' supported.'.format(o))
else:
raise ValueError(
'Entity "%s" has unsupported type "%s". Only functions and classes are '
'supported for now.' % (o, type(o)))
if logging.has_verbosity(2):
logging.log(2, 'Compiled output of %s:\n\n%s\n', o,
compiler.ast_to_source(nodes))
if logging.has_verbosity(4):
for n in nodes:
logging.log(4, 'Compiled AST of %s:\n\n%s\n\n', o,
pretty_printer.fmt(n, color=False))
return nodes, name, entity_info
def convert_class_to_ast(c, program_ctx):
"""Specialization of `convert_entity_to_ast` for classes."""
# TODO(mdan): Revisit this altogether. Not sure we still need it.
converted_members = {}
method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
members = tf_inspect.getmembers(c, predicate=method_filter)
if not members:
raise ValueError('cannot convert %s: no member methods' % c)
# TODO(mdan): Don't clobber namespaces for each method in one class namespace.
# The assumption that one namespace suffices for all methods only holds if
# all methods were defined in the same module.
# If, instead, functions are imported from multiple modules and then spliced
# into the class, then each function has its own globals and __future__
# imports that need to stay separate.
# For example, C's methods could both have `global x` statements referring to
# mod1.x and mod2.x, but using one namespace for C would cause a conflict.
# from mod1 import f1
# from mod2 import f2
# class C(object):
# method1 = f1
# method2 = f2
class_namespace = {}
future_features = None
for _, m in members:
# Only convert the members that are directly defined by the class.
if inspect_utils.getdefiningclass(m, c) is not c:
continue
(node,), _, entity_info = convert_func_to_ast(
m, program_ctx=program_ctx, do_rename=False)
class_namespace.update(entity_info.namespace)
converted_members[m] = node
# TODO(mdan): Similarly check the globals.
if future_features is None:
future_features = entity_info.future_features
elif frozenset(future_features) ^ frozenset(entity_info.future_features):
# Note: we can support this case if ever needed.
raise ValueError(
          'cannot convert {}: it has methods built with mismatched future'
' features: {} and {}'.format(c, future_features,
entity_info.future_features))
namer = naming.Namer(class_namespace)
class_name = namer.class_name(c.__name__)
  # Process any base classes: if the superclass is of a whitelisted type, an
# absolute import line is generated.
output_nodes = []
renames = {}
base_names = []
for base in c.__bases__:
if isinstance(object, base):
base_names.append('object')
continue
if is_whitelisted_for_graph(base):
alias = namer.new_symbol(base.__name__, ())
output_nodes.append(
gast.ImportFrom(
module=base.__module__,
names=[gast.alias(name=base.__name__, asname=alias)],
level=0))
else:
raise NotImplementedError(
'Conversion of classes that do not directly extend classes from'
' whitelisted modules is temporarily suspended. If this breaks'
' existing code please notify the AutoGraph team immediately.')
base_names.append(alias)
renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)
# Generate the definition of the converted class.
bases = [gast.Name(n, gast.Load(), None) for n in base_names]
class_def = gast.ClassDef(
class_name,
bases=bases,
keywords=[],
body=list(converted_members.values()),
decorator_list=[])
# Make a final pass to replace references to the class or its base classes.
# Most commonly, this occurs when making super().__init__() calls.
# TODO(mdan): Making direct references to superclass' superclass will fail.
class_def = qual_names.resolve(class_def)
renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name)
class_def = ast_util.rename_symbols(class_def, renames)
output_nodes.append(class_def)
# TODO(mdan): Find a way better than forging this object.
entity_info = transformer.EntityInfo(
source_code=None,
source_file=None,
future_features=future_features,
namespace=class_namespace)
return output_nodes, class_name, entity_info
def _add_reserved_symbol(namespace, name, entity):
if name not in namespace:
namespace[name] = entity
elif namespace[name] != entity:
raise ValueError('The name "%s" is reserved and may not be used.' % name)
ag_internal = None
# TODO(mdan): Move into core or replace with an actual importable module.
def _add_self_references(namespace, autograph_module):
"""Adds namespace references to the module that exposes the api itself."""
global ag_internal
if ag_internal is None:
# Craft a module that exposes parts of the external API as well as certain
# internal modules.
ag_internal = imp.new_module('autograph')
ag_internal.__dict__.update(autograph_module.__dict__)
ag_internal.ConversionOptions = converter.ConversionOptions
ag_internal.STD = converter.STANDARD_OPTIONS
ag_internal.Feature = converter.Feature
ag_internal.utils = utils
ag_internal.function_scope = function_wrapping.function_scope
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
ag_internal.__dict__.update(special_functions.__dict__)
ag_internal.__dict__.update(operators.__dict__)
_add_reserved_symbol(namespace, 'ag__', ag_internal)
def convert_func_to_ast(f, program_ctx, do_rename=True):
"""Specialization of `convert_entity_to_ast` for callable functions."""
future_features = inspect_utils.getfutureimports(f)
node, source = parser.parse_entity(f, future_features=future_features)
logging.log(3, 'Source code of %s:\n\n%s\n', f, source)
# Parsed AST should contain future imports and one function def node.
# In general, the output of inspect.getsource is inexact for lambdas because
# it uses regex matching to adjust the exact location around the line number
# that CPython records. Then, the entire containing line is returned, which
# we may have trouble disambiguating. For example:
# x, y = lambda: 1, lambda: 2
if f.__name__ == '<lambda>':
nodes = ast_util.find_matching_definitions(node, f)
if len(nodes) != 1:
raise ValueError(
'Unable to identify source code of lambda function {}. It was'
' defined on this line: {}, which must contain a single lambda with'
' matching signature. To avoid ambiguity, define each lambda'
' in a separate expression.'.format(f, source))
node, = nodes
# TODO(znado): Place inside standard_analysis.
origin_info.resolve_entity(node, source, f)
namespace = inspect_utils.getnamespace(f)
_add_self_references(namespace, program_ctx.autograph_module)
namer = naming.Namer(namespace)
entity_info = transformer.EntityInfo(
source_code=source,
source_file='<fragment>',
future_features=future_features,
namespace=namespace)
context = converter.EntityContext(namer, entity_info, program_ctx)
node = node_to_graph(node, context)
if isinstance(node, gast.Lambda):
new_name = namer.new_symbol('tf__lambda', ())
node = gast.Assign(
targets=[gast.Name(new_name, gast.Store(), None)], value=node)
elif do_rename:
new_name = namer.function_name(f.__name__)
node.name = new_name
else:
new_name = f.__name__
assert node.name == new_name
return (node,), new_name, entity_info
def node_to_graph(node, context):
"""Convert Python code to equivalent TF graph mode code.
Args:
node: AST, the code to convert.
context: converter.EntityContext
Returns:
A tuple (node, deps):
* node: A Python ast node, representing the converted code.
* deps: A set of strings, the fully qualified names of entity
dependencies that this node has.
"""
# TODO(mdan): Insert list_comprehensions somewhere.
unsupported_features_checker.verify(node)
node = converter.standard_analysis(node, context, is_initial=True)
node = converter.apply_(node, context, arg_defaults)
node = converter.apply_(node, context, directives)
node = converter.apply_(node, context, break_statements)
if context.program.options.uses(converter.Feature.ASSERT_STATEMENTS):
node = converter.apply_(node, context, asserts)
# Note: sequencing continue canonicalization before for loop one avoids
# dealing with the extra loop increment operation that the for
# canonicalization creates.
node = converter.apply_(node, context, continue_statements)
node = converter.apply_(node, context, return_statements)
if context.program.options.uses(converter.Feature.LISTS):
node = converter.apply_(node, context, lists)
node = converter.apply_(node, context, slices)
node = converter.apply_(node, context, call_trees)
node = converter.apply_(node, context, control_flow)
node = converter.apply_(node, context, conditional_expressions)
node = converter.apply_(node, context, logical_expressions)
if context.program.options.uses(converter.Feature.AUTO_CONTROL_DEPS):
node = converter.apply_(node, context, side_effect_guards)
# TODO(mdan): If function scopes ever does more, the toggle will need moving.
if context.program.options.uses(converter.Feature.NAME_SCOPES):
node = converter.apply_(node, context, function_scopes)
return node
|
|
from drawable import Drawable
import ezdxf
from utils import btu
class Symbol(Drawable):
def __init__(self):
super().__init__()
def draw_no_contact(self):
self.add_line((0, 0), (5, 0))
self.add_line((15, 0), (20, 0))
self.add_line((5, 10), (5, -10))
self.add_line((15, 10), (15, -10))
def draw_nc_contact(self):
self.draw_no_contact()
self.add_line((0, -10), (20, 10))
def draw_magnetic(self):
self.add_polyline2d(
[
(0, 0),
(10, 10),
(10, -10),
(20, 0)
]
)
def draw_inline_terminal(self, left=True, right=True, label=None):
self.add_circle((10, 0), 5)
if left:
self.add_line((0, 0), (5, 0))
if right:
self.add_line((15, 0), (20, 0))
if label is not None:
self.add_text(label, (10, -10),
height=10, alignment='MIDDLE_CENTER')
def draw_terminal(self):
self.add_polyline2d(
[
(0, 10),
(20, 10),
(20, -10),
(0, -10)
],
attr={'flags': ezdxf.const.POLYLINE_CLOSED}
)
self.add_circle((10, 0), 10)
def draw_thermal(self):
self.add_arc((10, 0), 10, 270, 180)
self.add_arc((30, 0), 10, 90, 0)
# Ground Symbols
class PE(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
self.add_line((0, 0), (0, -12))
self.add_line((-6, -12), (6, -12))
self.add_line((-4, -14), (4, -14))
self.add_line((-2, -16), (2, -16))
class SG(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
self.add_line((0, 0), (0, -12))
self.add_polyline2d(
[
(-6, -12),
(6, -12),
(0, -18)
],
attr={'flags': ezdxf.const.POLYLINE_CLOSED}
)
class LSW_NC(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
self.draw_inline_terminal(right=False)
self.add_line(
(15, 0),
(55, 5.77350269189626),
)
self.move((btu(2), 0))
self.draw_inline_terminal(left=False)
class LSW_NO(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
self.draw_inline_terminal(right=False)
self.add_line(
(15, 0),
(49.0734179127745, -8.00013698266566),
)
self.move((btu(2), 0))
self.draw_inline_terminal(left=False)
class LSW_NC_TS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NC().sym_plot(self)
self.add_polyline2d(
[
(30, 2.1650635094611),
(30, -10),
(35, -10),
(35, -15),
(25, -15),
(25, -20),
(30, -20),
(30, -25)
]
)
class LSW_NO_TS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NO().sym_plot(self)
self.add_polyline2d(
[
(30, -3.52186725285915),
(30, -10),
(35, -10),
(35, -15),
(25, -15),
(25, -20),
(30, -20),
(30, -25)
]
)
class LSW_NC_FS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NC().sym_plot(self)
self.add_polyline2d(
[
(30, 2.1650635094611),
(30, -20),
(40, -20),
(30, -10)
]
)
class LSW_NO_FS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NO().sym_plot(self)
self.add_polyline2d(
[
(30, -3.52186725285915),
(30, -20),
(40, -20),
(30, -10)
]
)
class LSW_NC_PS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NC().sym_plot(self)
self.add_line(
(30, 2.1650635094611),
(30, -15)
)
self.add_arc(
(30, -25),
10,
0,
180
)
self.add_line(
(20, -25),
(40, -25)
)
class LSW_NO_PS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NO().sym_plot(self)
self.add_line(
(30, -3.52186725285915),
(30, -15)
)
self.add_arc(
(30, -25),
10,
0,
180
)
self.add_line(
(20, -25),
(40, -25)
)
class LSW_NC_LS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NC().sym_plot(self)
self.add_line(
(30, 2.1650635094611),
(30, -15)
)
self.add_circle(
(30, -25),
10,
)
class LSW_NO_LS(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
LSW_NO().sym_plot(self)
self.add_line(
(30, -3.52186725285915),
(30, -15)
)
self.add_circle(
(30, -25),
10,
)
class PB_NO(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
self.draw_inline_terminal(right=False)
self.add_line(
(10, -10),
(50, -10),
)
self.add_line(
(30, -10),
(30, 10),
)
self.move((btu(2), 0))
self.draw_inline_terminal(left=False)
class PB_NC(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
self.draw_inline_terminal(right=False)
self.add_line(
(10, -5),
(50, -5),
)
self.add_line(
(30, -5),
(30, 10),
)
self.move((btu(2), 0))
self.draw_inline_terminal(left=False)
class CG(Symbol):
def __init__(self, ):
super().__init__()
def draw(self):
self.add_line((0, 0), (0, -12))
self.add_line((-6, -12), (6, -12))
self.add_line((-6, -12), (-9, -17))
self.add_line((0, -12), (-3, -17))
self.add_line((6, -12), (3, -17))
class NO(Symbol):
def __init__(self):
super().__init__()
def draw(self):
self.draw_no_contact()
class NC(Symbol):
def __init__(self):
super().__init__()
def draw(self):
self.draw_nc_contact()
class ETERM(Symbol):
def __init__(self):
super().__init__()
def draw(self):
self.draw_terminal()
class ITERM(Symbol):
def __init__(self, left=True, right=True, label=None):
super().__init__()
self._left = left
self._right = right
self._label = label
def draw(self):
self.draw_inline_terminal(left=self._left,
right=self._right,
label=self._label)
class SOL(Symbol):
def __init__(self):
super().__init__()
def draw(self):
self.draw_inline_terminal()
self.move((btu(1), 0))
self.draw_magnetic()
self.move((btu(1), 0))
self.draw_inline_terminal()
class OL(Symbol):
def __init__(self):
super().__init__()
def draw(self):
ITERM().sym_plot(self)
self.move((btu(1), 0))
self.draw_thermal()
self.move((btu(2), 0))
ITERM().sym_plot(self)
class CB(Symbol):
def __init__(self):
super().__init__()
def draw(self):
ITERM(left=True, right=False).sym_plot(self)
self.add_arc(center=(30, -5), radius=25, start=37, end=143)
ITERM(left=False, right=True)\
.sym_plot(self, (btu(2), 0))
def draw_multipole(self):
self.draw_multipole_basic()
self.add_line(
(30, 20),
(30, 20 + (self.pole_offset[1] * (self.poles - 1))),
linetype='PHANTOM'
)
class MDS(Symbol):
def __init__(self):
super().__init__()
def draw(self):
LSW_NO().sym_plot(self)
def draw_multipole(self):
self.draw_multipole_basic()
self.add_polyline2d(
[
(30, -3.52186725285915),
(30, -3.52186725285915 +
(self.pole_offset[1] * (self.poles - 0.5))),
(49.0734179127745, -8.00013698266566 +
(self.pole_offset[1] * (self.poles - 0.5)))
],
attr={'linetype': 'PHANTOM'}
)
class CBMDS(Symbol):
def __init__(self):
super().__init__()
def draw(self):
CB().sym_plot(self)
def draw_multipole(self):
self.draw_multipole_basic()
self.add_polyline2d(
[
(30, 20),
(30, -3.52186725285915 +
(self.pole_offset[1] * (self.poles - 0.5))),
(49.0734179127745, -8.00013698266566 +
(self.pole_offset[1] * (self.poles - 0.5)))
],
attr={'linetype': 'PHANTOM'}
)
class GEN_DEV_NC(Symbol):
def __init__(self):
super().__init__()
def draw(self):
ETERM().sym_plot(self)
self.add_line(
(20, 0),
(40, 0)
)
NC().sym_plot(self, offset=(btu(2), 0))
self.add_line(
(60, 0),
(80, 0)
)
ETERM().sym_plot(self, offset=(btu(4), 0))
self.add_rectangle(
[
(-10, 20),
(110, -20)
],
attr={'flags': ezdxf.const.POLYLINE_CLOSED, 'linetype': 'PHANTOM'}
)
class GEN_DEV_NO(Symbol):
def __init__(self):
super().__init__()
def draw(self):
ETERM().sym_plot(self)
self.add_line(
(20, 0),
(40, 0)
)
NO().sym_plot(self, offset=(btu(2), 0))
self.add_line(
(60, 0),
(80, 0)
)
ETERM().sym_plot(self, offset=(btu(4), 0))
self.add_rectangle(
[
(-10, 20),
(110, -20)
],
attr={'flags': ezdxf.const.POLYLINE_CLOSED, 'linetype': 'PHANTOM'}
)
class GEN_DEV(Symbol):
def __init__(self):
super().__init__()
def draw(self):
ETERM().sym_plot(self)
ETERM().sym_plot(self, offset=(btu(4), 0))
self.add_rectangle(
[
(-10, 20),
(110, 20),
(110, -20),
(-10, -20)
],
attr={'flags': ezdxf.const.POLYLINE_CLOSED, 'linetype': 'PHANTOM'}
)
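# Usage sketch (illustrative only, not part of the library): new symbols are
# composed from the drawing primitives above, exactly like LSW_NC, and can be
# stamped into other symbols via sym_plot(), as GEN_DEV_NO does with NO().
# The FUSE class below is a hypothetical example; it assumes, like the other
# two-terminal symbols here, that move((btu(2), 0)) shifts the second inline
# terminal far enough for the arc to span between the two terminals.
class FUSE(Symbol):
    def __init__(self):
        super().__init__()
    def draw(self):
        self.draw_inline_terminal(right=False)
        self.add_arc((30, 0), 15, 0, 180)
        self.move((btu(2), 0))
        self.draw_inline_terminal(left=False)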
|
|
#!/usr/bin/env python
#
# Copyright 2016 Roanuz Softwares Private Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
import logging
import os
import json
from datetime import datetime
from pycricket_storagehandler import RcaFileStorageHandler, RcaStorageHandler
# The following two lines silence urllib3 warnings raised via the requests library.
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
logger = logging.getLogger('RcaApp')
logger.setLevel(logging.ERROR)  # change to logging.INFO for more verbose logging
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class RcaApp():
"""
    The RcaApp class provides various functions to access
    the different Cricket API endpoints.
"""
def __init__(self, access_key=None, secret_key=None, app_id=None, store_handler=None, device_id=None):
"""
        Initializes the user's Cricket API app details.
        Args:
            access_key : Cricket API APP access_key
            secret_key : Cricket API APP secret_key
            app_id : Cricket API APP app_id
            store_handler : RcaStorageHandler/RcaFileStorageHandler object
            device_id : User device_id
"""
if access_key:
self.access_key = access_key
elif os.environ.get("RCA_ACCESS_KEY"):
self.access_key = os.environ.get("RCA_ACCESS_KEY")
else:
raise Exception("access key is required.!! Try again")
if secret_key:
self.secret_key = secret_key
elif os.environ.get("RCA_SECRET_KEY"):
self.secret_key = os.environ.get("RCA_SECRET_KEY")
else:
raise Exception("secret key is required.!! Try again")
if app_id:
self.app_id = app_id
elif os.environ.get("RCA_APP_ID"):
self.app_id = os.environ.get("RCA_APP_ID")
else:
raise Exception("app id is required.!! try again")
if store_handler:
self.store_handler = store_handler
else:
self.store_handler = RcaStorageHandler()
self.api_path = "https://rest.cricketapi.com/rest/v2/"
self.api_path_v3 = "https://rest.cricketapi.com/rest/v3/"
self.api_path_v4 = "https://rest.cricketapi.com/rest/v4/"
if device_id:
new_id = device_id
else:
new_id = self.store_handler.new_device_id()
self.store_handler.set_value("device_id", new_id)
self.device_id = new_id
self.auth()
def auth(self):
"""
        Auth is used to call the AUTH API of CricketAPI.
        An access token is required for every request to CricketAPI.
        This method posts the user's Cricket API app details to the server
        and stores the returned access token.
Return:
Access token
"""
if not self.store_handler.has_value('access_token'):
params = {}
params["access_key"] = self.access_key
params["secret_key"] = self.secret_key
params["app_id"] = self.app_id
params["device_id"] = self.device_id
auth_url = self.api_path + "auth/"
response = self.get_response(auth_url, params, "post")
if 'auth' in response:
self.store_handler.set_value("access_token", response['auth']['access_token'])
self.store_handler.set_value("expires", response['auth']['expires'])
logger.info('Getting new access token')
else:
msg = "Error getting access_token, " + \
"please verify your access_key, secret_key and app_id"
logger.error(msg)
raise Exception("Auth Failed, please check your access details")
def get_response(self, url, params={}, method="get"):
"""
        Returns the JSON response for the given url, params and method.
Arg:
params: 'dictionary'
url: 'url' format
            method: defaults to 'get'; 'post' is also supported
Return:
json data
"""
if method == "post":
response_data = json.loads(requests.post(url, params=params).text)
else:
params["access_token"] = self.get_active_token()
response_data = json.loads(requests.get(url, params=params).text)
if not response_data['status_code'] == 200:
if "status_msg" in response_data:
logger.error("Bad response: " + response_data['status_msg'])
else:
logger.error("Some thing went wrong, please check your " + \
"request params Example: card_type and date")
return response_data
def get_active_token(self):
"""
        Returns a valid access token.
        Access tokens expire every 24 hours; if the stored token has expired,
        a new one is generated.
Return:
active access token
"""
expire_time = self.store_handler.has_value("expires")
access_token = self.store_handler.has_value("access_token")
if expire_time and access_token:
expire_time = self.store_handler.get_value("expires")
if not datetime.now() < datetime.fromtimestamp(float(expire_time)):
self.store_handler.delete_value("access_token")
self.store_handler.delete_value("expires")
logger.info('Access token expired, going to get new token')
self.auth()
else:
                logger.info('Access token not expired yet')
else:
self.auth()
return self.store_handler.get_value("access_token")
def get_match(self, match_key, card_type="full_card"):
"""
Calling the Match API.
Arg:
match_key: key of the match
            card_type: optional, defaults to full_card. Accepted values are
            micro_card, summary_card & full_card. For MG101 coverage, card_type must be passed as metric_101.
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in match_key:
base_path = self.api_path_v4
match_url = base_path + "match/" + match_key + "/"
params = {}
params["card_type"] = card_type
response = self.get_response(match_url, params)
return response
def get_recent_matches(self, card_type="micro_card"):
"""
Calling the Recent Matches API.
Arg:
            card_type: optional, defaults to micro_card. Accepted values are
micro_card & summary_card.
Return:
json data
Supported Metric Groups:
MG100
"""
recent_matches_url = self.api_path + "recent_matches/"
params = {}
params["card_type"] = card_type
response = self.get_response(recent_matches_url, params)
return response
def get_player_stats(self, player_key, board_key):
"""
Calling the Player Stats API
Args:
player_key: Key of the player
board_key: key of the board
Return:
json data
"""
player_stats_url = self.api_path + 'player/' + player_key + '/league/' + board_key + '/stats/'
response = self.get_response(player_stats_url)
return response
def get_ball_by_ball(self, match_key, over_key=None):
"""
match_key: key of the match
over_key : key of the over
Supported Metric Groups:
MG100
Return:
json data:
"""
if over_key:
ball_by_ball_url = "{base_path}match/{match_key}/balls/{over_key}/".format(base_path=self.api_path, match_key=match_key, over_key=over_key)
else:
ball_by_ball_url = "{base_path}match/{match_key}/balls/".format(base_path=self.api_path, match_key=match_key)
response = self.get_response(ball_by_ball_url)
return response
def get_recent_season_matches(self, season_key):
"""
Calling specific season recent matches.
Arg:
season_key: key of the season.
Return:
            json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in season_key:
base_path = self.api_path_v4
season_recent_matches_url = base_path + "season/" + season_key + "/recent_matches/"
response = self.get_response(season_recent_matches_url)
return response
def get_recent_seasons(self):
"""
Calling the Recent Season API.
Return:
json data
Supported Metric Groups:
MG100
"""
recent_seasons_url = self.api_path + "recent_seasons/"
response = self.get_response(recent_seasons_url)
return response
def get_schedule(self, date=None):
"""
Calling the Schedule API.
Return:
json data
Supported Metric Groups:
MG100
"""
schedule_url = self.api_path + "schedule/"
params = {}
if date:
params['date'] = date
response = self.get_response(schedule_url, params)
return response
def get_season_schedule(self, season_key):
"""
Calling specific season schedule
Arg:
season_key: key of the season
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in season_key:
base_path = self.api_path_v4
schedule_url = base_path + "season/" + season_key + "/schedule/"
response = self.get_response(schedule_url)
return response
def get_season(self, season_key, card_type="micro_card"):
"""
Calling Season API.
Arg:
season_key: key of the season
            card_type: optional, defaults to micro_card. Accepted values are
micro_card & summary_card
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in season_key:
base_path = self.api_path_v4
season_url = base_path + "season/" + season_key + "/"
params = {}
params["card_type"] = card_type
response = self.get_response(season_url, params)
return response
def get_season_stats(self, season_key):
"""
Calling Season Stats API.
Arg:
season_key: key of the season
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in season_key:
base_path = self.api_path_v4
season_stats_url = base_path + "season/" + season_key + "/stats/"
response = self.get_response(season_stats_url)
return response
def get_season_team(self, season_key, season_team_key, stats_type=None):
"""
Calling Season teams API
Arg:
season_key: key of the season
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in season_key:
base_path = self.api_path_v4
params = {"stats_type": stats_type}
season_team_url = base_path + 'season/' + season_key + '/team/' + season_team_key + '/'
response = self.get_response(season_team_url, params=params)
return response
def get_season_points(self, season_key):
"""
Calling Season Points API.
Arg:
season_key: key of the season
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in season_key:
base_path = self.api_path_v4
season_points_url = base_path + "season/" + season_key + "/points/"
response = self.get_response(season_points_url)
return response
def get_season_player_stats(self, season_key, player_key):
"""
Calling Season Player Stats API.
Arg:
season_key: key of the season
player_key: key of the player
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path
if "." in season_key:
base_path = self.api_path_v4
season_player_stats_url = base_path + "season/" + season_key + "/player/" + player_key + "/stats/"
response = self.get_response(season_player_stats_url)
return response
def get_overs_summary(self, match_key):
"""
Calling Overs Summary API
Arg:
match_key: key of the match
Return:
json data
Supported Metric Groups:
MG100
"""
overs_summary_url = self.api_path + "match/" + match_key + "/overs_summary/"
response = self.get_response(overs_summary_url)
return response
def get_news_aggregation(self):
"""
Calling News Aggregation API
Return:
json data
"""
news_aggregation_url = self.api_path + "news_aggregation" + "/"
response = self.get_response(news_aggregation_url)
return response
def get_fantasy_credits(self, match_key, model_key=None):
"""
Calling Fantasy Credit API
Arg:
match_key: key of the match
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
base_path = self.api_path_v3
params = {}
if model_key:
params['model'] = model_key
if "." in match_key:
base_path = self.api_path_v4
fantasy_credit_url = base_path + "fantasy-match-credits/" + match_key + "/"
response = self.get_response(fantasy_credit_url, params)
return response
def get_fantasy_points(self, match_key, model_key=None):
"""
Calling Fantasy Points API
Arg:
match_key: key of the match
Return:
json data
Supported Metric Groups:
MG100, MG101
"""
params = {}
if model_key:
params['model'] = model_key
base_path = self.api_path_v3
if "." in match_key:
base_path = self.api_path_v4
fantasy_points_url = base_path + "fantasy-match-points/" + match_key + "/"
response = self.get_response(fantasy_points_url, params)
return response
def get_v4_coverage(self):
"""
Calling the v4 Coverage API.
Return:
json data
Supported Metric Groups:
MG101
"""
coverage_url = self.api_path_v4 + "coverage/"
response = self.get_response(coverage_url)
return response
def get_v4_board_schedule(self, board_key, month=None):
"""
Calling the Board Schedule API.
Return:
json data
Supported Metric Groups:
MG101
"""
schedule_url = self.api_path_v4 + "board/" + board_key + "/schedule/"
params = {}
if month:
params['month'] = month
response = self.get_response(schedule_url, params)
return response
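# Usage sketch (all keys below are placeholders, not real credentials or real
# match keys):
#
#   handler = RcaFileStorageHandler()
#   app = RcaApp(access_key='ACCESS_KEY', secret_key='SECRET_KEY',
#                app_id='APP_ID', store_handler=handler)
#   match = app.get_match('MATCH_KEY', card_type='summary_card')
#   schedule = app.get_schedule()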
|
|
#!/usr/bin/env python
# Copyright 2015 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import distutils.spawn
import itertools
import os
import shutil
import sys
# pylint: disable=relative-import
import common
class PathMode:
"""PathMode indicates the available expected results' paths.
Attributes:
DEFAULT: Used for default expected paths in the format of
'NAME_expected(_OSNAME)?.pdf.#.png'. For a test, this path always
exists.
SKIA: Used when Skia or SkiaPaths is enabled, for paths in the format
of 'NAME_expected_skia(_OSNAME)?.pdf.#.png'.
Such paths only exist when the expected results of Skia or
SkiaPaths are different from those of AGG.
SKIAPATHS: Used when SkiaPaths is enabled, for path in the format of
'NAME_expected_skiapaths(_OSNAME)?.pdf.#.png'.
Such paths only exist when the expected results from using AGG,
Skia and SkiaPaths are all different from each other.
  Always check PathMode in incrementing order, as the modes are listed in
  order of their matching paths' precedence.
"""
DEFAULT = 0
SKIA = 1
SKIAPATHS = 2
class NotFoundError(Exception):
"""Raised when file doesn't exist"""
pass
class PNGDiffer():
def __init__(self, finder, features, reverse_byte_order):
self.pdfium_diff_path = finder.ExecutablePath('pdfium_diff')
self.os_name = finder.os_name
self.reverse_byte_order = reverse_byte_order
if 'SKIAPATHS' in features:
self.max_path_mode = PathMode.SKIAPATHS
elif 'SKIA' in features:
self.max_path_mode = PathMode.SKIA
else:
self.max_path_mode = PathMode.DEFAULT
def CheckMissingTools(self, regenerate_expected):
if (regenerate_expected and self.os_name == 'linux' and
not distutils.spawn.find_executable('optipng')):
return 'Please install "optipng" to regenerate expected images.'
return None
def GetActualFiles(self, input_filename, source_dir, working_dir):
actual_paths = []
path_templates = PathTemplates(input_filename, source_dir, working_dir,
self.os_name, self.max_path_mode)
for page in itertools.count():
actual_path = path_templates.GetActualPath(page)
expected_paths = path_templates.GetExpectedPaths(page)
if any(itertools.imap(os.path.exists, expected_paths)):
actual_paths.append(actual_path)
else:
break
return actual_paths
def _RunImageDiffCommand(self, expected_path, actual_path):
if not os.path.exists(expected_path):
return NotFoundError('%s does not exist.' % expected_path)
cmd = [self.pdfium_diff_path]
if self.reverse_byte_order:
cmd.append('--reverse-byte-order')
cmd.extend([expected_path, actual_path])
return common.RunCommand(cmd)
def HasDifferences(self, input_filename, source_dir, working_dir):
path_templates = PathTemplates(input_filename, source_dir, working_dir,
self.os_name, self.max_path_mode)
for page in itertools.count():
actual_path = path_templates.GetActualPath(page)
expected_paths = path_templates.GetExpectedPaths(page)
if not any(itertools.imap(os.path.exists, expected_paths)):
if page == 0:
print "WARNING: no expected results files for " + input_filename
if os.path.exists(actual_path):
print('FAILURE: Missing expected result for 0-based page %d of %s' %
(page, input_filename))
return True
break
print "Checking " + actual_path
sys.stdout.flush()
error = None
for path in expected_paths:
new_error = self._RunImageDiffCommand(path, actual_path)
# Update error code. No need to overwrite the previous error code if
# |path| doesn't exist.
if not isinstance(new_error, NotFoundError):
error = new_error
# Found a match and proceed to next page
if not error:
break
if error:
print "FAILURE: " + input_filename + "; " + str(error)
return True
return False
# TODO(crbug.com/pdfium/1508): Add support to automatically generate
# Skia/SkiaPaths specific expected results.
def Regenerate(self, input_filename, source_dir, working_dir, platform_only):
path_templates = PathTemplates(input_filename, source_dir, working_dir,
self.os_name, self.max_path_mode)
for page in itertools.count():
# Loop through the generated page images. Stop when there is a page
# missing a png, which means the document ended.
actual_path = path_templates.GetActualPath(page)
if not os.path.isfile(actual_path):
break
platform_expected_path = path_templates.GetExpectedPathByPathMode(
page, PathMode.DEFAULT, self.os_name)
# If there is a platform expected png, we will overwrite it. Otherwise,
# overwrite the generic png in "all" mode, or do nothing in "platform"
# mode.
if os.path.exists(platform_expected_path):
expected_path = platform_expected_path
elif not platform_only:
expected_path = path_templates.GetExpectedPathByPathMode(
page, PathMode.DEFAULT)
else:
continue
shutil.copyfile(actual_path, expected_path)
common.RunCommand(['optipng', expected_path])
ACTUAL_TEMPLATE = '.pdf.%d.png'
class PathTemplates(object):
def __init__(self, input_filename, source_dir, working_dir, os_name,
max_path_mode):
assert PathMode.DEFAULT <= max_path_mode <= PathMode.SKIAPATHS, (
'Unexpected Maximum PathMode: %d.' % max_path_mode)
input_root, _ = os.path.splitext(input_filename)
self.actual_path_template = os.path.join(working_dir,
input_root + ACTUAL_TEMPLATE)
self.source_dir = source_dir
self.input_root = input_root
self.max_path_mode = max_path_mode
self.os_name = os_name
# Pre-create the available templates depending on |max_path_mode|.
self.expected_templates = []
for mode in range(PathMode.DEFAULT, max_path_mode + 1):
self.expected_templates.extend([
self._GetExpectedTemplateByPathMode(mode),
self._GetExpectedTemplateByPathMode(mode, os_name),
])
def GetActualPath(self, page):
return self.actual_path_template % page
def _GetExpectedTemplateByPathMode(self, mode, os_name=None):
expected_str = '_expected'
if mode == PathMode.DEFAULT:
pass
elif mode == PathMode.SKIA:
expected_str += '_skia'
elif mode == PathMode.SKIAPATHS:
expected_str += '_skiapaths'
else:
assert False, 'Unexpected PathMode: %d.' % mode
if os_name:
expected_str += '_' + self.os_name
return os.path.join(self.source_dir,
self.input_root + expected_str + ACTUAL_TEMPLATE)
def GetExpectedPathByPathMode(self, page, mode, os_name=None):
return self._GetExpectedTemplateByPathMode(mode, os_name) % page
def GetExpectedPaths(self, page):
return [template % page for template in self.expected_templates]
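# Example (illustrative): for input file 'bug_123.pdf' on linux with the SKIA
# feature enabled, GetExpectedPaths(0) yields candidates in precedence order:
#   <source_dir>/bug_123_expected.pdf.0.png
#   <source_dir>/bug_123_expected_linux.pdf.0.png
#   <source_dir>/bug_123_expected_skia.pdf.0.png
#   <source_dir>/bug_123_expected_skia_linux.pdf.0.png
# Only the candidates that actually exist on disk are considered when diffing.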
|
|
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .. import numeric
import abc, treelog, functools, numpy, collections
class MatrixError(Exception):
'''
General error message for matrix-related failure.
'''
class BackendNotAvailable(MatrixError):
'''
Error message reporting that the selected matrix backend is not available on
the system.
'''
class ToleranceNotReached(MatrixError):
'''
Error message reporting that the configured linear solver tolerance was not
reached. The ``.best`` attribute carries the non-conforming solution.
'''
def __init__(self, best):
super().__init__('solver failed to reach tolerance')
self.best = best
class Matrix:
'matrix base class'
def __init__(self, shape):
assert len(shape) == 2
self.shape = shape
self._precon_args = None
def __reduce__(self):
from . import assemble
data, index = self.export('coo')
return assemble, (data, index, self.shape)
@abc.abstractmethod
def __add__(self, other):
'add two matrices'
raise NotImplementedError
@abc.abstractmethod
def __mul__(self, other):
'multiply matrix with a scalar'
raise NotImplementedError
@abc.abstractmethod
def __matmul__(self, other):
'multiply matrix with a dense tensor'
raise NotImplementedError
@abc.abstractmethod
def __neg__(self):
'negate matrix'
raise NotImplementedError
def __sub__(self, other):
return self.__add__(-other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.__mul__(1/other)
@property
@abc.abstractmethod
def T(self):
'transpose matrix'
raise NotImplementedError
@property
def size(self):
return numpy.prod(self.shape)
def rowsupp(self, tol=0):
'return row indices with nonzero/non-small entries'
data, (row, col) = self.export('coo')
supp = numpy.zeros(self.shape[0], dtype=bool)
supp[row[abs(data) > tol]] = True
return supp
@treelog.withcontext
def solve(self, rhs=None, *, lhs0=None, constrain=None, rconstrain=None, solver='arnoldi', atol=0., rtol=0., **solverargs):
'''Solve system given right hand side vector and/or constraints.
Args
----
rhs : :class:`float` vector or :any:`None`
Right hand side vector. A :any:`None` value implies the zero vector.
    lhs0 : :class:`float` vector or :any:`None`
Initial values: compute the solution by solving ``A dx = b - A lhs0``.
A :any:`None` value implies the zero vector, i.e. solving ``A x = b``
directly.
    constrain : :class:`float` or :class:`bool` array, or :any:`None`
        Column constraints. For float values, a number signifies a constraint,
        NaN signifies a free dof. For boolean, a :any:`True` value signifies a
        constraint to the value in ``lhs0``, a :any:`False` value signifies a
        free dof. A :any:`None` value implies no constraints.
rconstrain : :class:`bool` array or :any:`None`
        Row constraints. A :any:`True` value signifies a constraint, a :any:`False`
        value a free dof. A :any:`None` value implies that the constraints follow those
defined in ``constrain`` (by implication the matrix must be square).
solver : :class:`str`
Name of the solver algorithm. The set of available solvers depends on
the type of the matrix (i.e. the active backend), although the 'direct'
and 'arnoldi' solvers are always available.
rtol : :class:`float`
Relative tolerance: see ``atol``.
atol : :class:`float`
Absolute tolerance: require that ``|A x - b| <= max(atol, rtol |b|)``
after applying constraints and the initial value. In case ``atol`` and
``rtol`` are both zero (the defaults) solve to machine precision.
Otherwise fail with :class:`nutils.matrix.ToleranceNotReached` if the
requirement is not reached.
    **solverargs :
All remaining arguments are passed on to the selected solver method.
Returns
-------
:class:`numpy.ndarray`
Left hand side vector.
'''
# absent an initial guess and constraints we can directly forward to _solver
if lhs0 is constrain is rconstrain is None:
return self._solver(rhs, solver, atol=atol, rtol=rtol, **solverargs)
# otherwise we need to do some pre- and post-processing
nrows, ncols = self.shape
if rhs is None:
rhs = numpy.zeros(nrows)
if lhs0 is None:
lhs = numpy.zeros((ncols,)+rhs.shape[1:])
else:
lhs = numpy.array(lhs0, dtype=float)
while lhs.ndim < rhs.ndim:
lhs = lhs[...,numpy.newaxis].repeat(rhs.shape[lhs.ndim], axis=lhs.ndim)
assert lhs.shape == (ncols,)+rhs.shape[1:]
if constrain is None:
J = numpy.ones(ncols, dtype=bool)
else:
assert constrain.shape == (ncols,)
if constrain.dtype == bool:
J = ~constrain
else:
J = numpy.isnan(constrain)
lhs[~J] = constrain[~J]
if rconstrain is None:
assert nrows == ncols
I = J
else:
assert rconstrain.shape == (nrows,) and constrain.dtype == bool
I = ~rconstrain
lhs[J] += self.submatrix(I, J)._solver((rhs - self @ lhs)[I], solver, atol=atol, rtol=rtol, **solverargs)
return lhs
def solve_leniently(self, *args, **kwargs):
'''
Identical to :func:`nutils.matrix.Matrix.solve`, but emits a warning rather
than raising an exception when tolerances are not met, and returns the
obtained solution vector.
'''
try:
return self.solve(*args, **kwargs)
except ToleranceNotReached as e:
treelog.warning(e)
return e.best
def _method(self, prefix, attr):
if callable(attr):
return functools.partial(attr, self), getattr(attr, '__name__', 'user defined')
if isinstance(attr, str):
fullattr = '_' + prefix + '_' + attr
if hasattr(self, fullattr):
return getattr(self, fullattr), attr
raise MatrixError('invalid {} {!r} for {}'.format(prefix, attr, self.__class__.__name__))
def _solver(self, rhs, solver, *, atol, rtol, **solverargs):
if self.shape[0] != self.shape[1]:
raise MatrixError('constrained matrix is not square: {}x{}'.format(*self.shape))
if rhs.shape[0] != self.shape[0]:
raise MatrixError('right-hand side shape does not match matrix shape')
rhsnorm = numpy.linalg.norm(rhs, axis=0).max()
atol = max(atol, rtol * rhsnorm)
if rhsnorm <= atol:
treelog.info('skipping solver because initial vector is within tolerance')
return numpy.zeros_like(rhs)
solver_method, solver_name = self._method('solver', solver)
treelog.info('solving {} dof system to {} using {} solver'.format(self.shape[0], 'tolerance {:.0e}'.format(atol) if atol else 'machine precision', solver_name))
try:
lhs = solver_method(rhs, atol=atol, **solverargs)
except MatrixError:
raise
except Exception as e:
raise MatrixError('solver failed with error: {}'.format(e)) from e
if not numpy.isfinite(lhs).all():
raise MatrixError('solver returned non-finite left hand side')
resnorm = numpy.linalg.norm(rhs - self @ lhs, axis=0).max()
treelog.info('solver returned with residual {:.0e}'.format(resnorm))
if resnorm > atol > 0:
raise ToleranceNotReached(lhs)
return lhs
def _solver_direct(self, rhs, atol, precon='direct', preconargs={}, **args):
solve = self.getprecon(precon, **args, **preconargs)
return solve(rhs)
def _solver_arnoldi(self, rhs, atol, precon='direct', truncate=None, preconargs={}, **args):
solve = self.getprecon(precon, **args, **preconargs)
lhs = numpy.zeros_like(rhs)
res = rhs
resnorm = numpy.linalg.norm(res, axis=0).max()
krylov = collections.deque(maxlen=truncate) # unlimited if truncate is None
while resnorm > atol:
k = solve(res)
v = self @ k
# In the following we use sum rather than dot for slightly higher accuracy due to partial
# pairwise summation, see https://numpy.org/doc/stable/reference/generated/numpy.sum.html
for k_, v_, v2_ in krylov: # orthogonalize v (modified Gram-Schmidt)
c = numpy.multiply(v, v_, order='F').sum(0) / v2_
k -= k_ * c
v -= v_ * c
v2 = numpy.square(v, order='F').sum(0)
c = numpy.multiply(v, res, order='F').sum(0) / v2 # min_c |res - c v| => c = res.v / v.v
newlhs = lhs + k * c
res = rhs - self @ newlhs # recompute rather than update to avoid drift
newresnorm = numpy.linalg.norm(res, axis=0).max()
if not numpy.isfinite(newresnorm) or newresnorm >= resnorm:
break
treelog.debug('residual decreased by {:.1f} orders using {} krylov vectors'.format(numpy.log10(resnorm/newresnorm), len(krylov)))
lhs = newlhs
resnorm = newresnorm
krylov.append((k, v, v2))
return lhs
def submatrix(self, rows, cols):
'''Create submatrix from selected rows, columns.
Args
----
rows : :class:`bool`/:class:`int` array selecting rows for keeping
cols : :class:`bool`/:class:`int` array selecting columns for keeping
Returns
-------
:class:`Matrix`
Matrix instance of reduced dimensions
'''
rows = numeric.asboolean(rows, self.shape[0])
cols = numeric.asboolean(cols, self.shape[1])
if rows.all() and cols.all():
return self
return self._submatrix(rows, cols)
@abc.abstractmethod
def _submatrix(self, rows, cols):
raise NotImplementedError
def export(self, form):
'''Export matrix data to any of supported forms.
Args
----
form : :class:`str`
- "dense" : return matrix as a single dense array
- "csr" : return matrix as 3-tuple of (data, indices, indptr)
- "coo" : return matrix as 2-tuple of (data, (row, col))
'''
raise NotImplementedError('cannot export {} to {!r}'.format(self.__class__.__name__, form))
def diagonal(self):
nrows, ncols = self.shape
if nrows != ncols:
raise MatrixError('failed to extract diagonal: matrix is not square')
data, indices, indptr = self.export('csr')
diag = numpy.empty(nrows)
for irow in range(nrows):
icols = indices[indptr[irow]:indptr[irow+1]]
idiag = numpy.searchsorted(icols, irow)
diag[irow] = data[indptr[irow]+idiag] if idiag < len(icols) and icols[idiag] == irow else 0
return diag
def getprecon(self, precon, **args):
if (precon, args) == self._precon_args:
return self._precon_object
if self.shape[0] != self.shape[1]:
raise MatrixError('matrix must be square')
precon_method, precon_name = self._method('precon', precon)
try:
with treelog.context('constructing {} preconditioner'.format(precon_name)):
precon_object = precon_method(**args)
except MatrixError:
raise
except Exception as e:
raise MatrixError('failed to create preconditioner: {}'.format(e)) from e
self._precon_args = precon, args
self._precon_object = precon_object
return precon_object
def _precon_diag(self):
diag = self.diagonal()
if not diag.all():
raise MatrixError("building 'diag' preconditioner: diagonal has zero entries")
return numpy.reciprocal(diag).__mul__
def __repr__(self):
return '{}<{}x{}>'.format(type(self).__qualname__, *self.shape)
# vim:sw=2:sts=2:et
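# A minimal usage sketch, not part of the module proper: it illustrates the
# constrained-solve pattern documented in ``Matrix.solve`` above. It assumes a
# concrete backend is active so that ``assemble(data, index, shape)`` (the same
# helper used in ``Matrix.__reduce__``) returns a ``Matrix`` instance; the
# matrix values and tolerance below are hypothetical.
def _example_constrained_solve():
    'assemble a small SPD matrix and solve it with the last dof pinned to zero'
    from . import assemble
    data = numpy.array([2., -1., -1., 2., -1., -1., 2.])
    index = numpy.array([[0, 0, 1, 1, 1, 2, 2],
                         [0, 1, 0, 1, 2, 1, 2]])
    A = assemble(data, index, (3, 3))
    rhs = numpy.array([1., 0., 0.])
    cons = numpy.array([numpy.nan, numpy.nan, 0.])  # NaN marks a free dof
    # raises ToleranceNotReached if the residual requirement cannot be met
    return A.solve(rhs, constrain=cons, atol=1e-10)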
|
|
from common_fixtures import * # NOQA
import copy
from cattle import ApiError
from test_authorization import service_client # NOQA
from test_machines import machine_context
from test_svc_discovery import _validate_compose_instance_start
_ = machine_context # Needed just to avoid a pep-8 quirk
@pytest.fixture()
def infra_access_setting(admin_user_client):
id = 'modify.infrastructure.roles'
setting = admin_user_client.by_id_setting(id)
orig = setting.value
no_member = orig.replace('member,', '')
setting = admin_user_client.update(setting, value=no_member)
wait_setting_active(admin_user_client, setting)
yield
setting = admin_user_client.by_id_setting(id)
setting = admin_user_client.update(setting, value=orig)
wait_setting_active(admin_user_client, setting)
@pytest.mark.nonparallel
def test_restricted_infra_access(new_context, admin_user_client,
infra_access_setting, machine_context):
client = new_context.client
member = create_user(new_context, admin_user_client, 'member')
host = new_context.host
# No actions on host
m_host = member.by_id_host(host.id)
assert len(m_host.actions) == 0
# Can't create host
with pytest.raises(ApiError) as e:
member.create_host(hostname='foo')
assert e.value.error.status == 403
# Can't update host
with pytest.raises(ApiError) as e:
member.update(m_host, name='foo')
assert e.value.error.status == 403
# Can't delete host
with pytest.raises(ApiError) as e:
member.delete(m_host)
assert e.value.error.status == 403
reg_tokens = client.list_registration_token()
assert len(reg_tokens) > 0
reg_token = reg_tokens[0]
# Can't see any registration tokens
m_reg_tokens = member.list_registration_token()
assert len(m_reg_tokens) == 0
# Can't create registration token
with pytest.raises(ApiError) as e:
member.create_registration_token()
assert e.value.error.status == 403
# Can't update registration token
with pytest.raises(ApiError) as e:
member.update(reg_token, name=random_str())
assert e.value.error.status == 405
# Can't delete registration token
with pytest.raises(ApiError) as e:
member.delete(reg_token)
assert e.value.error.status == 405
# Physical host has no actions
ph = m_host.physicalHost()
assert len(ph.actions) == 0
# Can't update physical host
with pytest.raises(ApiError) as e:
member.update(ph, name=random_str())
assert e.value.error.status == 405
# Can't delete physical host
with pytest.raises(ApiError) as e:
member.delete(ph)
assert e.value.error.status == 405
# Owner creates machine
machine = client.create_machine(name=random_str(), fooConfig={})
machine = client.wait_success(machine)
# Machine has no actions
mem_machine = member.by_id_machine(machine.id)
assert len(mem_machine.actions) == 0
# Can't create machine
with pytest.raises(ApiError) as e:
member.create_machine(name=random_str(), fooConfig={})
assert e.value.error.status == 403
# Can't update machine
with pytest.raises(ApiError) as e:
member.update(mem_machine, name=random_str())
assert e.value.error.status == 403
# Can't delete machine
with pytest.raises(ApiError) as e:
member.delete(mem_machine)
assert e.value.error.status == 403
def test_restricted_from_system(new_context, admin_user_client):
restricted = create_user(new_context, admin_user_client)
# Restricted can't create system stack. system property not settable
rstack = restricted.create_stack(name=random_str(), system=True)
rstack = restricted.wait_success(rstack)
assert rstack.system is False
# Owner can create system stack
owner = new_context.owner_client
stack = owner.wait_success(owner.create_stack(name="test", system=True))
assert stack.state == "active"
assert stack.system is True
# Restricted can't update system stack
rstack = restricted.by_id_stack(stack.id)
assert rstack.name == "test"
with pytest.raises(ApiError) as e:
restricted.update(rstack, name="updated")
assert e.value.error.status == 403
# Restricted user should see no actions on system stack
assert len(rstack.actions) == 0
assert len(stack.actions) > 0
# Owner can update stack
stack = owner.update(stack, name="updated")
# Restricted can't create service in system stack
lc = {"imageUuid": new_context.image_uuid}
with pytest.raises(ApiError) as e:
restricted.create_service(name=random_str(), stackId=stack.id,
launchConfig=lc)
assert e.value.error.status == 403
# Owner can create service in system stack
svc = owner.create_service(name=random_str(), stackId=stack.id,
launchConfig=lc, scale=1)
svc = owner.wait_success(svc)
# Owner can activate
svc = owner.wait_success(svc.activate())
c = _validate_compose_instance_start(owner, svc, stack, "1")
# Owner can update system service
svc = owner.update(svc, name="update")
svc = owner.wait_success(svc)
# Restricted can't update system service
rsvc = restricted.by_id_service(svc.id)
with pytest.raises(ApiError) as e:
restricted.update(rsvc, name="update")
assert e.value.error.status == 403
# Restricted user should see no actions on system service
assert len(rsvc.actions) == 0
assert len(svc.actions) > 0
# Restricted can't delete system service
with pytest.raises(ApiError) as e:
restricted.delete(rsvc)
assert e.value.error.status == 403
# Restricted can't update system container
rc = restricted.by_id_container(c.id)
with pytest.raises(ApiError) as e:
restricted.update(rc, name="update")
assert e.value.error.status == 403
# Owner can update system container
c = owner.update(c, name="update")
# Restricted can only see the logs actions of system containers
assert len(rc.actions) == 1 and rc.actions["logs"]
assert len(c.actions) > 1
# Restricted can't delete system container
rc = restricted.by_id_container(c.id)
with pytest.raises(ApiError) as e:
restricted.delete(rc)
assert e.value.error.status == 403
# Owner can delete system container
owner.delete(c)
# Owner can delete system service
owner.delete(svc)
# Restricted can't delete system stack
with pytest.raises(ApiError) as e:
restricted.delete(rstack)
assert e.value.error.status == 403
# Owner can delete system stack
owner.delete(stack)
# Restricted user can do all the above things for non-system resources
stack = restricted.wait_success(restricted.create_stack(name="restricted"))
assert stack.state == "active"
assert stack.system is False
stack = restricted.update(stack, name="r-updated")
assert len(stack.actions) > 0
svc = restricted.create_service(name=random_str(), stackId=stack.id,
launchConfig=lc)
svc = restricted.wait_success(svc)
svc = restricted.wait_success(svc.activate())
assert len(svc.actions) > 0
c = _validate_compose_instance_start(restricted, svc, stack, "1")
c = restricted.update(c, name="r-updated")
assert len(c.actions) > 1
svc = restricted.update(svc, name="r-updated")
restricted.delete(c)
restricted.delete(svc)
restricted.delete(stack)
def test_restricted_agent_containers(new_context, admin_user_client):
restricted = create_user(new_context, admin_user_client)
client = new_context.client
c = new_context.create_container(labels={
'io.rancher.container.create_agent': 'true'
})
c = client.wait_success(c)
assert c.actions["execute"]
assert c.actions["proxy"]
rc = restricted.by_id_container(c.id)
assert "execute" not in rc.actions
assert "proxy" not in rc.actions
def test_restricted_privileged_cap_add(new_context, admin_user_client):
restricted = create_user(new_context, admin_user_client)
client = new_context.client
c = new_context.create_container(privileged=True)
c = client.wait_success(c)
assert c.actions["execute"]
assert c.actions["proxy"]
rc = restricted.by_id_container(c.id)
assert "execute" not in rc.actions
assert "proxy" not in rc.actions
c = new_context.create_container(capAdd=["ALL"])
c = client.wait_success(c)
assert c.actions["execute"]
assert c.actions["proxy"]
rc = restricted.by_id_container(c.id)
assert "execute" not in rc.actions
assert "proxy" not in rc.actions
def create_user(context, admin_user_client, role='restricted'):
context2 = create_context(admin_user_client)
restricted = context2.user_client
members = get_plain_members(context.project.projectMembers())
members.append({
'role': role,
'externalId': acc_id(restricted),
'externalIdType': 'rancher_id'
})
project = context.user_client.reload(context.project)
project.setmembers(members=members)
restricted = context2.user_client
new_headers = copy.deepcopy(restricted._headers)
new_headers['X-API-Project-Id'] = project.id
restricted._headers = new_headers
restricted.reload_schema()
return restricted
|
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase
from sleekxmpp.plugins import xep_0082
class Geoloc(ElementBase):
"""
XMPP's <geoloc> stanza allows entities to know the current
geographical or physical location of an entity. (XEP-0080: User Location)
Example <geoloc> stanzas:
<geoloc xmlns='http://jabber.org/protocol/geoloc'/>
<geoloc xmlns='http://jabber.org/protocol/geoloc' xml:lang='en'>
<accuracy>20</accuracy>
<country>Italy</country>
<lat>45.44</lat>
<locality>Venice</locality>
<lon>12.33</lon>
</geoloc>
Stanza Interface:
accuracy -- Horizontal GPS error in meters.
alt -- Altitude in meters above or below sea level.
area -- A named area such as a campus or neighborhood.
bearing -- GPS bearing (direction in which the entity is
heading to reach its next waypoint), measured in
decimal degrees relative to true north.
building -- A specific building on a street or in an area.
country -- The nation where the user is located.
countrycode -- The ISO 3166 two-letter country code.
datum -- GPS datum.
description -- A natural-language name for or description of
the location.
error -- Horizontal GPS error in arc minutes. Obsoleted by
the accuracy parameter.
floor -- A particular floor in a building.
lat -- Latitude in decimal degrees North.
locality -- A locality within the administrative region, such
as a town or city.
lon -- Longitude in decimal degrees East.
postalcode -- A code used for postal delivery.
region -- An administrative region of the nation, such
as a state or province.
room -- A particular room in a building.
speed -- The speed at which the entity is moving,
in meters per second.
street -- A thoroughfare within the locality, or a crossing
of two thoroughfares.
text -- A catch-all element that captures any other
information about the location.
timestamp -- UTC timestamp specifying the moment when the
reading was taken.
uri -- A URI or URL pointing to information about
the location.
"""
namespace = 'http://jabber.org/protocol/geoloc'
name = 'geoloc'
interfaces = set(('accuracy', 'alt', 'area', 'bearing', 'building',
'country', 'countrycode', 'datum', 'description',
'error', 'floor', 'lat', 'locality', 'lon',
'postalcode', 'region', 'room', 'speed', 'street',
'text', 'timestamp', 'uri'))
sub_interfaces = interfaces
plugin_attrib = name
def exception(self, e):
"""
Override exception passback for presence.
"""
pass
def set_accuracy(self, accuracy):
"""
Set the value of the <accuracy> element.
Arguments:
accuracy -- Horizontal GPS error in meters
"""
self._set_sub_text('accuracy', text=str(accuracy))
return self
def get_accuracy(self):
"""
Return the value of the <accuracy> element as an integer.
"""
p = self._get_sub_text('accuracy')
if not p:
return None
else:
try:
return int(p)
except ValueError:
return None
def set_alt(self, alt):
"""
Set the value of the <alt> element.
Arguments:
alt -- Altitude in meters above or below sea level
"""
self._set_sub_text('alt', text=str(alt))
return self
def get_alt(self):
"""
Return the value of the <alt> element as an integer.
"""
p = self._get_sub_text('alt')
if not p:
return None
else:
try:
return int(p)
except ValueError:
return None
def set_bearing(self, bearing):
"""
Set the value of the <bearing> element.
Arguments:
bearing -- GPS bearing (direction in which the entity is heading
to reach its next waypoint), measured in decimal
degrees relative to true north
"""
self._set_sub_text('bearing', text=str(bearing))
return self
def get_bearing(self):
"""
Return the value of the <bearing> element as a float.
"""
p = self._get_sub_text('bearing')
if not p:
return None
else:
try:
return float(p)
except ValueError:
return None
def set_error(self, error):
"""
Set the value of the <error> element.
Arguments:
error -- Horizontal GPS error in arc minutes; this
element is deprecated in favor of <accuracy/>
"""
self._set_sub_text('error', text=str(error))
return self
def get_error(self):
"""
Return the value of the <error> element as a float.
"""
p = self._get_sub_text('error')
if not p:
return None
else:
try:
return float(p)
except ValueError:
return None
def set_lat(self, lat):
"""
Set the value of the <lat> element.
Arguments:
lat -- Latitude in decimal degrees North
"""
self._set_sub_text('lat', text=str(lat))
return self
def get_lat(self):
"""
Return the value of the <lat> element as a float.
"""
p = self._get_sub_text('lat')
if not p:
return None
else:
try:
return float(p)
except ValueError:
return None
def set_lon(self, lon):
"""
Set the value of the <lon> element.
Arguments:
lon -- Longitude in decimal degrees East
"""
self._set_sub_text('lon', text=str(lon))
return self
def get_lon(self):
"""
Return the value of the <lon> element as a float.
"""
p = self._get_sub_text('lon')
if not p:
return None
else:
try:
return float(p)
except ValueError:
return None
def set_speed(self, speed):
"""
Set the value of the <speed> element.
Arguments:
speed -- The speed at which the entity is moving,
in meters per second
"""
self._set_sub_text('speed', text=str(speed))
return self
def get_speed(self):
"""
Return the value of the <speed> element as a float.
"""
p = self._get_sub_text('speed')
if not p:
return None
else:
try:
return float(p)
except ValueError:
return None
def set_timestamp(self, timestamp):
"""
Set the value of the <timestamp> element.
Arguments:
timestamp -- UTC timestamp specifying the moment when
the reading was taken
"""
self._set_sub_text('timestamp', text=str(xep_0082.datetime(timestamp)))
return self
def get_timestamp(self):
"""
Return the value of the <timestamp> element as a DateTime.
"""
p = self._get_sub_text('timestamp')
if not p:
return None
else:
return xep_0082.datetime(p)
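# A minimal usage sketch (not part of the upstream plugin): it exercises only
# the stanza interface defined above; item access for the plain sub-interfaces
# is the generic ElementBase behaviour. Registering the stanza so it rides on
# presence or pubsub items is handled elsewhere in the XEP-0080 plugin.
def _example_geoloc():
    """
    Build a <geoloc> element for Venice and read the values back.
    """
    loc = Geoloc()
    loc.set_lat(45.44)
    loc.set_lon(12.33)
    loc.set_accuracy(20)
    loc['country'] = 'Italy'
    loc['locality'] = 'Venice'
    return loc.get_lat(), loc.get_lon(), loc.get_accuracy()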
|
|
from __future__ import absolute_import, unicode_literals
import django
from django.db import transaction
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.db.models.sql.constants import SINGLE
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import Query
from django.db.models.sql.subqueries import UpdateQuery
from django.db.models.sql.where import WhereNode
from django.utils import six
from django_hstore.apps import GEODJANGO_INSTALLED
from django_hstore.utils import get_cast_for_param, get_value_annotations
try:
# django <= 1.8
from django.db.models.sql.where import EmptyShortCircuit
except ImportError:
# django >= 1.9
EmptyShortCircuit = Exception
def get_field(self, name):
if django.VERSION >= (1, 8):
return self.model._meta.get_field(name)
else:
return self.model._meta.get_field_by_name(name)[0]
def select_query(method):
def selector(self, *args, **params):
query = self.query.clone()
query.default_cols = False
query.clear_select_fields()
return method(self, query, *args, **params)
return selector
def update_query(method):
def updater(self, *args, **params):
self._for_write = True
query = method(self, self.query.clone(UpdateQuery), *args, **params)
with transaction.atomic(using=self.db):
rows = query.get_compiler(self.db).execute_sql(None)
self._result_cache = None
return rows
updater.alters_data = True
return updater
class HStoreWhereNode(WhereNode):
def add(self, data, *args, **kwargs):
# WhereNode will convert params into strings, so we need to record
# the type of the params as part of the value_annotation before calling
# the super class
if not isinstance(data, (list, tuple)):
return super(HStoreWhereNode, self).add(data, *args, **kwargs)
original_value = data[2]
if isinstance(original_value, dict):
len_children = len(self.children) if self.children else 0
value_annot = get_value_annotations(original_value)
# We should be able to get the normal child node here, but it is not returned in Django 1.5
super(HStoreWhereNode, self).add(data, *args, **kwargs)
# We also need to place the type annotation into self.children
# It will either be the last item in the last child, or be the last child
# We can tell which by comparing the lengths before and after calling the super method
if len_children < len(self.children):
child = self.children[-1]
obj, lookup_type, _, value = child
annotated_child = (obj, lookup_type, value_annot, value)
self.children[-1] = annotated_child
else:
child = self.children[-1][-1]
obj, lookup_type, _, value = child
annotated_child = (obj, lookup_type, value_annot, value)
self.children[-1][-1] = annotated_child
else:
return super(HStoreWhereNode, self).add(data, *args, **kwargs)
# FIXME: this method should be clearer.
def make_atom(self, child, qn, connection):
lvalue, lookup_type, value_annot, param = child
kwargs = {'connection': connection}
if lvalue and lvalue.field and hasattr(lvalue.field, 'db_type') and lvalue.field.db_type(**kwargs) == 'hstore':
try:
lvalue, params = lvalue.process(lookup_type, param, connection)
except EmptyShortCircuit:
raise EmptyResultSet()
field = self.sql_for_columns(lvalue, qn, connection)
if lookup_type == 'exact':
if isinstance(param, dict):
return ('{0} = %s'.format(field), [param])
raise ValueError('invalid value')
elif lookup_type in ('gt', 'gte', 'lt', 'lte'):
if isinstance(param, dict):
sign = (lookup_type[0] == 'g' and '>%s' or '<%s') % (lookup_type[-1] == 'e' and '=' or '')
param_keys = list(param.keys())
conditions = []
for key in param_keys:
cast = get_cast_for_param(value_annot, key)
conditions.append('(%s->\'%s\')%s %s %%s' % (field, key, cast, sign))
return (" AND ".join(conditions), param.values())
raise ValueError('invalid value')
elif lookup_type in ['contains', 'icontains']:
if isinstance(param, dict):
values = list(param.values())
keys = list(param.keys())
if len(values) == 1 and isinstance(values[0], (list, tuple)):
# Can't cast here because the list could contain multiple types
return ('%s->\'%s\' = ANY(%%s)' % (field, keys[0]), [[str(x) for x in values[0]]])
elif len(keys) == 1 and len(values) == 1:
# Retrieve key and compare to param instead of using '@>' in order to cast hstore value
cast = get_cast_for_param(value_annot, keys[0])
return ('(%s->\'%s\')%s = %%s' % (field, keys[0], cast), [values[0]])
return ('%s @> %%s' % field, [param])
elif isinstance(param, (list, tuple)):
if len(param) == 0:
raise ValueError('invalid value')
if len(param) < 2:
return ('%s ? %%s' % field, [param[0]])
if param:
return ('%s ?& %%s' % field, [param])
raise ValueError('invalid value')
elif isinstance(param, six.string_types):
# if looking for a string perform the normal text lookup
# that is: look for occurrence of the string in all the keys
pass
elif hasattr(child[0].field, 'serializer'):
try:
child[0].field._serialize_value(param)
pass
except Exception:
raise ValueError('invalid value')
else:
raise ValueError('invalid value')
elif lookup_type == 'isnull':
if isinstance(param, dict):
param_keys = list(param.keys())
conditions = []
for key in param_keys:
op = 'IS NULL' if value_annot[key] else 'IS NOT NULL'
conditions.append('(%s->\'%s\') %s' % (field, key, op))
return (" AND ".join(conditions), [])
# do not perform any special format
return super(HStoreWhereNode, self).make_atom(child, qn, connection)
else:
raise TypeError('invalid lookup type')
return super(HStoreWhereNode, self).make_atom(child, qn, connection)
make_hstore_atom = make_atom
class HStoreQuery(Query):
def __init__(self, model):
super(HStoreQuery, self).__init__(model, HStoreWhereNode)
class HStoreQuerySet(QuerySet):
def __init__(self, model=None, query=None, using=None, *args, **kwargs):
query = query or HStoreQuery(model)
super(HStoreQuerySet, self).__init__(model=model, query=query, using=using, *args, **kwargs)
@select_query
def hkeys(self, query, attr):
"""
Enumerates the keys in the specified hstore.
"""
query.add_extra({'_': 'akeys("%s")' % attr}, None, None, None, None, None)
result = query.get_compiler(self.db).execute_sql(SINGLE)
return (result[0] if result else [])
@select_query
def hpeek(self, query, attr, key):
"""
Peeks at a value of the specified key.
"""
query.add_extra({'_': '%s -> %%s' % attr}, [key], None, None, None, None)
result = query.get_compiler(self.db).execute_sql(SINGLE)
if result and result[0]:
field = get_field(self, attr)
return field._value_to_python(result[0])
@select_query
def hslice(self, query, attr, keys):
"""
Slices the specified key/value pairs.
"""
query.add_extra({'_': 'slice("%s", %%s)' % attr}, [keys], None, None, None, None)
result = query.get_compiler(self.db).execute_sql(SINGLE)
if result and result[0]:
field = get_field(self, attr)
return dict((key, field._value_to_python(value)) for key, value in result[0].items())
return {}
@update_query
def hremove(self, query, attr, keys):
"""
Removes the specified keys in the specified hstore.
"""
value = QueryWrapper('delete("%s", %%s)' % attr, [keys])
field = get_field(self, attr)
query.add_update_fields([(field, None, value)])
return query
@update_query
def hupdate(self, query, attr, updates):
"""
Updates the specified hstore.
"""
field = get_field(self, attr)
if hasattr(field, 'serializer'):
updates = field.get_prep_value(updates)
value = QueryWrapper('"%s" || %%s' % attr, [updates])
query.add_update_fields([(field, None, value)])
return query
if GEODJANGO_INSTALLED:
from django.contrib.gis.db.models.query import GeoQuerySet
if django.VERSION[:2] <= (1, 7):
from django.contrib.gis.db.models.sql.query import GeoQuery
from django.contrib.gis.db.models.sql.where import GeoWhereNode, GeoConstraint
class HStoreGeoWhereNode(HStoreWhereNode, GeoWhereNode):
def make_atom(self, child, qn, connection):
lvalue, lookup_type, value_annot, params_or_value = child
# if spatial query
if isinstance(lvalue, GeoConstraint):
return GeoWhereNode.make_atom(self, child, qn, connection)
# else might be an HSTORE query
return HStoreWhereNode.make_atom(self, child, qn, connection)
class HStoreGeoQuery(GeoQuery, Query):
def __init__(self, *args, **kwargs):
model = kwargs.pop('model', None) or args[0]
super(HStoreGeoQuery, self).__init__(model, HStoreGeoWhereNode)
class HStoreGeoQuerySet(HStoreQuerySet, GeoQuerySet):
def __init__(self, model=None, query=None, using=None, **kwargs):
query = query or HStoreGeoQuery(model)
super(HStoreGeoQuerySet, self).__init__(model=model, query=query, using=using, **kwargs)
else:
class HStoreGeoQuerySet(HStoreQuerySet, GeoQuerySet):
pass
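# A minimal usage sketch (not part of the module): it shows the hstore-specific
# queryset methods defined above. ``qs`` is assumed to be an ``HStoreQuerySet``
# (e.g. obtained from a model using django_hstore's manager) and ``attr`` the
# name of an hstore column; the key names used here are hypothetical.
def _example_hstore_queryset(qs, attr='data'):
    keys = qs.hkeys(attr)                          # all keys stored under ``attr``
    value = qs.hpeek(attr, 'colour')               # one value, converted to python
    subset = qs.hslice(attr, ['colour', 'size'])   # dict of the selected pairs
    qs.hupdate(attr, {'size': 'L'})                # merge new key/value pairs
    qs.hremove(attr, ['colour'])                   # delete keys
    return keys, value, subset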
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
import re
import os
import sys
import copy
import time
import warnings
import contextlib
import collections
from io import StringIO
import numpy as np
from . import core
from . import basic
from . import cds
from . import daophot
from . import ecsv
from . import sextractor
from . import ipac
from . import latex
from . import html
from . import rst
from . import fastbasic
from . import cparser
from . import fixedwidth
from .docs import READ_KWARG_TYPES, WRITE_KWARG_TYPES
from astropy.table import Table, MaskedColumn
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
_read_trace = []
try:
import yaml # noqa
HAS_YAML = True
except ImportError:
HAS_YAML = False
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
break
table = os.linesep.join(table[:i + 1])
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(r'( http[s]? | ftp | file ) :// .+ \.htm[l]?$', table,
re.IGNORECASE | re.VERBOSE):
return True
# Filename ending in .htm or .html which exists
if re.search(r'\.htm[l]?$', table[-5:], re.IGNORECASE) and os.path.exists(table):
return True
# Table starts with HTML document type declaration
if re.match(r'\s* <! \s* DOCTYPE \s* HTML', table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(re.search(fr'< \s* {element} [^>]* >', table, re.IGNORECASE | re.VERBOSE)
for element in ('table', 'tr', 'td')):
return True
return False
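# A quick illustrative check of the heuristic above (the strings are made up);
# kept as a helper so nothing runs at import time.
def _example_probably_html():
    assert _probably_html('<!DOCTYPE HTML><table><tr><td>1</td></tr></table>')
    assert not _probably_html('a b c\n1 2 3')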
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read()
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
"""
global _GUESS
_GUESS = guess
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dict of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : tuple, list of tuple
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader['enable'] == 'force':
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
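# A minimal sketch of customizing a reader via ``get_reader`` (the inline table
# is made up; only names already imported in this module are used).
def _example_get_reader():
    lines = ['a|b|c', '1|2|3', '4|5|6']
    reader = get_reader(Reader=basic.Basic, delimiter='|')
    return reader.read(lines)          # -> Table with columns a, b, c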
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f'Cannot supply both format and {label} keywords')
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError('ASCII format {!r} not in allowed list {}'
.format(format, sorted(core.FORMAT_CLASSES)))
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
"""
fast_reader = copy.deepcopy(kwargs.get('fast_reader', True))
if isinstance(fast_reader, dict):
fast_reader.setdefault('enable', 'force')
else:
fast_reader = {'enable': fast_reader}
return fast_reader
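# Illustrative checks (not upstream code) of how the helper above normalizes
# the public ``fast_reader`` argument into a dict with an 'enable' key.
def _example_fast_reader_dict():
    assert _get_fast_reader_dict({}) == {'enable': True}
    assert _get_fast_reader_dict({'fast_reader': False}) == {'enable': False}
    assert _get_fast_reader_dict({'fast_reader': {'chunk_size': 10}}) == \
        {'chunk_size': 10, 'enable': 'force'}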
def _validate_read_write_kwargs(read_write, **kwargs):
"""Validate types of keyword arg inputs to read() or write()."""
def is_ducktype(val, cls):
"""Check if ``val`` is an instance of ``cls`` or "seems" like one:
``cls(val) == val`` does not raise an exception and is `True`. In
this way you can pass in ``np.int16(2)`` and have that count as `int`.
This has a special-case of ``cls`` being 'list-like', meaning it is
an iterable but not a string.
"""
if cls == 'list-like':
ok = (not isinstance(val, str)
and isinstance(val, collections.abc.Iterable))
else:
ok = isinstance(val, cls)
if not ok:
# See if ``val`` walks and quacks like a ``cls``.
try:
new_val = cls(val)
assert new_val == val
except Exception:
ok = False
else:
ok = True
return ok
kwarg_types = READ_KWARG_TYPES if read_write == 'read' else WRITE_KWARG_TYPES
for arg, val in kwargs.items():
# Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
# This reflects that some readers allow additional arguments that may not
# be well-specified, e.g. ``__init__(self, **kwargs)`` is an option.
if arg not in kwarg_types or val is None:
continue
# Single type or tuple of types for this arg (like isinstance())
types = kwarg_types[arg]
err_msg = (f"{read_write}() argument '{arg}' must be a "
f"{types} object, got {type(val)} instead")
# Force `types` to be a tuple for the any() check below
if not isinstance(types, tuple):
types = (types,)
if not any(is_ducktype(val, cls) for cls in types):
raise TypeError(err_msg)
def read(table, guess=None, **kwargs):
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
_validate_read_write_kwargs('read', **kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs['fast_reader'] = fast_reader
if fast_reader['enable'] and fast_reader.get('chunk_size'):
return _read_in_chunks(table, **kwargs)
if 'fill_values' not in kwargs:
kwargs['fill_values'] = [('', '0')]
# If an Outputter is supplied in kwargs that will take precedence.
if 'Outputter' in kwargs: # user specified Outputter, not supported for fast reading
fast_reader['enable'] = False
format = kwargs.get('format')
# Dictionary arguments are passed by reference per default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs['fast_reader'] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
if Reader is not None:
new_kwargs['Reader'] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if 'format' in new_kwargs:
del new_kwargs['format']
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs['guess_html'] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if 'readme' not in new_kwargs:
encoding = kwargs.get('encoding')
try:
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r'[\r\n]', table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs['guess_html']:
new_kwargs['guess_html'] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader['enable'] and f'fast_{format}' in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append({'kwargs': copy.deepcopy(fast_kwargs),
'Reader': fast_reader_rdr.__class__,
'status': 'Success with fast reader (no guessing)'})
except (core.ParameterError, cparser.CParserError, UnicodeEncodeError) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader['enable'] == 'force':
raise core.InconsistentTableError(
f'fast reader {fast_reader_rdr.__class__} exception: {err}')
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with slow reader after failing'
' with fast (no guessing)'})
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with specified Reader class '
'(no guessing)'})
return dat
read.__doc__ = core.READ_DOCSTRING
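# A minimal sketch of the public ``read`` entry point defined above (the inline
# table is made up; any registered format name could be passed instead).
def _example_read():
    text = 'a b c\n1 2 3\n4 5 6'
    t1 = read(text)                               # let the guessing machinery decide
    t2 = read(text, format='basic', guess=False)  # explicit format, no guessing
    return t1, t2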
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of all failed guesses kwarg
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (fast_reader['enable'] and format is not None and f'fast_{format}' in
core.FAST_CLASSES):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get('fast_reader')
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (fast_reader['enable'] is False
and guess_kwargs['Reader'] in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: reader only available in fast version',
'dt': f'{0.0:.3f} ms'})
continue
# If user required a fast reader then skip all non-fast readers
if (fast_reader['enable'] == 'force'
and guess_kwargs['Reader'] not in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: no fast version of reader available',
'dt': f'{0.0:.3f} ms'})
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
# Do guess_kwargs.update(read_kwargs) except that if guess_kwargs has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (core.InconsistentTableError, ValueError, TypeError,
AttributeError, core.OptionalTableImportError,
core.ParameterError, cparser.CParserError)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
# If guessing will try all Readers then use strict req'ts on column names
if 'Reader' not in read_kwargs:
guess_kwargs['strict_names'] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': reader.__class__,
'status': 'Success (guessing)',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'Reader': reader.__class__,
'status': 'Success with original kwargs without strict_names '
'(guessing)'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}'})
failed_kwargs.append(read_kwargs)
lines = ['\nERROR: Unable to guess table format with the guesses listed below:']
for kwargs in failed_kwargs:
sorted_keys = sorted([x for x in sorted(kwargs)
if x not in ('Reader', 'Outputter')])
reader_repr = repr(kwargs.get('Reader', basic.Basic))
keys_vals = ['Reader:' + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f'{key}: {val!r}' for key, val in kwargs_sorted])
lines.append(' '.join(keys_vals))
msg = ['',
'************************************************************************',
'** ERROR: Unable to guess table format with the guesses listed above. **',
'** **',
'** To figure out why the table did not read, use guess=False and **',
'** fast_reader=False, along with any appropriate arguments to read(). **',
'** In particular specify the format and any known attributes like the **',
'** delimiter. **',
'************************************************************************']
lines.extend(msg)
raise core.InconsistentTableError('\n'.join(lines))
def _get_guess_kwargs_list(read_kwargs):
"""
Get the full list of reader keyword argument dicts that are the basis
for the format guessing process. The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop('guess_html', None):
guess_kwargs_list.append(dict(Reader=html.HTML))
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (fixedwidth.FixedWidthTwoLine, rst.RST,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastRdb, basic.Rdb,
fastbasic.FastTab, basic.Tab,
cds.Cds, daophot.Daophot, sextractor.SExtractor,
ipac.Ipac, latex.Latex, latex.AASTex):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (fastbasic.FastCommentedHeader, basic.CommentedHeader,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastNoHeader, basic.NoHeader):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(dict(
Reader=Reader, delimiter=delimiter, quotechar=quotechar))
return guess_kwargs_list
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
"""
fast_reader = kwargs['fast_reader']
chunk_size = fast_reader.pop('chunk_size')
chunk_generator = fast_reader.pop('chunk_generator', False)
fast_reader['parallel'] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ('S', 'U')
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta,
copy=False)
return out
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input File-like object, see #6460"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if (isinstance(table, str) and ('\n' in table or '\r' in table)):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, 'read') and hasattr(table, 'seek'):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs['fast_reader']['return_header_chars'] = True
header = '' # Table header (up to start of data)
prev_chunk_chars = '' # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get('encoding')) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r'\S', chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == '\n':
break
else:
raise ValueError('no newline found in chunk (chunk_size too small?)')
# Stick on the header to the chunk part up to (and including) the
# last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[:idx + 1]
prev_chunk_chars = chunk[idx + 1:]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop('__ascii_fast_reader_header_chars__')
first_chunk = False
yield tbl
if final_chunk:
break
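# A sketch of how the chunked-reading helpers above are reached from ``read``:
# pass a ``fast_reader`` dict carrying ``chunk_size`` (and, optionally,
# ``chunk_generator``). The file name and chunk size are hypothetical.
def _example_read_in_chunks(filename='big_table.csv'):
    # one stacked Table, assembled chunk by chunk
    tbl = read(filename, format='csv', fast_reader={'chunk_size': 100_000})
    # or iterate over per-chunk Tables without stacking them
    chunks = read(filename, format='csv',
                  fast_reader={'chunk_size': 100_000, 'chunk_generator': True})
    return tbl, chunks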
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'names', 'include_names', 'exclude_names', 'strip_whitespace')
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), str)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
def write(table, output=None, format=None, Writer=None, fast_writer=True, *,
overwrite=None, **kwargs):
# Docstring inserted below
_validate_read_write_kwargs('write', format=format, fast_writer=fast_writer,
overwrite=overwrite, **kwargs)
if isinstance(output, str):
if os.path.lexists(output):
if overwrite is None:
warnings.warn(
"{} already exists. "
"Automatically overwriting ASCII files is deprecated. "
"Use the argument 'overwrite=True' in the future.".format(
output), AstropyDeprecationWarning)
elif not overwrite:
raise OSError(f"{output} already exists")
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get('names')
if isinstance(table, Table):
# Note that making a copy of the table here is inefficient but
# without this copy a number of tests break (e.g. in test_fixedwidth).
# See #7605.
new_tbl = table.__class__(table, names=names)
# This makes a copy of the table columns. This is subject to a
# corner-case problem if writing a table with masked columns to ECSV
# where serialize_method is set to 'data_mask'. In this case that
# attribute gets dropped in the copy, so do the copy here. This
# should be removed when `info` really contains all the attributes
# (#6720).
for new_col, col in zip(new_tbl.itercols(), table.itercols()):
if isinstance(col, MaskedColumn):
new_col.info.serialize_method = col.info.serialize_method
table = new_tbl
else:
table = Table(table, names=names)
table0 = table[:0].copy()
core._apply_include_exclude_names(table0, kwargs.get('names'),
kwargs.get('include_names'), kwargs.get('exclude_names'))
diff_format_with_names = set(kwargs.get('formats', [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
'The keys {} specified in the formats argument do not match a column name.'
.format(diff_format_with_names), AstropyWarning)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, 'Writer')
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, 'write'):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, 'w', newline='')
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
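# Illustrative sketch (not part of the original module): a typical call, with
# ``tbl`` standing in for an existing Table; ``format`` and ``overwrite`` map
# onto the parameters handled above.
#
#     write(tbl, 'data.csv', format='csv', overwrite=True)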
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dicts
Ordered list of format guesses and status
"""
return copy.deepcopy(_read_trace)
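# Illustrative sketch (not part of the original module): each trace entry is a
# dict carrying the ``kwargs`` and ``status`` described above, so the last
# guessing session can be inspected with:
#
#     for entry in get_read_trace():
#         print(entry['kwargs'], entry['status'])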
|
|
'''
TODO:
. CS (chip select) equivalent when write to memory mapped IO address
'''
'''----------------------------- Imports -----------------------------'''
# Built ins
import math
# Hack computer
from ._x__components import *
import Assembler.disassembler as dis
'''------------------------------- CPU -------------------------------'''
'''
Instruction - FEDCBA9876543210 // msb to lsb
0123456789ABCDEF // array indexing
F . 0 -> TECS instruction type (C if 1, @ if 0)
E . 1 -> op
D . 2 -> op
C . 3 -> op
B . 4 -> op
A . 5 -> op
9 . 6 -> xSel
8 . 7 -> xSel
7 . 8 -> ySel
6 . 9 -> ySel
5 . A -> dst
4 . B -> dst
3 . C -> dst
2 . D -> jmp
1 . E -> jmp
0 . F -> jmp
x/y sel
0 D
1 A
2 B
3 M
dst
0 NULL
1 D
2 A
3 B
4 M
5 unused
6 unused
7 unused
jmp
0 NULL
1 JGT
2 JEQ
3 JGE
4 JLT
5 JNE
6 JLE
7 JMP
'''
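# Illustrative sketch (not part of the original design): given an instruction
# as a 16-bit tuple (e.g. from CPU_.intToBitArray), the fields documented above
# can be sliced out with the same offsets CPU_.__init__ defines
# (op=1, xSel=6, ySel=8, dst=10, jmp=13):
#
#     op   = instruction[ 1 : 6 ]    # 5-bit opcode
#     xSel = instruction[ 6 : 8 ]    # x operand select (D, A, B, M)
#     ySel = instruction[ 8 : 10 ]   # y operand select (D, A, B, M)
#     dst  = instruction[ 10 : 13 ]  # destination select
#     jmp  = instruction[ 13 : 16 ]  # jump condition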
class CPU_():
''' Fetches and executes program instructions '''
def __init__( self, N ):
self.debugMode = False
self.N = N
# Program counter
self.programCounter = CounterN_( 2 * N ) # TODO...this can be 26 instead
# Microstep counter
nStepsPerInstruction = 4
nBitsInCounter = 2 # int( math.log( nStepsPerInstruction, 2 ) )
self.microCounter = CounterN_( nBitsInCounter )
# Microcode ROM
nControlSignals = 18
nInstructionTypes = 8
# self.nBitsInInstructionType = 3 # math.ceil( math.log( nInstructionTypes, 2 ) )
nEntriesMicrocodeROM = nInstructionTypes * nStepsPerInstruction
self.microcodeROM = ROMXN_( nEntriesMicrocodeROM, nControlSignals )
# ALU ROM
nEntriesALUROM = 32
nBitsInFxSel = 4
nBitsInFxFlags = 5
self.ALUROM = ROMXN_( nEntriesALUROM, nBitsInFxSel + nBitsInFxFlags )
self.initInternalROM()
# Registers
self.A_register = RegisterN_( N )
self.D_register = RegisterN_( N )
self.B_register = RegisterN_( N )
self.AA_register = RegisterN_( N )
self.instruction_register = RegisterN_( N )
self.IOInput_register = RegisterN_( N )
self.ABkp_register = RegisterN_( N )
self.DBkp_register = RegisterN_( N )
self.BBkp_register = RegisterN_( N )
self.AABkp_register = RegisterN_( N )
self.instructionBkp_register = RegisterN_( N )
self.PCBkp_register = RegisterN_( 2 * N )
# Flip flops
self.interruptsEnabled_ff = DFlipFlop()
self.interruptAcknowledged_ff = DFlipFlop()
self.backupEnabled_ff = DFlipFlop()
# Instruction decode
self.TECSInstrType = 0
self.op = 1
self.xSel = 6
self.ySel = 8
self.dst = 10
self.jmp = 13
self.nBitsInOp = 5
# Instruction types
self.i_Aimmed = ( 1, 1, 0, 0, 0 )
self.i_AAimmed = ( 1, 1, 0, 0, 1 )
self.i_dstEqCmpJmp = ( 1, 1, 0, 1, 0 )
self.i_dstEqIOBus = ( 1, 1, 0, 1, 1 )
self.i_intAck = ( 1, 1, 1, 0, 0 )
self.i_reti = ( 1, 1, 1, 0, 1 )
self.i_nop = ( 1, 1, 1, 1, 0 )
self.i_halt = ( 1, 1, 1, 1, 1 )
# Location of ISRHandler in program
self.ISRHandlerAddress = self.intToBitArray( 0, 2 * N ) # TODO
# Miscellaneous
self.zero = self.intToBitArray( 0, N )
self.AA_registerMask = ( 0, ) * 6 + ( 1, ) * 10 # ???
# Temp debug
self.instructionTypeLookup = {
( 1, 1, 0, 0, 0 ) : 'i_Aimmed',
( 1, 1, 0, 0, 1 ) : 'i_AAimmed',
( 1, 1, 0, 1, 0 ) : 'i_dstEqCmpJmp',
( 1, 1, 0, 1, 1 ) : 'i_dstEqIOBus',
( 1, 1, 1, 0, 0 ) : 'i_intAck',
( 1, 1, 1, 0, 1 ) : 'i_reti',
( 1, 1, 1, 1, 0 ) : 'i_nop',
( 1, 1, 1, 1, 1 ) : 'i_halt',
}
self.ALUFxLookup = {
( 0, 0, 0, 0, 0 ) : '0',
( 0, 0, 0, 0, 1 ) : '1',
( 0, 0, 0, 1, 0 ) : '-1',
( 0, 0, 0, 1, 1 ) : 'x',
( 0, 0, 1, 0, 0 ) : '! x',
( 0, 0, 1, 0, 1 ) : '- x',
( 0, 0, 1, 1, 0 ) : 'x + 1',
( 0, 0, 1, 1, 1 ) : 'x - 1',
( 0, 1, 0, 0, 0 ) : 'x + y',
( 0, 1, 0, 0, 1 ) : 'x - y',
( 0, 1, 0, 1, 0 ) : 'x & y',
( 0, 1, 0, 1, 1 ) : 'x | y',
( 0, 1, 1, 0, 0 ) : 'x ^ y',
( 0, 1, 1, 0, 1 ) : 'x >> y',
( 0, 1, 1, 1, 0 ) : 'x << y',
( 0, 1, 1, 1, 1 ) : 'x * y',
( 1, 0, 0, 0, 0 ) : 'x / y',
}
self.xyLookup = {
( 0, 0 ) : 'D',
( 0, 1 ) : 'A',
( 1, 0 ) : 'B',
( 1, 1 ) : 'M',
}
def intToBitArray( self, x, N ):
z = bin( x )[ 2 : ].zfill( N )
return tuple( map( int, z ) )
def bitArrayToBinaryString( self, x ):
return ''.join( map( str, x ) )
def bitArrayToInt( self, x ):
return int( ''.join( map( str, x ) ), 2 )
def initInternalROM( self ):
# Microcode ROM
'''
| i_Aimmed | i_AAimmed | i_dstEqCmpJmp | i_dstEqIOBus | i_intAck | i_reti | i_nop | i_halt |
| 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 |
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
c_cInst | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_ARegisterWr | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_ARegisterInSel_instructionRegister | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_AARegisterWr | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_instructionRegisterWr | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 |
c_PCIncrement | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 |
c_PCWr | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_PCInSel_ISRHandler | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_readIODatabus | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_dstInSel_IOInputRegister | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_enableInterrupts | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 |
c_disableInterrupts | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_acknowledgeInterrupt | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_servicedInterrupt | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 |
c_enableRegisterBackup | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 |
c_disableRegisterBackup | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_restoreRegisters | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 |
c_halt | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 1 1 |
'''
# i_Aimmed
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 0 )
self.microcodeROM.write( 1, ( 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 1 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 2 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 3 )
# i_AAimmed
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 4 )
self.microcodeROM.write( 1, ( 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 5 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 6 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 7 )
# i_dstEqCmpJmp
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 8 )
self.microcodeROM.write( 1, ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 9 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 10 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 11 )
# i_dstEqIOBus
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 12 )
self.microcodeROM.write( 1, ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 13 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 14 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 15 )
# i_intAck
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 16 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0 ), 1, 17 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 18 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 19 )
# i_reti
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 20 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ), 1, 21 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ), 1, 22 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 23 )
# i_nop
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 24 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 25 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 26 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 27 )
# i_halt
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 28 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 1, 29 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 1, 30 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 1, 31 )
# ALU ROM
'''
op fsel flags composite
----- ---- ----- ----------
0 add zx, zy 0000 10100
1 add zx, nx, zy, ny, no 0000 11111
- 1 add zx, nx, zy 0000 11100
x and zy, ny 0001 00110
! x and zy, ny, no 0001 00111
- x add zy, ny, no 0000 00111
x + 1 add nx, zy, ny, no 0000 01111
x - 1 add zy, ny 0000 00110
x + y add 0000 00000
x - y add nx, no 0000 01001
x & y and 0001 00000
x | y and nx, ny, no 0001 01011
x ^ y xor 0010 00000
x >> y lsr 0011 00000
x << y lsl 0100 00000
x * y mul 0101 00000
x / y div 0110 00000
'''
self.ALUROM.write( 1, ( 0, 0, 0, 0, 1, 0, 1, 0, 0 ), 1, 0 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 1, 1, 1, 1, 1 ), 1, 1 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 1, 1, 1, 0, 0 ), 1, 2 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 0, 1, 1, 0 ), 1, 3 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 0, 1, 1, 1 ), 1, 4 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 0, 1, 1, 1 ), 1, 5 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 1, 1, 1, 1 ), 1, 6 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 0, 1, 1, 0 ), 1, 7 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 8 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 1, 0, 0, 1 ), 1, 9 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 0, 0, 0, 0 ), 1, 10 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 1, 0, 1, 1 ), 1, 11 )
self.ALUROM.write( 1, ( 0, 0, 1, 0, 0, 0, 0, 0, 0 ), 1, 12 )
self.ALUROM.write( 1, ( 0, 0, 1, 1, 0, 0, 0, 0, 0 ), 1, 13 )
self.ALUROM.write( 1, ( 0, 1, 0, 0, 0, 0, 0, 0, 0 ), 1, 14 )
self.ALUROM.write( 1, ( 0, 1, 0, 1, 0, 0, 0, 0, 0 ), 1, 15 )
self.ALUROM.write( 1, ( 0, 1, 1, 0, 0, 0, 0, 0, 0 ), 1, 16 )
def compareOp( self, a, b ):
# if a == b, a ^ b == 0
# submodule, dry
c = xorN_( self.nBitsInOp, a, b )
d = not_( orNto1_( self.nBitsInOp, c ) )
return d
def doTheThing(
self,
computer, # ...
clk, # input
RESET, # input
interruptRequested, # input
IODatabus # bidirectional
):
'''
. Everything happens at once/simultaneously
. Assumes all memory modules can be read asynchronously
'''
# Alias -
data_memory = computer.data_memory
program_memory = computer.program_memory
# Constants -
# Always increment microCounter
microCounterIn = self.zero
microCounterWr = 0
microCounterIncrement = 1
# Read memory -
D_registerOut = self.D_register.read()
A_registerOut = self.A_register.read()
B_registerOut = self.B_register.read()
AA_registerOut = self.AA_register.read()
instruction_registerOut = self.instruction_register.read()
IOInput_registerOut = self.IOInput_register.read()
ABkp_registerOut = self.ABkp_register.read()
DBkp_registerOut = self.DBkp_register.read()
BBkp_registerOut = self.BBkp_register.read()
AABkp_registerOut = self.AABkp_register.read()
instructionBkp_registerOut = self.instructionBkp_register.read()
PCBkp_registerOut = self.PCBkp_register.read()
# interruptsEnabled = self.interruptsEnabled_ff.read()
# interruptAcknowledged = self.interruptAcknowledged_ff.read()
# backupEnabled = self.backupEnabled_ff.read()
instruction = instruction_registerOut
lowerAddress = A_registerOut
upperAddress = AA_registerOut
dataMemoryOut = data_memory.read( lowerAddress )
instructionAddress = self.programCounter.read()
microStep = self.microCounter.read()
if self.debugMode:
print( 'instruction {}'.format( self.bitArrayToBinaryString( instruction ) ) )
print( ' {}'.format( dis.disassemble( self.bitArrayToBinaryString( instruction ) ) ) )
print( 'instructionAddress {}'.format( self.programCounter.readDecimal() ) )
print( 'microStep {}'.format( self.bitArrayToInt( microStep ) ) )
programMemoryOut = program_memory.read( self.programCounter.read() )
# Decode -
interruptsEnabled = 1 # TODO, fix me!
op = instruction[ self.op : self.op + self.nBitsInOp ]
isAimmed = not_( instruction[ self.TECSInstrType ] )
iDecode2 = muxN_(
self.nBitsInOp,
op, # 11xxx (special op)
self.i_dstEqCmpJmp, # everything else ('dst=cmp;jmp')
and_( instruction[ self.op ], instruction[ self.op + 1 ] )
)
iDecode1 = muxN_(
self.nBitsInOp,
self.i_Aimmed, # '@' instruction
iDecode2,
isAimmed
)
instructionType = muxN_(
self.nBitsInOp,
self.i_intAck, # interrupt acknowledge
iDecode1,
and_( interruptRequested, interruptsEnabled )
)
microAddress = instructionType[ 2 : ] + microStep # 3bits(8) + 2bits(4)
microInstruction = self.microcodeROM.read( microAddress )
if self.debugMode:
print( 'instructionType {} {}'.format( instructionType, self.instructionTypeLookup[ instructionType ] ) )
if instructionType == self.i_dstEqCmpJmp:
print( ' alu op {}'.format( self.ALUFxLookup[ op ] ) )
# Control signals -
c_cInst = microInstruction[ 0 ]
c_ARegisterWr = microInstruction[ 1 ]
c_ARegisterInSel_instructionRegister = microInstruction[ 2 ]
c_AARegisterWr = microInstruction[ 3 ]
c_instructionRegisterWr = microInstruction[ 4 ]
c_PCIncrement = microInstruction[ 5 ]
c_PCWr = microInstruction[ 6 ]
c_PCInSel_ISRHandler = microInstruction[ 7 ]
c_readIODatabus = microInstruction[ 8 ]
c_dstInSel_IOInputRegister = microInstruction[ 9 ]
c_enableInterrupts = microInstruction[ 10 ]
c_disableInterrupts = microInstruction[ 11 ]
c_acknowledgeInterrupt = microInstruction[ 12 ]
c_servicedInterrupt = microInstruction[ 13 ]
c_enableRegisterBackup = microInstruction[ 14 ]
c_disableRegisterBackup = microInstruction[ 15 ]
c_restoreRegisters = microInstruction[ 16 ]
c_halt = microInstruction[ 17 ]
if self.debugMode:
print( 'controlSignals ', end='' )
if c_cInst: print( 'c_cInst', end = ' | ' )
if c_ARegisterWr: print( 'c_ARegisterWr', end = ' | ' )
if c_ARegisterInSel_instructionRegister: print( 'c_ARegisterInSel_instructionRegister', end = ' | ' )
if c_AARegisterWr: print( 'c_AARegisterWr', end = ' | ' )
if c_instructionRegisterWr: print( 'c_instructionRegisterWr', end = ' | ' )
if c_PCIncrement: print( 'c_PCIncrement', end = ' | ' )
if c_PCWr: print( 'c_PCWr', end = ' | ' )
if c_PCInSel_ISRHandler: print( 'c_PCInSel_ISRHandler', end = ' | ' )
if c_readIODatabus: print( 'c_readIODatabus', end = ' | ' )
if c_dstInSel_IOInputRegister: print( 'c_dstInSel_IOInputRegister', end = ' | ' )
if c_enableInterrupts: print( 'c_enableInterrupts', end = ' | ' )
if c_disableInterrupts: print( 'c_disableInterrupts', end = ' | ' )
if c_acknowledgeInterrupt: print( 'c_acknowledgeInterrupt', end = ' | ' )
if c_servicedInterrupt: print( 'c_servicedInterrupt', end = ' | ' )
if c_enableRegisterBackup: print( 'c_enableRegisterBackup', end = ' | ' )
if c_disableRegisterBackup: print( 'c_disableRegisterBackup', end = ' | ' )
if c_restoreRegisters: print( 'c_restoreRegisters', end = ' | ' )
if c_halt: print( 'c_halt', end = ' | ' )
print()
# Hold value over time (via register), but switch immediately with control signal
'''
en | 100x
dis | 001x
regOut | x110
desired | 110x
'''
interruptsEnabled = and_(
or_( c_enableInterrupts, self.interruptsEnabled_ff.read() ),
not_( c_disableInterrupts )
)
interruptAcknowledged = and_(
or_( c_acknowledgeInterrupt, self.interruptAcknowledged_ff.read() ),
not_( c_servicedInterrupt )
)
backupEnabled = and_(
or_( c_enableRegisterBackup, self.backupEnabled_ff.read() ),
not_( c_disableRegisterBackup )
)
# x,y select -
x = muxN4to1_(
self.N,
dataMemoryOut,
B_registerOut,
A_registerOut,
D_registerOut,
instruction[ self.xSel + 0 ], instruction[ self.xSel + 1 ]
)
y = muxN4to1_(
self.N,
dataMemoryOut,
B_registerOut,
A_registerOut,
D_registerOut,
instruction[ self.ySel + 0 ], instruction[ self.ySel + 1 ]
)
# ALU -
ALU_control = self.ALUROM.read( op )
ALU_out = ALU_( self.N, x, y, ALU_control )
z = ALU_out[ 0 ] # result of computation
zr = ALU_out[ 1 ] # result is zero
ng = ALU_out[ 2 ] # result is negative
if self.debugMode:
# print( 'ALU_control {}'.format( ALU_control ) )
print( 'x {} {} {}'.format( x, self.xyLookup[ instruction[ self.xSel : self.xSel + 2 ] ], self.bitArrayToInt( x ) ) )
print( 'y {} {} {}'.format( y, self.xyLookup[ instruction[ self.ySel : self.ySel + 2 ] ], self.bitArrayToInt( y ) ) )
print( 'z {} {}'.format( z, self.bitArrayToInt( z ) ) )
# Jump -
jump = mux8to1_(
1, # JMP
or_( zr, ng ), # JLE
not_( zr ), # JNE
ng, # JLT
not_( ng ), # JGE
zr, # JEQ
not_( or_( zr, ng ) ), # JGT
0, # NULL
instruction[ self.jmp + 0 ], instruction[ self.jmp + 1 ], instruction[ self.jmp + 2 ]
)
# Write data select -
D_registerIn = muxN4to1_(
self.N,
self.zero,
DBkp_registerOut,
IOInput_registerOut,
z,
c_restoreRegisters, c_dstInSel_IOInputRegister
)
B_registerIn = muxN4to1_(
self.N,
self.zero,
BBkp_registerOut,
IOInput_registerOut,
z,
c_restoreRegisters, c_dstInSel_IOInputRegister
)
A_registerIn = muxN8to1_(
self.N,
self.zero,
self.zero,
self.zero,
instruction,
self.zero,
ABkp_registerOut,
IOInput_registerOut,
z,
c_ARegisterInSel_instructionRegister, c_restoreRegisters, c_dstInSel_IOInputRegister
)
AA_registerIn = andN_( self.N, instruction, self.AA_registerMask )
IOInput_registerIn = bufferN_( self.N, IODatabus, c_readIODatabus )
dataMemoryIn = muxN_(
self.N,
IOInput_registerOut,
z,
c_dstInSel_IOInputRegister
)
PCIn = muxN4to1_(
self.N * 2,
self.zero + self.zero,
PCBkp_registerOut,
self.zero + self.ISRHandlerAddress,
upperAddress + lowerAddress,
c_restoreRegisters, c_PCInSel_ISRHandler
)
# Write dst select -
dst = decoder3to8_( # returns ( q7, q6, q5, q4, q3, q2, q1, q0 )
instruction[ self.dst + 0 ],
instruction[ self.dst + 1 ],
instruction[ self.dst + 2 ],
)
D_registerWr = and_( dst[ 7 - 1 ], c_cInst )
A_registerWr = or_( and_( dst[ 7 - 2 ], c_cInst ), c_ARegisterWr )
B_registerWr = and_( dst[ 7 - 3 ], c_cInst )
dataMemoryWr = and_( dst[ 7 - 4 ], c_cInst )
PCWr = or_( and_( jump, c_cInst ), c_PCWr )
# Write memory -
self.D_register.write ( clk, D_registerIn, D_registerWr )
self.A_register.write ( clk, A_registerIn, A_registerWr )
self.B_register.write ( clk, B_registerIn, B_registerWr )
self.AA_register.write ( clk, AA_registerIn, c_AARegisterWr )
self.instruction_register.write ( clk, programMemoryOut, c_instructionRegisterWr )
self.IOInput_register.write ( clk, IOInput_registerIn, c_readIODatabus )
self.DBkp_register.write ( clk, D_registerIn, and_( backupEnabled, D_registerWr ) )
self.ABkp_register.write ( clk, A_registerIn, and_( backupEnabled, A_registerWr ) )
self.BBkp_register.write ( clk, B_registerIn, and_( backupEnabled, B_registerWr ) )
self.AABkp_register.write ( clk, AA_registerOut, and_( backupEnabled, c_AARegisterWr ) )
self.instructionBkp_register.write( clk, instruction_registerOut, and_( backupEnabled, c_instructionRegisterWr ) )
self.PCBkp_register.write ( clk, instructionAddress, and_( backupEnabled, c_instructionRegisterWr ) )
self.interruptsEnabled_ff.doTheThing ( clk, c_disableInterrupts, or_( RESET, c_enableInterrupts ), 0 )
self.interruptAcknowledged_ff.doTheThing( clk, or_( RESET, c_servicedInterrupt ), c_acknowledgeInterrupt, 0 )
self.backupEnabled_ff.doTheThing ( clk, c_disableRegisterBackup, or_( RESET, c_enableRegisterBackup ), 0 )
data_memory.write( clk, dataMemoryIn, dataMemoryWr, lowerAddress )
if self.debugMode:
print( 'dataMemoryWr {}'.format( dataMemoryWr ) )
print( 'dataMemoryIn {} {}'.format( dataMemoryIn, self.bitArrayToInt( dataMemoryIn ) ) )
# print( 'lowerAddress', lowerAddress )
self.programCounter.doTheThing( clk, RESET, PCIn, PCWr, c_PCIncrement )
self.microCounter.doTheThing( clk, RESET, microCounterIn, microCounterWr, microCounterIncrement )
if self.debugMode:
print( 'ARegOut {}'.format( self.A_register.readDecimal() ) )
print( 'DRegOut {}'.format( self.D_register.readDecimal() ) )
print( 'BRegOut {}'.format( self.B_register.readDecimal() ) )
# print( 'mem_16 ', data_memory.readDecimal( 16 ) )
# print( 'mem_17 ', data_memory.readDecimal( 17 ) )
# print( 'mem_0 ', data_memory.readDecimal( 0 ) )
# print( 'mem_1 ', data_memory.readDecimal( 1 ) )
print()
# Set output signals -
computer.halted = c_halt
|
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import re
import tempfile
import random
import flask
import werkzeug.exceptions
import numpy as np
from google.protobuf import text_format
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
import digits
from digits.config import config_value
from digits import utils
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import app, scheduler, autodoc
from digits.dataset import ImageClassificationDatasetJob
from digits.model import tasks
from forms import ImageClassificationModelForm
from job import ImageClassificationModelJob
from digits.status import Status
NAMESPACE = '/models/images/classification'
@app.route(NAMESPACE + '/new', methods=['GET'])
@autodoc('models')
def image_classification_model_new():
"""
Return a form for a new ImageClassificationModelJob
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
return flask.render_template('models/images/classification/new.html',
form = form,
previous_network_snapshots = prev_network_snapshots,
multi_gpu = config_value('caffe_root')['multi_gpu'],
)
@app.route(NAMESPACE + '.json', methods=['POST'])
@app.route(NAMESPACE, methods=['POST'])
@autodoc(['models', 'api'])
def image_classification_model_create():
"""
Create a new ImageClassificationModelJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('models/images/classification/new.html',
form = form,
previous_network_snapshots = prev_network_snapshots,
multi_gpu = config_value('caffe_root')['multi_gpu'],
), 400
datasetJob = scheduler.get_job(form.dataset.data)
if not datasetJob:
raise werkzeug.exceptions.BadRequest(
'Unknown dataset job_id "%s"' % form.dataset.data)
job = None
try:
job = ImageClassificationModelJob(
name = form.model_name.data,
dataset_id = datasetJob.id(),
)
network = caffe_pb2.NetParameter()
pretrained_model = None
if form.method.data == 'standard':
found = False
networks_dir = os.path.join(os.path.dirname(digits.__file__), 'standard-networks')
for filename in os.listdir(networks_dir):
path = os.path.join(networks_dir, filename)
if os.path.isfile(path):
match = re.match(r'%s.prototxt' % form.standard_networks.data, filename)
if match:
with open(path) as infile:
text_format.Merge(infile.read(), network)
found = True
break
if not found:
raise werkzeug.exceptions.BadRequest(
'Unknown standard model "%s"' % form.standard_networks.data)
elif form.method.data == 'previous':
old_job = scheduler.get_job(form.previous_networks.data)
if not old_job:
raise werkzeug.exceptions.BadRequest(
'Job not found: %s' % form.previous_networks.data)
network.CopyFrom(old_job.train_task().network)
# Rename the final layer
# XXX making some assumptions about network architecture here
ip_layers = [l for l in network.layer if l.type == 'InnerProduct']
if len(ip_layers) > 0:
ip_layers[-1].name = '%s_retrain' % ip_layers[-1].name
for choice in form.previous_networks.choices:
if choice[0] == form.previous_networks.data:
epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
if epoch != 0:
for filename, e in old_job.train_task().snapshots:
if e == epoch:
pretrained_model = filename
break
if pretrained_model is None:
raise werkzeug.exceptions.BadRequest(
"For the job %s, selected pretrained_model for epoch %d is invalid!"
% (form.previous_networks.data, epoch))
if not (os.path.exists(pretrained_model)):
raise werkzeug.exceptions.BadRequest(
"Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
break
elif form.method.data == 'custom':
text_format.Merge(form.custom_network.data, network)
pretrained_model = form.custom_network_snapshot.data.strip()
else:
raise werkzeug.exceptions.BadRequest(
'Unrecognized method: "%s"' % form.method.data)
policy = {'policy': form.lr_policy.data}
if form.lr_policy.data == 'fixed':
pass
elif form.lr_policy.data == 'step':
policy['stepsize'] = form.lr_step_size.data
policy['gamma'] = form.lr_step_gamma.data
elif form.lr_policy.data == 'multistep':
policy['stepvalue'] = form.lr_multistep_values.data
policy['gamma'] = form.lr_multistep_gamma.data
elif form.lr_policy.data == 'exp':
policy['gamma'] = form.lr_exp_gamma.data
elif form.lr_policy.data == 'inv':
policy['gamma'] = form.lr_inv_gamma.data
policy['power'] = form.lr_inv_power.data
elif form.lr_policy.data == 'poly':
policy['power'] = form.lr_poly_power.data
elif form.lr_policy.data == 'sigmoid':
policy['stepsize'] = form.lr_sigmoid_step.data
policy['gamma'] = form.lr_sigmoid_gamma.data
else:
raise werkzeug.exceptions.BadRequest(
'Invalid learning rate policy')
if config_value('caffe_root')['multi_gpu']:
if form.select_gpus.data:
selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
gpu_count = None
elif form.select_gpu_count.data:
gpu_count = form.select_gpu_count.data
selected_gpus = None
else:
gpu_count = 1
selected_gpus = None
else:
if form.select_gpu.data == 'next':
gpu_count = 1
selected_gpus = None
else:
selected_gpus = [str(form.select_gpu.data)]
gpu_count = None
job.tasks.append(
tasks.CaffeTrainTask(
job_dir = job.dir(),
dataset = datasetJob,
train_epochs = form.train_epochs.data,
snapshot_interval = form.snapshot_interval.data,
learning_rate = form.learning_rate.data,
lr_policy = policy,
gpu_count = gpu_count,
selected_gpus = selected_gpus,
batch_size = form.batch_size.data,
val_interval = form.val_interval.data,
pretrained_model= pretrained_model,
crop_size = form.crop_size.data,
use_mean = bool(form.use_mean.data),
network = network,
random_seed = form.random_seed.data,
solver_type = form.solver_type.data,
)
)
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('models_show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
def show(job):
"""
Called from digits.model.views.models_show()
"""
return flask.render_template('models/images/classification/show.html', job=job)
@app.route(NAMESPACE + '/large_graph', methods=['GET'])
@autodoc('models')
def image_classification_model_large_graph():
"""
Show the loss/accuracy graph, but bigger
"""
job = job_from_request()
return flask.render_template('models/images/classification/large_graph.html', job=job)
@app.route(NAMESPACE + '/classify_one.json', methods=['POST'])
@app.route(NAMESPACE + '/classify_one', methods=['POST', 'GET'])
@autodoc(['models', 'api'])
def image_classification_model_classify_one():
"""
Classify one image and return the top 5 classifications
Returns JSON when requested: {predictions: {category: confidence,...}}
"""
job = job_from_request()
image = None
if 'image_url' in flask.request.form and flask.request.form['image_url']:
image = utils.image.load_image(flask.request.form['image_url'])
elif 'image_file' in flask.request.files and flask.request.files['image_file']:
with tempfile.NamedTemporaryFile() as outfile:
flask.request.files['image_file'].save(outfile.name)
image = utils.image.load_image(outfile.name)
else:
raise werkzeug.exceptions.BadRequest('must provide image_url or image_file')
# resize image
db_task = job.train_task().dataset.train_db_task()
height = db_task.image_dims[0]
width = db_task.image_dims[1]
if job.train_task().crop_size:
height = job.train_task().crop_size
width = job.train_task().crop_size
image = utils.image.resize_image(image, height, width,
channels = db_task.image_dims[2],
resize_mode = db_task.resize_mode,
)
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
predictions, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)
# take top 5
predictions = [(p[0], round(100.0*p[1],2)) for p in predictions[:5]]
if request_wants_json():
return flask.jsonify({'predictions': predictions})
else:
return flask.render_template('models/images/classification/classify_one.html',
image_src = utils.image.embed_image_html(image),
predictions = predictions,
visualizations = visualizations,
)
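# Illustrative sketch (not part of the original module), assuming a DIGITS
# server at http://localhost:5000 and that the model job id is passed as the
# 'job_id' form field consumed by job_from_request():
#
#     import requests
#     url = 'http://localhost:5000/models/images/classification/classify_one.json'
#     r = requests.post(url, data={'job_id': '<model-job-id>',
#                                  'image_url': 'http://example.com/cat.jpg'})
#     print(r.json())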
@app.route(NAMESPACE + '/classify_many.json', methods=['POST'])
@app.route(NAMESPACE + '/classify_many', methods=['POST', 'GET'])
@autodoc(['models', 'api'])
def image_classification_model_classify_many():
"""
Classify many images and return the top 5 classifications for each
Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
"""
job = job_from_request()
image_list = flask.request.files.get('image_list')
if not image_list:
raise werkzeug.exceptions.BadRequest('image_list is a required field')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
paths = []
images = []
ground_truths = []
dataset = job.train_task().dataset
for line in image_list.readlines():
line = line.strip()
if not line:
continue
path = None
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+(\d+)$', line)
if match:
path = match.group(1)
ground_truth = int(match.group(2))
else:
path = line
ground_truth = None
try:
image = utils.image.load_image(path)
image = utils.image.resize_image(image,
dataset.image_dims[0], dataset.image_dims[1],
channels = dataset.image_dims[2],
resize_mode = dataset.resize_mode,
)
paths.append(path)
images.append(image)
ground_truths.append(ground_truth)
except utils.errors.LoadImageError as e:
print e
if not len(images):
raise werkzeug.exceptions.BadRequest(
'Unable to load any images from the file')
labels, scores = job.train_task().infer_many(images, snapshot_epoch=epoch)
if scores is None:
raise RuntimeError('An error occurred while processing the images')
# take top 5
indices = (-scores).argsort()[:, :5]
classifications = []
for image_index, index_list in enumerate(indices):
result = []
for i in index_list:
# `i` is a category in labels and also an index into scores
result.append((labels[i], round(100.0*scores[image_index, i],2)))
classifications.append(result)
# replace ground truth indices with labels
ground_truths = [labels[x] if x is not None else None for x in ground_truths]
if request_wants_json():
joined = dict(zip(paths, classifications))
return flask.jsonify({'classifications': joined})
else:
return flask.render_template('models/images/classification/classify_many.html',
paths=paths,
classifications=classifications,
show_ground_truth=not(ground_truths == [None]*len(ground_truths)),
ground_truths=ground_truths
)
@app.route(NAMESPACE + '/top_n', methods=['POST'])
@autodoc('models')
def image_classification_model_top_n():
"""
Classify many images and show the top N images per category by confidence
"""
job = job_from_request()
image_list = flask.request.files['image_list']
if not image_list:
raise werkzeug.exceptions.BadRequest('File upload not found')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
if 'top_n' in flask.request.form and flask.request.form['top_n'].strip():
top_n = int(flask.request.form['top_n'])
else:
top_n = 9
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_images = int(flask.request.form['num_test_images'])
else:
num_images = None
paths = []
for line in image_list.readlines():
line = line.strip()
if not line:
continue
path = None
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+\d+$', line)
if match:
path = match.group(1)
else:
path = line
paths.append(path)
random.shuffle(paths)
images = []
dataset = job.train_task().dataset
for path in paths:
try:
image = utils.image.load_image(path)
image = utils.image.resize_image(image,
dataset.image_dims[0], dataset.image_dims[1],
channels = dataset.image_dims[2],
resize_mode = dataset.resize_mode,
)
images.append(image)
if num_images and len(images) >= num_images:
break
except utils.errors.LoadImageError as e:
print e
if not len(images):
raise werkzeug.exceptions.BadRequest(
'Unable to load any images from the file')
labels, scores = job.train_task().infer_many(images, snapshot_epoch=epoch)
if scores is None:
raise RuntimeError('An error occurred while processing the images')
indices = (-scores).argsort(axis=0)[:top_n]
results = []
for i in xrange(indices.shape[1]):
result_images = []
for j in xrange(top_n):
result_images.append(images[indices[j][i]])
results.append((
labels[i],
utils.image.embed_image_html(
utils.image.vis_square(np.array(result_images))
)
))
return flask.render_template('models/images/classification/top_n.html',
job=job,
results=results,
)
def get_datasets():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs if isinstance(j, ImageClassificationDatasetJob) and (j.status.is_running() or j.status == Status.DONE)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_standard_networks():
return [
('lenet', 'LeNet'),
('alexnet', 'AlexNet'),
#('vgg-16', 'VGG (16-layer)'), #XXX model won't learn
('googlenet', 'GoogLeNet'),
]
def get_default_standard_network():
return 'alexnet'
def get_previous_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_network_snapshots():
prev_network_snapshots = []
for job_id, _ in get_previous_networks():
job = scheduler.get_job(job_id)
e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
for _, epoch in reversed(job.train_task().snapshots)]
prev_network_snapshots.append(e)
return prev_network_snapshots
|
|
# Author: Roman Goj <roman.goj@gmail.com>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from scipy.fftpack import fftfreq
from ..io.pick import pick_types
from ..utils import logger, verbose, warn
from ..time_frequency.multitaper import (dpss_windows, _mt_spectra,
_csd_from_mt, _psd_from_mt_adaptive)
from ..externals.six.moves import xrange as range
class CrossSpectralDensity(object):
"""Cross-spectral density.
Parameters
----------
data : array of shape (n_channels, n_channels)
The cross-spectral density matrix.
ch_names : list of string
List of channels' names.
projs : list of Projection
List of projectors used in the CSD calculation.
bads : list of str
List of bad channels.
frequencies : float | list of float
Frequency or frequencies for which the CSD matrix was calculated. If a
list is passed, data is a sum across CSD matrices for all frequencies.
n_fft : int
Length of the FFT used when calculating the CSD matrix.
"""
def __init__(self, data, ch_names, projs, bads, frequencies,
n_fft): # noqa: D102
self.data = data
self.dim = len(data)
self.ch_names = cp.deepcopy(ch_names)
self.projs = cp.deepcopy(projs)
self.bads = cp.deepcopy(bads)
self.frequencies = np.atleast_1d(np.copy(frequencies))
self.n_fft = n_fft
def __repr__(self): # noqa: D105
s = 'frequencies : %s' % self.frequencies
s += ', size : %s x %s' % self.data.shape
s += ', data : %s' % self.data
return '<CrossSpectralDensity | %s>' % s
@verbose
def csd_epochs(epochs, mode='multitaper', fmin=0, fmax=np.inf,
fsum=True, tmin=None, tmax=None, n_fft=None,
mt_bandwidth=None, mt_adaptive=False, mt_low_bias=True,
projs=None, verbose=None):
"""Estimate cross-spectral density from epochs.
Note: Baseline correction should be used when creating the Epochs.
Otherwise the computed cross-spectral density will be inaccurate.
Note: Results are scaled by sampling frequency for compatibility with
Matlab.
Parameters
----------
epochs : instance of Epochs
The epochs.
mode : str
Spectrum estimation mode can be either: 'multitaper' or 'fourier'.
fmin : float
Minimum frequency of interest.
fmax : float | np.inf
Maximum frequency of interest.
fsum : bool
Sum CSD values for the frequencies of interest. Summing is performed
instead of averaging so that accumulated power is comparable to power
in the time domain. If True, a single CSD matrix will be returned. If
False, the output will be a list of CSD matrices.
tmin : float | None
Minimum time instant to consider. If None start at first sample.
tmax : float | None
Maximum time instant to consider. If None end at last sample.
n_fft : int | None
Length of the FFT. If None the exact number of samples between tmin and
tmax will be used.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'multitaper' mode.
mt_adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD.
Only used in 'multitaper' mode.
mt_low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth. Only used in 'multitaper' mode.
projs : list of Projection | None
List of projectors to use in CSD calculation, or None to indicate that
the projectors from the epochs should be inherited.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
csd : instance of CrossSpectralDensity
The computed cross-spectral density.
"""
# Portions of this code adapted from mne/connectivity/spectral.py
# Check correctness of input data and parameters
if fmax < fmin:
raise ValueError('fmax must be larger than fmin')
tstep = epochs.times[1] - epochs.times[0]
if tmin is not None and tmin < epochs.times[0] - tstep:
raise ValueError('tmin should be larger than the smallest data time '
'point')
if tmax is not None and tmax > epochs.times[-1] + tstep:
raise ValueError('tmax should be smaller than the largest data time '
'point')
if tmax is not None and tmin is not None:
if tmax < tmin:
raise ValueError('tmax must be larger than tmin')
if epochs.baseline is None and epochs.info['highpass'] < 0.1:
warn('Epochs are not baseline corrected or sufficiently highpass filtered. '
'Cross-spectral density may be inaccurate.')
if projs is None:
projs = cp.deepcopy(epochs.info['projs'])
else:
projs = cp.deepcopy(projs)
picks_meeg = pick_types(epochs[0].info, meg=True, eeg=True, eog=False,
ref_meg=False, exclude='bads')
ch_names = [epochs.ch_names[k] for k in picks_meeg]
# Preparing time window slice
tstart, tend = None, None
if tmin is not None:
tstart = np.where(epochs.times >= tmin)[0][0]
if tmax is not None:
tend = np.where(epochs.times <= tmax)[0][-1] + 1
tslice = slice(tstart, tend, None)
n_times = len(epochs.times[tslice])
n_fft = n_times if n_fft is None else n_fft
# Preparing frequencies of interest
sfreq = epochs.info['sfreq']
orig_frequencies = fftfreq(n_fft, 1. / sfreq)
freq_mask = (orig_frequencies > fmin) & (orig_frequencies < fmax)
frequencies = orig_frequencies[freq_mask]
n_freqs = len(frequencies)
if n_freqs == 0:
raise ValueError('No discrete fourier transform results within '
'the given frequency window. Please widen either '
'the frequency window or the time window')
# Preparing for computing CSD
logger.info('Computing cross-spectral density from epochs...')
window_fun, eigvals, n_tapers, mt_adaptive = _compute_csd_params(
n_times, sfreq, mode, mt_bandwidth, mt_low_bias, mt_adaptive)
csds_mean = np.zeros((len(ch_names), len(ch_names), n_freqs),
dtype=complex)
# Picking frequencies of interest
freq_mask_mt = freq_mask[orig_frequencies >= 0]
# Compute CSD for each epoch
n_epochs = 0
for epoch in epochs:
epoch = epoch[picks_meeg][:, tslice]
# Calculating Fourier transform using multitaper module
csds_epoch = _csd_array(epoch, sfreq, window_fun, eigvals, freq_mask,
freq_mask_mt, n_fft, mode, mt_adaptive)
# Scaling by number of samples and compensating for loss of power due
# to windowing (see section 11.5.2 in Bendat & Piersol).
if mode == 'fourier':
csds_epoch /= n_times
csds_epoch *= 8 / 3.
# Scaling by sampling frequency for compatibility with Matlab
csds_epoch /= sfreq
csds_mean += csds_epoch
n_epochs += 1
csds_mean /= n_epochs
logger.info('[done]')
# Summing over frequencies of interest or returning a list of separate CSD
# matrices for each frequency
if fsum is True:
csd_mean_fsum = np.sum(csds_mean, 2)
csd = CrossSpectralDensity(csd_mean_fsum, ch_names, projs,
epochs.info['bads'],
frequencies=frequencies, n_fft=n_fft)
return csd
else:
csds = []
for i in range(n_freqs):
csds.append(CrossSpectralDensity(csds_mean[:, :, i], ch_names,
projs, epochs.info['bads'],
frequencies=frequencies[i],
n_fft=n_fft))
return csds
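# Illustrative sketch (not part of the original module), assuming ``epochs`` is
# an existing, baseline-corrected mne.Epochs instance: sum the CSD over the
# 8-12 Hz band using multitaper estimation.
#
#     csd = csd_epochs(epochs, mode='multitaper', fmin=8, fmax=12, fsum=True)
#     print(csd.frequencies, csd.data.shape)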
@verbose
def csd_array(X, sfreq, mode='multitaper', fmin=0, fmax=np.inf,
fsum=True, n_fft=None, mt_bandwidth=None,
mt_adaptive=False, mt_low_bias=True, verbose=None):
"""Estimate cross-spectral density from an array.
.. note:: Results are scaled by sampling frequency for compatibility with
Matlab.
Parameters
----------
X : array-like, shape (n_replicates, n_series, n_times)
The time series data consisting of n_replicates separate observations
of signals with n_series components and of length n_times. For example,
n_replicates could be the number of epochs, and n_series the number of
vertices in a source-space.
sfreq : float
Sampling frequency of observations.
mode : str
Spectrum estimation mode can be either: 'multitaper' or 'fourier'.
fmin : float
Minimum frequency of interest.
fmax : float
Maximum frequency of interest.
fsum : bool
Sum CSD values for the frequencies of interest. Summing is performed
instead of averaging so that accumulated power is comparable to power
in the time domain. If True, a single CSD matrix will be returned. If
False, the output will be an array of CSD matrices.
n_fft : int | None
Length of the FFT. If None, the number of time points in X (n_times)
will be used.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'multitaper' mode.
mt_adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD.
Only used in 'multitaper' mode.
mt_low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth. Only used in 'multitaper' mode.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`).
Returns
-------
csd : array, shape (n_series, n_series) if fsum is True, otherwise (n_series, n_series, n_freqs).
The computed cross-spectral density (summed over the frequencies of interest if fsum is True).
freqs : array
Frequencies at which the cross-spectral density is evaluated.
""" # noqa: E501
# Check correctness of input data and parameters
if fmax < fmin:
raise ValueError('fmax must be larger than fmin')
X = np.asarray(X, dtype=float)
if X.ndim != 3:
raise ValueError("X must be n_replicates x n_series x n_times.")
n_replicates, n_series, n_times = X.shape
# Preparing frequencies of interest
n_fft = n_times if n_fft is None else n_fft
orig_frequencies = fftfreq(n_fft, 1. / sfreq)
freq_mask = (orig_frequencies > fmin) & (orig_frequencies < fmax)
frequencies = orig_frequencies[freq_mask]
n_freqs = len(frequencies)
if n_freqs == 0:
raise ValueError('No discrete fourier transform results within '
'the given frequency window. Please widen either '
'the frequency window or the time window')
# Preparing for computing CSD
logger.info('Computing cross-spectral density from array...')
window_fun, eigvals, n_tapers, mt_adaptive = _compute_csd_params(
n_times, sfreq, mode, mt_bandwidth, mt_low_bias, mt_adaptive)
csds_mean = np.zeros((n_series, n_series, n_freqs), dtype=complex)
# Picking frequencies of interest
freq_mask_mt = freq_mask[orig_frequencies >= 0]
# Compute CSD for each trial
for xi in X:
csds_trial = _csd_array(xi, sfreq, window_fun, eigvals, freq_mask,
freq_mask_mt, n_fft, mode, mt_adaptive)
# Scaling by number of trials and compensating for loss of power due
# to windowing (see section 11.5.2 in Bendat & Piersol).
if mode == 'fourier':
csds_trial /= n_times
csds_trial *= 8 / 3.
# Scaling by sampling frequency for compatibility with Matlab
csds_trial /= sfreq
csds_mean += csds_trial
csds_mean /= n_replicates
logger.info('[done]')
# Summing over frequencies of interest or returning a list of separate CSD
# matrices for each frequency
if fsum is True:
csds_mean = np.sum(csds_mean, 2)
return csds_mean, frequencies
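# Illustrative sketch (not part of the original module): CSD matrices from a
# synthetic array of 10 replicates, 4 series and 256 time points sampled at
# 128 Hz, keeping one matrix per frequency (fsum=False).
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(10, 4, 256)
#     csds, freqs = csd_array(X, sfreq=128., fmin=8, fmax=12, fsum=False)
#     # csds.shape == (4, 4, len(freqs))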
def _compute_csd_params(n_times, sfreq, mode, mt_bandwidth, mt_low_bias,
mt_adaptive):
"""Compute windowing and multitaper parameters.
Parameters
----------
n_times : int
Number of time points.
sfreq : float
Sampling frequency of the signal.
mode : str
Spectrum estimation mode can be either: 'multitaper' or 'fourier'.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'multitaper' mode.
mt_low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth. Only used in 'multitaper' mode.
mt_adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD.
Only used in 'multitaper' mode.
Returns
-------
window_fun : array
Window function(s) of length n_times. In 'multitaper' mode this is the
first output of `dpss_windows`; in 'fourier' mode it is a Hanning
window of length `n_times`.
eigvals : array | float
Eigenvalues associated with the window functions. Only needed when mode
is 'multitaper'; when mode is 'fourier' this is set to 1.
n_tapers : int | None
Number of tapers to use. Only used when mode is 'multitaper'.
ret_mt_adaptive : bool
Updated value of the `mt_adaptive` argument, since certain parameter
combinations do not allow adaptive spectral estimation.
"""
ret_mt_adaptive = mt_adaptive
if mode == 'multitaper':
# Compute standardized half-bandwidth
if mt_bandwidth is not None:
half_nbw = float(mt_bandwidth) * n_times / (2. * sfreq)
else:
half_nbw = 2.
# Compute DPSS windows
n_tapers_max = int(2 * half_nbw)
window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
low_bias=mt_low_bias)
n_tapers = len(eigvals)
logger.info(' using multitaper spectrum estimation with %d DPSS '
'windows' % n_tapers)
if mt_adaptive and len(eigvals) < 3:
warn('Not adaptively combining the spectral estimators due to a '
'low number of tapers.')
ret_mt_adaptive = False
elif mode == 'fourier':
logger.info(' using FFT with a Hanning window to estimate spectra')
window_fun = np.hanning(n_times)
ret_mt_adaptive = False
eigvals = 1.
n_tapers = None
else:
raise ValueError('Mode has an invalid value.')
return window_fun, eigvals, n_tapers, ret_mt_adaptive
def _csd_array(x, sfreq, window_fun, eigvals, freq_mask, freq_mask_mt, n_fft,
mode, mt_adaptive):
"""Calculate Fourier transform using multitaper module.
The arguments correspond to the values in `compute_csd_epochs` and
`csd_array`.
"""
x_mt, _ = _mt_spectra(x, window_fun, sfreq, n_fft)
if mt_adaptive:
# Compute adaptive weights
_, weights = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask,
return_weights=True)
# Tiling weights so that we can easily use _csd_from_mt()
weights = weights[:, np.newaxis, :, :]
weights = np.tile(weights, [1, x_mt.shape[0], 1, 1])
else:
# Do not use adaptive weights
if mode == 'multitaper':
weights = np.sqrt(eigvals)[np.newaxis, np.newaxis, :, np.newaxis]
else:
# Hack so we can sum over axis=-2
weights = np.array([1.])[:, np.newaxis, np.newaxis, np.newaxis]
x_mt = x_mt[:, :, freq_mask_mt]
# Calculating CSD
# Tiling x_mt so that we can easily use _csd_from_mt()
x_mt = x_mt[:, np.newaxis, :, :]
x_mt = np.tile(x_mt, [1, x_mt.shape[0], 1, 1])
y_mt = np.transpose(x_mt, axes=[1, 0, 2, 3])
weights_y = np.transpose(weights, axes=[1, 0, 2, 3])
csds = _csd_from_mt(x_mt, y_mt, weights, weights_y)
return csds
|
|
import subprocess
import io
import os
import tempfile
import glob
import json
import logging
import sys
import requests
from . import docker
from .process import get_feature, empty_subtree, stageFiles
from .errors import WorkflowException
import shutil
import stat
import re
import shellescape
import string
from .docker_uid import docker_vm_uid
from .builder import Builder
from typing import (Any, Callable, Union, Iterable, Mapping, MutableMapping,
IO, cast, Text, Tuple, Dict, List)  # Dict/List are referenced in type comments below
from .pathmapper import PathMapper
import functools
_logger = logging.getLogger("cwltool")
needs_shell_quoting_re = re.compile(r"""(^$|[\s|&;()<>\'"$@])""")
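# Illustrative note (not part of the original module): the pattern above flags
# empty strings and arguments containing whitespace or shell metacharacters,
# e.g. needs_shell_quoting_re.search("foo bar") and
# needs_shell_quoting_re.search("$HOME") both match, while
# needs_shell_quoting_re.search("plain-arg_1") returns None.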
FORCE_SHELLED_POPEN = os.getenv("CWLTOOL_FORCE_SHELL_POPEN", "0") == "1"
SHELL_COMMAND_TEMPLATE = """#!/bin/bash
python "run_job.py" "job.json"
"""
PYTHON_RUN_SCRIPT = """
import json
import sys
import subprocess
with open(sys.argv[1], "r") as f:
popen_description = json.load(f)
commands = popen_description["commands"]
cwd = popen_description["cwd"]
env = popen_description["env"]
stdin_path = popen_description["stdin_path"]
stdout_path = popen_description["stdout_path"]
stderr_path = popen_description["stderr_path"]
if stdin_path is not None:
stdin = open(stdin_path, "rb")
else:
stdin = subprocess.PIPE
if stdout_path is not None:
stdout = open(stdout_path, "wb")
else:
stdout = sys.stderr
if stderr_path is not None:
stderr = open(stderr_path, "wb")
else:
stderr = sys.stderr
sp = subprocess.Popen(commands,
shell=False,
close_fds=True,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd)
if sp.stdin:
sp.stdin.close()
rcode = sp.wait()
if isinstance(stdin, file):
stdin.close()
if stdout is not sys.stderr:
stdout.close()
if stderr is not sys.stderr:
stderr.close()
sys.exit(rcode)
"""
def deref_links(outputs): # type: (Any) -> None
if isinstance(outputs, dict):
if outputs.get("class") == "File":
st = os.lstat(outputs["path"])
if stat.S_ISLNK(st.st_mode):
outputs["path"] = os.readlink(outputs["path"])
else:
for v in outputs.values():
deref_links(v)
if isinstance(outputs, list):
for v in outputs:
deref_links(v)
class CommandLineJob(object):
def __init__(self): # type: () -> None
self.builder = None # type: Builder
self.joborder = None # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
self.stdin = None # type: Text
self.stderr = None # type: Text
self.stdout = None # type: Text
self.successCodes = None # type: Iterable[int]
self.temporaryFailCodes = None # type: Iterable[int]
self.permanentFailCodes = None # type: Iterable[int]
self.requirements = None # type: List[Dict[Text, Text]]
self.hints = None # type: Dict[Text,Text]
self.name = None # type: Text
self.command_line = None # type: List[Text]
self.pathmapper = None # type: PathMapper
self.collect_outputs = None # type: Union[Callable[[Any], Any], functools.partial[Any]]
self.output_callback = None # type: Callable[[Any, Any], Any]
self.outdir = None # type: Text
self.tmpdir = None # type: Text
self.environment = None # type: MutableMapping[Text, Text]
self.generatefiles = None # type: Dict[Text, Union[List[Dict[Text, Text]], Dict[Text, Text], Text]]
self.stagedir = None # type: Text
def run(self, dry_run=False, pull_image=True, rm_container=True,
rm_tmpdir=True, move_outputs="move", **kwargs):
# type: (bool, bool, bool, bool, bool, Text, **Any) -> Union[Tuple[Text, Dict[None, None]], None]
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
#with open(os.path.join(outdir, "cwl.input.json"), "w") as fp:
# json.dump(self.joborder, fp)
runtime = [] # type: List[Text]
(docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
for knownfile in self.pathmapper.files():
p = self.pathmapper.mapper(knownfile)
if p.type == "File" and not os.path.isfile(p[0]):
raise WorkflowException(
u"Input file %s (at %s) not found or is not a regular "
"file." % (knownfile, self.pathmapper.mapper(knownfile)[0]))
img_id = None
env = None # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
if docker_req and kwargs.get("use_container") is not False:
env = os.environ
img_id = docker.get_from_requirements(docker_req, docker_is_req, pull_image)
elif kwargs.get("default_container", None) is not None:
env = os.environ
img_id = kwargs.get("default_container")
if docker_is_req and img_id is None:
raise WorkflowException("Docker is required for running this tool.")
if img_id:
runtime = ["docker", "run", "-i"]
for src in self.pathmapper.files():
vol = self.pathmapper.mapper(src)
if vol.type == "File":
runtime.append(u"--volume=%s:%s:ro" % (vol.resolved, vol.target))
if vol.type == "CreateFile":
createtmp = os.path.join(self.stagedir, os.path.basename(vol.target))
with open(createtmp, "w") as f:
f.write(vol.resolved.encode("utf-8"))
runtime.append(u"--volume=%s:%s:ro" % (createtmp, vol.target))
runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.outdir), "/var/spool/cwl"))
runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.tmpdir), "/tmp"))
runtime.append(u"--workdir=%s" % ("/var/spool/cwl"))
runtime.append("--read-only=true")
if kwargs.get("custom_net", None) is not None:
runtime.append("--net={0}".format(kwargs.get("custom_net")))
elif kwargs.get("disable_net", None):
runtime.append("--net=none")
if self.stdout:
runtime.append("--log-driver=none")
euid = docker_vm_uid() or os.geteuid()
runtime.append(u"--user=%s" % (euid))
if rm_container:
runtime.append("--rm")
runtime.append("--env=TMPDIR=/tmp")
# spec currently says "HOME must be set to the designated output
# directory." but spec might change to designated temp directory.
# runtime.append("--env=HOME=/tmp")
runtime.append("--env=HOME=/var/spool/cwl")
for t,v in self.environment.items():
runtime.append(u"--env=%s=%s" % (t, v))
runtime.append(img_id)
else:
env = self.environment
if not os.path.exists(self.tmpdir):
os.makedirs(self.tmpdir)
vars_to_preserve = kwargs.get("preserve_environment")
if kwargs.get("preserve_entire_environment"):
vars_to_preserve = os.environ
if vars_to_preserve is not None:
for key, value in os.environ.items():
if key in vars_to_preserve and key not in env:
env[key] = value
env["HOME"] = self.outdir
env["TMPDIR"] = self.tmpdir
stageFiles(self.pathmapper, os.symlink)
scr, _ = get_feature(self, "ShellCommandRequirement")
if scr:
shouldquote = lambda x: False
else:
shouldquote = needs_shell_quoting_re.search
_logger.info(u"[job %s] %s$ %s%s%s%s",
self.name,
self.outdir,
" \\\n ".join([shellescape.quote(Text(arg)) if shouldquote(Text(arg)) else Text(arg) for arg in (runtime + self.command_line)]),
u' < %s' % self.stdin if self.stdin else '',
u' > %s' % os.path.join(self.outdir, self.stdout) if self.stdout else '',
u' 2> %s' % os.path.join(self.outdir, self.stderr) if self.stderr else '')
if dry_run:
return (self.outdir, {})
outputs = {} # type: Dict[Text,Text]
try:
if self.generatefiles["listing"]:
generatemapper = PathMapper([self.generatefiles], self.outdir,
self.outdir, separateDirs=False)
_logger.debug(u"[job %s] initial work dir %s", self.name,
json.dumps({p: generatemapper.mapper(p) for p in generatemapper.files()}, indent=4))
def linkoutdir(src, tgt):
# Need to make the link to the staged file (may be inside
# the container)
for _, item in self.pathmapper.items():
if src == item.resolved:
os.symlink(item.target, tgt)
break
stageFiles(generatemapper, linkoutdir)
stdin_path = None
if self.stdin:
stdin_path = self.pathmapper.reversemap(self.stdin)[1]
stderr_path = None
if self.stderr:
abserr = os.path.join(self.outdir, self.stderr)
dnerr = os.path.dirname(abserr)
if dnerr and not os.path.exists(dnerr):
os.makedirs(dnerr)
stderr_path = abserr
stdout_path = None
if self.stdout:
absout = os.path.join(self.outdir, self.stdout)
dn = os.path.dirname(absout)
if dn and not os.path.exists(dn):
os.makedirs(dn)
stdout_path = absout
build_job_script = self.builder.build_job_script # type: Callable[[List[str]], Text]
rcode = _job_popen(
[Text(x).encode('utf-8') for x in runtime + self.command_line],
stdin_path=stdin_path,
stdout_path=stdout_path,
stderr_path=stderr_path,
env=env,
cwd=self.outdir,
build_job_script=build_job_script,
)
if self.successCodes and rcode in self.successCodes:
processStatus = "success"
elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:
processStatus = "temporaryFail"
elif self.permanentFailCodes and rcode in self.permanentFailCodes:
processStatus = "permanentFail"
elif rcode == 0:
processStatus = "success"
else:
processStatus = "permanentFail"
if self.generatefiles["listing"]:
def linkoutdir(src, tgt):
# Need to make the link to the staged file (may be inside
# the container)
if os.path.islink(tgt):
os.remove(tgt)
os.symlink(src, tgt)
stageFiles(generatemapper, linkoutdir, ignoreWritable=True)
outputs = self.collect_outputs(self.outdir)
except OSError as e:
if e.errno == 2:
if runtime:
_logger.error(u"'%s' not found", runtime[0])
else:
_logger.error(u"'%s' not found", self.command_line[0])
else:
_logger.exception("Exception while running job")
processStatus = "permanentFail"
except WorkflowException as e:
_logger.error(u"Error while running job: %s" % e)
processStatus = "permanentFail"
except Exception as e:
_logger.exception("Exception while running job")
processStatus = "permanentFail"
if processStatus != "success":
_logger.warn(u"[job %s] completed %s", self.name, processStatus)
else:
_logger.debug(u"[job %s] completed %s", self.name, processStatus)
_logger.debug(u"[job %s] %s", self.name, json.dumps(outputs, indent=4))
self.output_callback(outputs, processStatus)
if self.stagedir and os.path.exists(self.stagedir):
_logger.debug(u"[job %s] Removing input staging directory %s", self.name, self.stagedir)
shutil.rmtree(self.stagedir, True)
if rm_tmpdir:
_logger.debug(u"[job %s] Removing temporary directory %s", self.name, self.tmpdir)
shutil.rmtree(self.tmpdir, True)
if move_outputs == "move" and empty_subtree(self.outdir):
_logger.debug(u"[job %s] Removing empty output directory %s", self.name, self.outdir)
shutil.rmtree(self.outdir, True)
def _job_popen(
commands, # type: List[str]
stdin_path, # type: Text
stdout_path, # type: Text
stderr_path, # type: Text
env, # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
cwd, # type: Text
job_dir=None, # type: Text
build_job_script=None, # type: Callable[[List[str]], Text]
):
# type: (...) -> int
job_script_contents = None # type: Text
if build_job_script:
job_script_contents = build_job_script(commands)
if not job_script_contents and not FORCE_SHELLED_POPEN:
stdin = None # type: Union[IO[Any], int]
stderr = None # type: IO[Any]
stdout = None # type: IO[Any]
if stdin_path is not None:
stdin = open(stdin_path, "rb")
else:
stdin = subprocess.PIPE
if stdout_path is not None:
stdout = open(stdout_path, "wb")
else:
stdout = sys.stderr
if stderr_path is not None:
stderr = open(stderr_path, "wb")
else:
stderr = sys.stderr
sp = subprocess.Popen(commands,
shell=False,
close_fds=True,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd)
if sp.stdin:
sp.stdin.close()
rcode = sp.wait()
if isinstance(stdin, file):
stdin.close()
if stdout is not sys.stderr:
stdout.close()
if stderr is not sys.stderr:
stderr.close()
return rcode
else:
if job_dir is None:
job_dir = tempfile.mkdtemp(prefix="cwltooljob")
if not job_script_contents:
job_script_contents = SHELL_COMMAND_TEMPLATE
env_copy = {}
for key in env:
key = key.encode("utf-8")
env_copy[key] = env[key]
job_description = dict(
commands=commands,
cwd=cwd,
env=env_copy,
stdout_path=stdout_path,
stderr_path=stderr_path,
stdin_path=stdin_path,
)
with open(os.path.join(job_dir, "job.json"), "w") as f:
json.dump(job_description, f)
try:
job_script = os.path.join(job_dir, "run_job.bash")
with open(job_script, "w") as f:
f.write(job_script_contents)
job_run = os.path.join(job_dir, "run_job.py")
with open(job_run, "w") as f:
f.write(PYTHON_RUN_SCRIPT)
sp = subprocess.Popen(
["bash", job_script.encode("utf-8")],
shell=False,
cwd=job_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
if sp.stdin:
sp.stdin.close()
rcode = sp.wait()
return rcode
finally:
shutil.rmtree(job_dir)
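# Illustrative sketch only (not part of cwltool): shows how _job_popen above could be
# invoked directly for a trivial command with no job script. The command, directory
# names, and the assumption that the module-level FORCE_SHELLED_POPEN flag is falsy
# are for demonstration purposes.
def _example_job_popen():  # hypothetical helper
    import os
    import tempfile
    outdir = tempfile.mkdtemp(prefix="cwltooldemo")
    return _job_popen(
        ["echo", "hello"],
        stdin_path=None,
        stdout_path=os.path.join(outdir, "out.txt"),
        stderr_path=None,
        env=dict(os.environ),
        cwd=outdir,
    )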
|
|
import six
import hashlib
import random
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.core.urlresolvers import reverse
from django.db import models
from django.template import Template, Context, TemplateDoesNotExist
from django.template.loader import get_template
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from oscar.apps.customer.managers import CommunicationTypeManager
from oscar.core.compat import AUTH_USER_MODEL
from oscar.models.fields import AutoSlugField
class UserManager(auth_models.BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""
        Creates and saves a User with the given email and
        password.
"""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = UserManager.normalize_email(email)
user = self.model(
email=email, is_staff=False, is_active=True,
is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, **extra_fields):
u = self.create_user(email, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
class AbstractUser(auth_models.AbstractBaseUser,
auth_models.PermissionsMixin):
"""
An abstract base user suitable for use in Oscar projects.
This is basically a copy of the core AbstractUser model but without a
username field
"""
email = models.EmailField(_('email address'), unique=True)
first_name = models.CharField(
_('First name'), max_length=255, blank=True)
last_name = models.CharField(
_('Last name'), max_length=255, blank=True)
is_staff = models.BooleanField(
_('Staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(
_('Active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'),
default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'email'
class Meta:
abstract = True
verbose_name = _('User')
verbose_name_plural = _('Users')
def get_full_name(self):
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
return self.first_name
def _migrate_alerts_to_user(self):
"""
Transfer any active alerts linked to a user's email address to the
newly registered user.
"""
ProductAlert = self.alerts.model
alerts = ProductAlert.objects.filter(
email=self.email, status=ProductAlert.ACTIVE)
alerts.update(user=self, key=None, email=None)
def save(self, *args, **kwargs):
super(AbstractUser, self).save(*args, **kwargs)
# Migrate any "anonymous" product alerts to the registered user
# Ideally, this would be done via a post-save signal. But we can't
# use get_user_model to wire up signals to custom user models
# see Oscar ticket #1127, Django ticket #19218
self._migrate_alerts_to_user()
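# Illustrative sketch only (not part of Oscar): the manager above is normally exercised
# through the project's concrete user model. get_user_model() resolves whatever model
# AUTH_USER_MODEL points at; the email addresses below are placeholders.
def _example_create_users():  # hypothetical helper
    from django.contrib.auth import get_user_model
    User = get_user_model()
    user = User.objects.create_user('alice@example.com', password='s3cret')
    admin = User.objects.create_superuser('admin@example.com', password='s3cret')
    return user, admin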
class AbstractEmail(models.Model):
"""
This is a record of all emails sent to a customer.
Normally, we only record order-related emails.
"""
user = models.ForeignKey(AUTH_USER_MODEL, related_name='emails',
verbose_name=_("User"))
subject = models.TextField(_('Subject'), max_length=255)
body_text = models.TextField(_("Body Text"))
body_html = models.TextField(_("Body HTML"), blank=True)
date_sent = models.DateTimeField(_("Date Sent"), auto_now_add=True)
class Meta:
abstract = True
app_label = 'customer'
verbose_name = _('Email')
verbose_name_plural = _('Emails')
def __unicode__(self):
return _("Email to %(user)s with subject '%(subject)s'") % {
'user': self.user.get_username(), 'subject': self.subject}
class AbstractCommunicationEventType(models.Model):
"""
    A 'type' of communication. Like an order confirmation email.
"""
#: Code used for looking up this event programmatically.
# e.g. PASSWORD_RESET. AutoSlugField uppercases the code for us because
# it's a useful convention that's been enforced in previous Oscar versions
code = AutoSlugField(
_('Code'), max_length=128, unique=True, populate_from='name',
separator=six.u("_"), uppercase=True, editable=True,
help_text=_("Code used for looking up this event programmatically"))
#: Name is the friendly description of an event for use in the admin
name = models.CharField(
_('Name'), max_length=255,
help_text=_("This is just used for organisational purposes"))
# We allow communication types to be categorised
ORDER_RELATED = _('Order related')
USER_RELATED = _('User related')
category = models.CharField(_('Category'), max_length=255,
default=ORDER_RELATED)
# Template content for emails
# NOTE: There's an intentional distinction between None and ''. None
# instructs Oscar to look for a file-based template, '' is just an empty
# template.
email_subject_template = models.CharField(
_('Email Subject Template'), max_length=255, blank=True, null=True)
email_body_template = models.TextField(
_('Email Body Template'), blank=True, null=True)
email_body_html_template = models.TextField(
_('Email Body HTML Template'), blank=True, null=True,
help_text=_("HTML template"))
# Template content for SMS messages
sms_template = models.CharField(_('SMS Template'), max_length=170,
blank=True, null=True,
help_text=_("SMS template"))
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
date_updated = models.DateTimeField(_("Date Updated"), auto_now=True)
objects = CommunicationTypeManager()
# File templates
email_subject_template_file = 'customer/emails/commtype_%s_subject.txt'
email_body_template_file = 'customer/emails/commtype_%s_body.txt'
email_body_html_template_file = 'customer/emails/commtype_%s_body.html'
sms_template_file = 'customer/sms/commtype_%s_body.txt'
class Meta:
abstract = True
app_label = 'customer'
verbose_name = _("Communication event type")
verbose_name_plural = _("Communication event types")
def get_messages(self, ctx=None):
"""
        Return a dict of templates with the context merged in.
We look first at the field templates but fail over to
a set of file templates that follow a conventional path.
"""
code = self.code.lower()
# Build a dict of message name to Template instances
templates = {'subject': 'email_subject_template',
'body': 'email_body_template',
'html': 'email_body_html_template',
'sms': 'sms_template'}
for name, attr_name in templates.items():
field = getattr(self, attr_name, None)
if field is not None:
# Template content is in a model field
templates[name] = Template(field)
else:
# Model field is empty - look for a file template
template_name = getattr(self, "%s_file" % attr_name) % code
try:
templates[name] = get_template(template_name)
except TemplateDoesNotExist:
templates[name] = None
# Pass base URL for serving images within HTML emails
if ctx is None:
ctx = {}
ctx['static_base_url'] = getattr(
settings, 'OSCAR_STATIC_BASE_URL', None)
messages = {}
for name, template in templates.items():
messages[name] = template.render(Context(ctx)) if template else ''
# Ensure the email subject doesn't contain any newlines
messages['subject'] = messages['subject'].replace("\n", "")
messages['subject'] = messages['subject'].replace("\r", "")
return messages
def __unicode__(self):
return self.name
def is_order_related(self):
return self.category == self.ORDER_RELATED
def is_user_related(self):
return self.category == self.USER_RELATED
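# Illustrative sketch only (not part of Oscar): get_messages above merges a context into
# either the model-field templates or the conventional file templates. The event type
# code 'ORDER_PLACED' and the context keys are placeholder assumptions.
def _example_commtype_messages():  # hypothetical helper
    from oscar.core.loading import get_model
    CommunicationEventType = get_model('customer', 'CommunicationEventType')
    event_type = CommunicationEventType.objects.get(code='ORDER_PLACED')
    messages = event_type.get_messages(ctx={'user': None, 'order': None})
    return messages['subject'], messages['body']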
class AbstractNotification(models.Model):
recipient = models.ForeignKey(AUTH_USER_MODEL,
related_name='notifications', db_index=True)
# Not all notifications will have a sender.
sender = models.ForeignKey(AUTH_USER_MODEL, null=True)
# HTML is allowed in this field as it can contain links
subject = models.CharField(max_length=255)
body = models.TextField()
# Some projects may want to categorise their notifications. You may want
    # to use this field to show a different icon next to the notification.
category = models.CharField(max_length=255, blank=True)
INBOX, ARCHIVE = 'Inbox', 'Archive'
choices = (
(INBOX, _('Inbox')),
(ARCHIVE, _('Archive')))
location = models.CharField(max_length=32, choices=choices,
default=INBOX)
date_sent = models.DateTimeField(auto_now_add=True)
date_read = models.DateTimeField(blank=True, null=True)
class Meta:
abstract = True
app_label = 'customer'
ordering = ('-date_sent',)
verbose_name = _('Notification')
verbose_name_plural = _('Notifications')
def __unicode__(self):
return self.subject
def archive(self):
self.location = self.ARCHIVE
self.save()
archive.alters_data = True
@property
def is_read(self):
return self.date_read is not None
class AbstractProductAlert(models.Model):
"""
An alert for when a product comes back in stock
"""
product = models.ForeignKey('catalogue.Product')
    # A user is only required if the alert is created by a
    # registered user; anonymous users will only have an email address
    # attached to the alert
user = models.ForeignKey(AUTH_USER_MODEL, db_index=True, blank=True,
null=True, related_name="alerts",
verbose_name=_('User'))
email = models.EmailField(_("Email"), db_index=True, blank=True)
    # This key is used to confirm and cancel alerts for anon users
key = models.CharField(_("Key"), max_length=128, blank=True, db_index=True)
# An alert can have two different statuses for authenticated
    # users, ``ACTIVE`` and ``CANCELLED``, and anonymous users have an
# additional status ``UNCONFIRMED``. For anonymous users a confirmation
# and unsubscription key are generated when an instance is saved for
# the first time and can be used to confirm and unsubscribe the
# notifications.
UNCONFIRMED, ACTIVE, CANCELLED, CLOSED = (
'Unconfirmed', 'Active', 'Cancelled', 'Closed')
STATUS_CHOICES = (
(UNCONFIRMED, _('Not yet confirmed')),
(ACTIVE, _('Active')),
(CANCELLED, _('Cancelled')),
(CLOSED, _('Closed')),
)
status = models.CharField(_("Status"), max_length=20,
choices=STATUS_CHOICES, default=ACTIVE)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
date_confirmed = models.DateTimeField(_("Date confirmed"), blank=True,
null=True)
date_cancelled = models.DateTimeField(_("Date cancelled"), blank=True,
null=True)
date_closed = models.DateTimeField(_("Date closed"), blank=True, null=True)
class Meta:
abstract = True
app_label = 'customer'
verbose_name = _('Product alert')
verbose_name_plural = _('Product alerts')
@property
def is_anonymous(self):
return self.user is None
@property
def can_be_confirmed(self):
return self.status == self.UNCONFIRMED
@property
def can_be_cancelled(self):
return self.status == self.ACTIVE
@property
def is_cancelled(self):
return self.status == self.CANCELLED
@property
def is_active(self):
return self.status == self.ACTIVE
def confirm(self):
self.status = self.ACTIVE
self.date_confirmed = timezone.now()
self.save()
confirm.alters_data = True
def cancel(self):
self.status = self.CANCELLED
self.date_cancelled = timezone.now()
self.save()
cancel.alters_data = True
def close(self):
self.status = self.CLOSED
self.date_closed = timezone.now()
self.save()
close.alters_data = True
def get_email_address(self):
if self.user:
return self.user.email
else:
return self.email
def save(self, *args, **kwargs):
if not self.id and not self.user:
self.key = self.get_random_key()
self.status = self.UNCONFIRMED
# Ensure date fields get updated when saving from modelform (which just
# calls save, and doesn't call the methods cancel(), confirm() etc).
if self.status == self.CANCELLED and self.date_cancelled is None:
self.date_cancelled = timezone.now()
if not self.user and self.status == self.ACTIVE \
and self.date_confirmed is None:
self.date_confirmed = timezone.now()
if self.status == self.CLOSED and self.date_closed is None:
self.date_closed = timezone.now()
return super(AbstractProductAlert, self).save(*args, **kwargs)
def get_random_key(self):
"""
        Get a randomly generated key based on SHA-1 and the email address
"""
salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()
return hashlib.sha1((salt + self.email).encode('utf8')).hexdigest()
def get_confirm_url(self):
return reverse('customer:alerts-confirm', kwargs={'key': self.key})
def get_cancel_url(self):
return reverse('customer:alerts-cancel-by-key', kwargs={'key':
self.key})
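# Illustrative sketch only (not part of Oscar): walks the anonymous-alert lifecycle of
# the model above. The product lookup and email address are placeholder assumptions.
def _example_alert_lifecycle():  # hypothetical helper
    from oscar.core.loading import get_model
    Product = get_model('catalogue', 'Product')
    ProductAlert = get_model('customer', 'ProductAlert')
    alert = ProductAlert.objects.create(
        product=Product.objects.first(), email='alice@example.com')
    # Saved without a user, so the alert starts UNCONFIRMED with a random key.
    assert alert.status == ProductAlert.UNCONFIRMED and alert.key
    alert.confirm()   # -> ACTIVE, date_confirmed set
    alert.cancel()    # -> CANCELLED, date_cancelled set
    return alert.get_cancel_url()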
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import psutil
from builtins import input
from past.builtins import basestring
from datetime import datetime
import getpass
import imp
import os
import re
import signal
import subprocess
import sys
import warnings
from jinja2 import Template
from airflow import configuration
from airflow.exceptions import AirflowException
# When killing processes, time to wait after issuing a SIGTERM before issuing a
# SIGKILL.
DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM = configuration.conf.getint(
'core', 'KILLED_TASK_CLEANUP_TIME'
)
def validate_key(k, max_length=250):
if not isinstance(k, basestring):
raise TypeError("The key has to be a string")
elif len(k) > max_length:
raise AirflowException(
"The key has to be less than {0} characters".format(max_length))
elif not re.match(r'^[A-Za-z0-9_\-\.]+$', k):
raise AirflowException(
"The key ({k}) has to be made of alphanumeric characters, dashes, "
"dots and underscores exclusively".format(**locals()))
else:
return True
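# Illustrative sketch only (not part of Airflow): validate_key accepts ids made of
# alphanumerics, dashes, dots and underscores, and raises AirflowException otherwise.
def _example_validate_key():  # hypothetical helper
    assert validate_key("my_task-1.v2") is True
    try:
        validate_key("no spaces allowed")
    except AirflowException:
        return "rejected"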
def alchemy_to_dict(obj):
"""
Transforms a SQLAlchemy model instance into a dictionary
"""
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d
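# Illustrative sketch only (not part of Airflow): a minimal in-memory SQLAlchemy model,
# defined here purely to demonstrate alchemy_to_dict; note how datetime columns come
# back as ISO-8601 strings.
def _example_alchemy_to_dict():  # hypothetical helper
    from sqlalchemy import Column, DateTime, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    Base = declarative_base()
    class Demo(Base):
        __tablename__ = 'demo'
        id = Column(Integer, primary_key=True)
        created = Column(DateTime)
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Demo(id=1, created=datetime(2018, 1, 1)))
    session.commit()
    return alchemy_to_dict(session.query(Demo).first())
    # -> {'id': 1, 'created': '2018-01-01T00:00:00'}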
def ask_yesno(question):
yes = set(['yes', 'y'])
no = set(['no', 'n'])
done = False
print(question)
while not done:
choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
print("Please respond by yes or no.")
def is_in(obj, l):
"""
    Checks whether an object is one of the items in the list.
    This is different from ``in`` because ``in`` uses __cmp__ when
    present. Here we compare object identities using ``is``.
"""
for item in l:
if item is obj:
return True
return False
def is_container(obj):
"""
Test if an object is a container (iterable) but not a string
"""
return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
def as_tuple(obj):
"""
If obj is a container, returns obj as a tuple.
Otherwise, returns a tuple containing obj.
"""
if is_container(obj):
return tuple(obj)
else:
return tuple([obj])
def as_flattened_list(iterable):
"""
Return an iterable with one level flattened
>>> as_flattened_list((('blue', 'red'), ('green', 'yellow', 'pink')))
['blue', 'red', 'green', 'yellow', 'pink']
"""
return [e for i in iterable for e in i]
def chain(*tasks):
"""
Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4)
"""
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task)
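# Illustrative sketch only (not part of Airflow): in real DAG files chain() receives
# BaseOperator instances; the stand-in class below just records set_downstream calls.
def _example_chain():  # hypothetical helper
    class _Op(object):
        def __init__(self, name):
            self.name = name
            self.downstream = []
        def set_downstream(self, other):
            self.downstream.append(other.name)
    a, b, c = _Op('a'), _Op('b'), _Op('c')
    chain(a, b, c)
    return a.downstream, b.downstream  # (['b'], ['c'])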
def pprinttable(rows):
"""Returns a pretty ascii table from tuples
If namedtuple are used, the table will have headers
"""
if not rows:
return
if hasattr(rows[0], '_fields'): # if namedtuple
headers = rows[0]._fields
else:
headers = ["col{}".format(i) for i in range(len(rows[0]))]
lens = [len(s) for s in headers]
for row in rows:
for i in range(len(rows[0])):
            slength = len("{}".format(row[i]))
            if slength > lens[i]:
                lens[i] = slength
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
s = ""
s += separator + '\n'
s += (hpattern % tuple(headers)) + '\n'
s += separator + '\n'
def f(t):
return "{}".format(t) if isinstance(t, basestring) else t
for line in rows:
s += pattern % tuple(f(t) for t in line) + '\n'
s += separator + '\n'
return s
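# Illustrative sketch only (not part of Airflow): namedtuples supply the column headers,
# plain tuples fall back to col0, col1, ...
def _example_pprinttable():  # hypothetical helper
    from collections import namedtuple
    Row = namedtuple('Row', ['name', 'count'])
    return pprinttable([Row('alpha', 1), Row('beta', 22)])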
def reap_process_group(pid, log, sig=signal.SIGTERM,
timeout=DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM):
"""
Tries really hard to terminate all children (including grandchildren). Will send
sig (SIGTERM) to the process group of pid. If any process is alive after timeout
    a SIGKILL will be sent.
:param log: log handler
:param pid: pid to kill
:param sig: signal type
:param timeout: how much time a process has to terminate
"""
def on_terminate(p):
log.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode)
if pid == os.getpid():
raise RuntimeError("I refuse to kill myself")
parent = psutil.Process(pid)
children = parent.children(recursive=True)
children.append(parent)
log.info("Sending %s to GPID %s", sig, os.getpgid(pid))
os.killpg(os.getpgid(pid), sig)
gone, alive = psutil.wait_procs(children, timeout=timeout, callback=on_terminate)
if alive:
for p in alive:
log.warn("process %s (%s) did not respond to SIGTERM. Trying SIGKILL", p, pid)
os.killpg(os.getpgid(pid), signal.SIGKILL)
gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
for p in alive:
log.error("Process %s (%s) could not be killed. Giving up.", p, p.pid)
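# Illustrative sketch only (not part of Airflow, POSIX only): start a sleeping child in
# its own process group and then terminate the whole group. A plain module logger stands
# in for the task logger Airflow normally passes.
def _example_reap_process_group():  # hypothetical helper
    import logging
    child = subprocess.Popen(['sleep', '60'], preexec_fn=os.setsid)
    reap_process_group(child.pid, logging.getLogger(__name__), timeout=5)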
def parse_template_string(template_string):
if "{{" in template_string: # jinja mode
return None, Template(template_string)
else:
return template_string, None
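# Illustrative sketch only (not part of Airflow): a plain string comes back as-is, a
# Jinja string comes back compiled and ready to render. The log filename layout is a
# placeholder assumption.
def _example_parse_template_string():  # hypothetical helper
    plain, _ = parse_template_string("logs/dag/task.log")
    _, tmpl = parse_template_string("logs/{{ ti.dag_id }}/{{ ti.task_id }}.log")
    return plain, tmpl.render(ti={'dag_id': 'd', 'task_id': 't'})  # -> 'logs/d/t.log'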
class AirflowImporter(object):
"""
Importer that dynamically loads a class and module from its parent. This
allows Airflow to support ``from airflow.operators import BashOperator``
even though BashOperator is actually in
``airflow.operators.bash_operator``.
The importer also takes over for the parent_module by wrapping it. This is
required to support attribute-based usage:
.. code:: python
from airflow import operators
operators.BashOperator(...)
"""
def __init__(self, parent_module, module_attributes):
"""
:param parent_module: The string package name of the parent module. For
example, 'airflow.operators'
:type parent_module: string
:param module_attributes: The file to class mappings for all importable
classes.
:type module_attributes: string
"""
self._parent_module = parent_module
self._attribute_modules = self._build_attribute_modules(module_attributes)
self._loaded_modules = {}
# Wrap the module so we can take over __getattr__.
sys.modules[parent_module.__name__] = self
@staticmethod
def _build_attribute_modules(module_attributes):
"""
Flips and flattens the module_attributes dictionary from:
module => [Attribute, ...]
To:
Attribute => module
This is useful so that we can find the module to use, given an
attribute.
"""
attribute_modules = {}
for module, attributes in list(module_attributes.items()):
for attribute in attributes:
attribute_modules[attribute] = module
return attribute_modules
def _load_attribute(self, attribute):
"""
Load the class attribute if it hasn't been loaded yet, and return it.
"""
module = self._attribute_modules.get(attribute, False)
if not module:
# This shouldn't happen. The check happens in find_modules, too.
raise ImportError(attribute)
elif module not in self._loaded_modules:
            # Note that it's very important to only load a given module once.
# If they are loaded more than once, the memory reference to the
# class objects changes, and Python thinks that an object of type
# Foo that was declared before Foo's module was reloaded is no
# longer the same type as Foo after it's reloaded.
path = os.path.realpath(self._parent_module.__file__)
folder = os.path.dirname(path)
f, filename, description = imp.find_module(module, [folder])
self._loaded_modules[module] = imp.load_module(module, f, filename, description)
# This functionality is deprecated, and AirflowImporter should be
# removed in 2.0.
warnings.warn(
"Importing {i} directly from {m} has been "
"deprecated. Please import from "
"'{m}.[operator_module]' instead. Support for direct "
"imports will be dropped entirely in Airflow 2.0.".format(
i=attribute, m=self._parent_module),
DeprecationWarning)
loaded_module = self._loaded_modules[module]
return getattr(loaded_module, attribute)
def __getattr__(self, attribute):
"""
Get an attribute from the wrapped module. If the attribute doesn't
exist, try and import it as a class from a submodule.
This is a Python trick that allows the class to pretend it's a module,
so that attribute-based usage works:
from airflow import operators
operators.BashOperator(...)
It also allows normal from imports to work:
from airflow.operators.bash_operator import BashOperator
"""
if hasattr(self._parent_module, attribute):
# Always default to the parent module if the attribute exists.
return getattr(self._parent_module, attribute)
elif attribute in self._attribute_modules:
# Try and import the attribute if it's got a module defined.
loaded_attribute = self._load_attribute(attribute)
setattr(self, attribute, loaded_attribute)
return loaded_attribute
raise AttributeError
|
|
#
# Copyright (c) 2017 Orange.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test the configuration validator driver"""
import mock
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
import six
from testtools.content import text_content
from congress.datasources import cfgvalidator_driver
from congress.tests import base
from congress.tests import base_rpc
from congress.tests import helper
LOG = logging.getLogger(__name__)
# pylint: disable=protected-access
def _fake_conf():
conf = mock.MagicMock()
conf._namespace = 'ns'
opt1 = mock.MagicMock()
opt1.id_ = 'ho1'
opt1.name = 'o1'
opt1.type = types.String
opt1.ns_id = 'ns'
opt2 = mock.MagicMock()
opt2.id_ = 'ho2'
opt2.name = 'o2'
opt2.type = types.String
opt2.ns_id = 'ns'
group = mock.MagicMock()
group._opts = {'o2': {'opt': opt2}}
conf._groups = {'g': group}
conf._opts = {'o1': {'opt': opt1}}
return conf
class TestCfgValidatorDriver(base.TestCase):
"""Test the configuration validator driver"""
def setUp(self):
super(TestCfgValidatorDriver, self).setUp()
args = helper.datasource_openstack_args()
with mock.patch('congress.datasources.cfgvalidator_driver.'
'ValidatorAgentClient',
spec=cfgvalidator_driver.ValidatorAgentClient) as agm:
self.driver = cfgvalidator_driver.ValidatorDriver(args=args)
self.agent_mock = agm
self.driver.node = mock.MagicMock()
for table in cfgvalidator_driver.ValidatorDriver.get_schema():
self.driver.state[table] = set()
def test_get_info(self):
"""Test info retrieval on datasource. Minimal requirements"""
info = self.driver.get_datasource_info()
self.assertIsNotNone(info['id'])
self.assertIsNotNone(info['description'])
self.assertIsNotNone(info['config'])
def test_translate_type(self):
"""Test the translation of type"""
cases = [
{
'inputs': ['lorem',
types.String(choices=['foo'], max_length=4)],
'expected': {
cfgvalidator_driver.STR_TYPE:
(u'lorem', u'', 4, u'False', u'False', u'[\'foo\']')}
},
{
'inputs': ['lorem', types.Integer(choices=[1], min=1, max=2)],
'expected': {
cfgvalidator_driver.INT_TYPE: (u'lorem', 1, 2, u'[1]')}
},
{
'inputs': ['lorem', types.Float(min=1, max=2)],
'expected': {cfgvalidator_driver.FLOAT_TYPE: (u'lorem', 1, 2)}
},
{
'inputs': ['lorem', types.List(item_type=types.Float(min=1))],
'expected': {
cfgvalidator_driver.LIST_TYPE: (
u'lorem', u'Float', u'False'),
cfgvalidator_driver.FLOAT_TYPE: (u'lorem', 1, u''), }
},
{
'inputs': ['lorem', types.URI(max_length=2, schemes=['HTTP'])],
'expected': {
cfgvalidator_driver.URI_TYPE: (u'lorem', 2, u'[\'HTTP\']')}
},
{
'inputs': ['lorem', types.Range(min=1, max=2)],
'expected': {cfgvalidator_driver.RANGE_TYPE: (u'lorem', 1, 2)}
},
]
for case in cases:
self.driver.translate_type(*case['inputs'])
for case in cases:
for table_name, expected in six.iteritems(case['expected']):
table = self.driver.state[table_name]
if expected:
self.assertIn(expected, table)
def test_translate_host(self):
"""Test the translation of host"""
cases = [
('lorem', 'ipsum', (u'lorem', u'ipsum')),
(None, 'ipsum', None),
('', 'ipsum', None),
('lorem', None, (u'lorem', u'')),
('lorem', '', (u'lorem', u'')),
]
for host_id, host_name, _ in cases:
self.driver.translate_host(host_id, host_name)
table = self.driver.state[cfgvalidator_driver.HOST]
for _, _, expected in cases:
if expected:
self.assertIn(expected, table)
expected_size = len(set([c[-1] for c in cases if c[-1]]))
self.assertEqual(len(table), expected_size)
def test_translate_file(self):
"""Test the translation of file"""
cases = [
('lorem', 'ipsum', 'dolor', 'sit',
(u'lorem', u'ipsum', u'dolor', u'sit')),
('lorem', 'ipsum', None, '', (u'lorem', u'ipsum', u'', u'')),
('lorem', 'ipsum', '', None, (u'lorem', u'ipsum', u'', u'')),
(None, 'ipsum', 'dolor', 'sit', None),
('', 'ipsum', 'dolor', 'sit', None),
('lorem', '', 'dolor', 'sit', None),
('lorem', None, 'dolor', 'sit', None),
]
for file_id, host_id, template_h, file_name, _ in cases:
self.driver.translate_file(file_id, host_id, template_h,
file_name)
table = self.driver.state[cfgvalidator_driver.FILE]
for _, _, _, _, expected in cases:
if expected:
self.assertIn(expected, table)
expected_size = len(set([c[-1] for c in cases if c[-1]]))
self.assertEqual(len(table), expected_size)
def test_translate_template_ns(self):
"""Test the translation of namespace"""
cases = [
{
'inputs': [
'lorem',
'',
{None: 'sit', 'amet': 'consectetur'}
],
'expected': {
cfgvalidator_driver.TEMPLATE: (u'lorem', u''),
cfgvalidator_driver.NAMESPACE: (u'amet', u'consectetur'),
cfgvalidator_driver.TEMPLATE_NS: (u'lorem', u'amet'),
}
},
{
'inputs': [
'',
'ipsum',
{'dolor': 'sit', 'amet': ''}
],
'expected': {
cfgvalidator_driver.TEMPLATE: None,
cfgvalidator_driver.NAMESPACE: None,
cfgvalidator_driver.TEMPLATE_NS: None,
}
},
{
'inputs': [
'lorem',
'ipsum',
{'dolor': 'sit'}
],
'expected': {
cfgvalidator_driver.TEMPLATE: (u'lorem', u'ipsum'),
cfgvalidator_driver.NAMESPACE: (u'dolor', u'sit'),
cfgvalidator_driver.TEMPLATE_NS: (u'lorem', u'dolor'),
}
}
]
for case in cases:
self.driver.translate_template_namespace(*case['inputs'])
for case in cases:
for table_name, expected in six.iteritems(case['expected']):
table = self.driver.state[table_name]
if expected:
self.assertIn(expected, table)
for table_name in [cfgvalidator_driver.TEMPLATE,
cfgvalidator_driver.NAMESPACE,
cfgvalidator_driver.TEMPLATE_NS]:
expected_size = len(
set([c['expected'][table_name] for c in cases
if c['expected'][table_name]]))
table = self.driver.state[table_name]
self.addDetail('table name', text_content(table_name))
self.assertEqual(len(table), expected_size)
def test_translate_option(self):
"""Unit tests for the translation of option definitions"""
opt = cfg.StrOpt('host', required=True)
opt.id_ = 'hash_opt'
opt.ns_id = 'hash_ns'
self.driver.translate_option(opt, "group")
self.assertIsNotNone(self.driver.state['option'])
self.assertIsNotNone(self.driver.state['option_info'])
self.assertEqual(1, len(self.driver.state['option']))
self.assertEqual(1, len(self.driver.state['option_info']))
def test_translate_value(self):
"""Unit tests for translation of option values"""
self.driver.translate_value("fid", 'optid1', 0)
self.driver.translate_value("fid", 'optid2', [1, 2, 3])
self.driver.translate_value("fid", 'optid3', {'a': 4, 'b': 5})
self.assertEqual(6, len(self.driver.state['binding']))
def test_translate_service(self):
"""Unit tests for translation of services"""
self.driver.translate_service("hid", "svc", "vname")
self.assertEqual(1, len(self.driver.state['service']))
def test_process_template_hashes(self):
"""Test processing of template hash"""
agent = self.agent_mock.return_value
agent.get_template.return_value = {'namespaces': ['ns']}
self.driver.process_template_hashes(['t1', 't2'], 'h')
self.assertEqual(2, agent.get_template.call_count)
self.assertEqual(1, agent.get_namespace.call_count)
def test_translate_conf(self):
"""Test translation of conf"""
self.driver.translate_conf(_fake_conf(), 'fid')
state = self.driver.state
self.assertEqual(2, len(state['option']))
self.assertEqual(2, len(state['option_info']))
self.assertEqual(2, len(state['binding']))
@mock.patch('congress.cfg_validator.parsing.construct_conf_manager')
@mock.patch('congress.cfg_validator.parsing.add_parsed_conf')
def test_process_config(self, parsing_ccm, _):
"""Test complete processing of a conf"""
parsing_ccm.return_value = _fake_conf()
conf = {
'template': 't',
'service': 's',
'version': 'v',
'path': '/path/to/c',
'data': {}
}
self.driver.known_templates['t'] = mock.MagicMock()
self.driver.process_config('fhash', conf, 'h')
state = self.driver.state
self.assertEqual(1, len(state['service']))
self.assertEqual(1, len(state['host']))
self.assertEqual(1, len(state['template']))
self.assertEqual(1, len(state['file']))
@mock.patch('congress.cfg_validator.parsing.construct_conf_manager')
@mock.patch('congress.cfg_validator.parsing.add_parsed_conf')
def test_process_config_hashes(self, parsing_ccm, _):
"""Test processing of configuration hashes"""
parsing_ccm.return_value = _fake_conf()
conf = {
'template': 't',
'service': 's',
'version': 'v',
'path': '/path/to/c',
'data': {}
}
self.agent_mock.return_value.get_config.return_value = conf
self.driver.known_templates['t'] = mock.MagicMock()
self.driver.process_config_hashes(['c'], 'h')
state = self.driver.state
self.assertEqual(1, len(state['service']))
self.assertEqual(1, len(state['host']))
self.assertEqual(1, len(state['template']))
self.assertEqual(1, len(state['file']))
def test_poll(self):
"""Test poll"""
self.driver.poll()
agt = self.agent_mock.return_value
self.assertEqual(1, agt.publish_templates_hashes.call_count)
self.assertEqual(1, agt.publish_configs_hashes.call_count)
class TestValidatorAgentClient(base_rpc.BaseTestRpcClient):
"""Unit tests for the RPC calls on the agent side"""
def test_publish_config_hashes(self):
"Test publish_config_hashes"
rpcapi = cfgvalidator_driver.ValidatorAgentClient()
self._test_rpc_api(
rpcapi,
None,
'publish_configs_hashes',
rpc_method='cast', fanout=True
)
def test_publish_templates_hashes(self):
"Test publish_templates_hashes"
rpcapi = cfgvalidator_driver.ValidatorAgentClient()
self._test_rpc_api(
rpcapi,
None,
'publish_templates_hashes',
rpc_method='cast', fanout=True
)
def test_get_namespace(self):
"test get_namespace"
rpcapi = cfgvalidator_driver.ValidatorAgentClient()
self._test_rpc_api(
rpcapi,
None,
'get_namespace',
rpc_method='call', server="host",
ns_hash='fake_hash'
)
# block calling thread
def test_get_template(self):
"test get_template"
rpcapi = cfgvalidator_driver.ValidatorAgentClient()
self._test_rpc_api(
rpcapi,
None,
'get_template',
rpc_method='call', server="host",
tpl_hash='fake_hash'
)
# block calling thread
def test_get_config(self):
"test get_config"
rpcapi = cfgvalidator_driver.ValidatorAgentClient()
self._test_rpc_api(
rpcapi,
None,
'get_config',
rpc_method='call', server="host",
cfg_hash='fake_hash'
)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations(object):
"""PublicIPAddressesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
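    # Illustrative usage sketch (not generated code): with a NetworkManagementClient
    # built for api-version 2019-09-01 and placeholder resource names, deletion is a
    # long-running operation driven through the returned poller, e.g.
    #   poller = client.public_ip_addresses.begin_delete("my-rg", "my-public-ip")
    #   poller.result()  # block until the delete completes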
def get(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
"""Gets the specified public IP address in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
parameters, # type: "_models.PublicIPAddress"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPAddress')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
parameters, # type: "_models.PublicIPAddress"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PublicIPAddress"]
"""Creates or updates a static or dynamic public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create or update public IP address operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.PublicIPAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PublicIPAddress or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.PublicIPAddress]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
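    # Illustrative usage sketch (not generated code): the parameters argument mirrors
    # the PublicIPAddress model shape; the location/allocation values below are
    # placeholders, e.g.
    #   params = {"location": "westus", "public_ip_allocation_method": "Static"}
    #   ip = client.public_ip_addresses.begin_create_or_update(
    #       "my-rg", "my-public-ip", params).result()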
def update_tags(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
"""Updates public IP address tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to update public IP address tags.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets all the public IP addresses in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets all public IP addresses in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list_virtual_machine_scale_set_public_ip_addresses(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets information about all public IP addresses on a virtual machine scale set level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore
def list_virtual_machine_scale_set_vm_public_ip_addresses(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
ip_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets information about all public IP addresses in a virtual machine IP configuration in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The network interface name.
:type network_interface_name: str
:param ip_configuration_name: The IP configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore
def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
ip_configuration_name, # type: str
public_ip_address_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
"""Get the specified public IP address in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
:param public_ip_address_name: The name of the public IP Address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
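# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the generated operations class): these
# operations are normally reached through NetworkManagementClient rather than
# by instantiating PublicIPAddressesOperations directly. The credential,
# subscription id, resource group and public IP names below are placeholders,
# and the azure-identity package is assumed to be installed.
def _example_list_and_tag_public_ips():  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # Page through every public IP address in the subscription (list_all above).
    for ip in client.public_ip_addresses.list_all():
        print(ip.name, ip.ip_address)
    # Patch only the tags of a single address (update_tags above issues the PATCH);
    # a plain dict is used here as a stand-in for a TagsObject model.
    client.public_ip_addresses.update_tags(
        "<resource-group>", "<public-ip-name>", {"tags": {"owner": "net-team"}})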
|
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs power measurements for browsers using Intel Power Gadget.
This script only works on Windows/Mac machines with an Intel CPU. Intel Power
Gadget must be installed on the machine before this script can run. The
software can be downloaded from:
https://software.intel.com/en-us/articles/intel-power-gadget
Newer IPG versions might also require Visual C++ 2010 runtime to be installed
on Windows:
https://www.microsoft.com/en-us/download/details.aspx?id=14632
Install selenium via pip: `pip install selenium`
Selenium 4 is required for Edge; Selenium 4.0.0a5 or later is recommended:
`pip install selenium==4.0.0a5`
And finally install the web drivers for Chrome (and Edge if needed):
http://chromedriver.chromium.org/downloads
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
Sample runs:
python measure_power_intel.py --browser=canary --duration=10 --delay=5
--verbose --url="https://www.youtube.com/watch?v=0XdS37Re1XQ"
--extra-browser-args="--no-sandbox"
Supported browsers (--browser=xxx): 'stable', 'beta', 'dev', 'canary',
'chromium', 'edge', and path_to_exe_file.
For Edge from insider channels (beta, dev, canary), use path_to_exe_file.
It is recommended to test with optimized builds of Chromium e.g. these GN args:
is_debug = false
is_component_build = false
is_official_build = true # optimization similar to official builds
use_goma = true
enable_nacl = false
proprietary_codecs = true
ffmpeg_branding = "Chrome"
It might also help to disable unnecessary background services and to unplug the
power source some time before measuring. See "Computer setup" section here:
https://microsoftedge.github.io/videotest/2017-04/WebdriverMethodology.html
"""
import argparse
import csv
import datetime
import logging
import os
import shutil
import sys
import tempfile
try:
from selenium import webdriver
from selenium.common import exceptions
except ImportError as error:
logging.error(
'This script needs selenium and appropriate web drivers to be installed.')
raise
import gpu_tests.ipg_utils as ipg_utils
CHROME_STABLE_PATH_WIN = (
r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe')
CHROME_BETA_PATH_WIN = (
r'C:\Program Files (x86)\Google\Chrome Beta\Application\chrome.exe')
CHROME_DEV_PATH_WIN = (
r'C:\Program Files (x86)\Google\Chrome Dev\Application\chrome.exe')
# The following two paths are relative to the LOCALAPPDATA
CHROME_CANARY_PATH_WIN = r'Google\Chrome SxS\Application\chrome.exe'
CHROMIUM_PATH_WIN = r'Chromium\Application\chrome.exe'
CHROME_STABLE_PATH_MAC = (
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
CHROME_BETA_PATH_MAC = CHROME_STABLE_PATH_MAC
CHROME_DEV_PATH_MAC = CHROME_STABLE_PATH_MAC
CHROME_CANARY_PATH_MAC = (
'/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary'
)
SUPPORTED_BROWSERS = ['stable', 'beta', 'dev', 'canary', 'chromium', 'edge']
def LocateBrowserWin(options_browser):
if options_browser == 'edge':
return 'edge'
browser = None
if not options_browser or options_browser == 'stable':
browser = CHROME_STABLE_PATH_WIN
elif options_browser == 'beta':
browser = CHROME_BETA_PATH_WIN
elif options_browser == 'dev':
browser = CHROME_DEV_PATH_WIN
elif options_browser == 'canary':
browser = os.path.join(os.getenv('LOCALAPPDATA'), CHROME_CANARY_PATH_WIN)
elif options_browser == 'chromium':
browser = os.path.join(os.getenv('LOCALAPPDATA'), CHROMIUM_PATH_WIN)
elif options_browser.endswith('.exe'):
browser = options_browser
else:
logging.warning('Invalid value for --browser')
logging.warning(
'Supported values: %s, or a full path to a browser executable.',
', '.join(SUPPORTED_BROWSERS))
return None
if not os.path.exists(browser):
logging.warning("Can't locate browser at %s", browser)
logging.warning('Please pass full path to the executable in --browser')
return None
return browser
def LocateBrowserMac(options_browser):
browser = None
if not options_browser or options_browser == 'stable':
browser = CHROME_STABLE_PATH_MAC
elif options_browser == 'beta':
browser = CHROME_BETA_PATH_MAC
elif options_browser == 'dev':
browser = CHROME_DEV_PATH_MAC
elif options_browser == 'canary':
browser = CHROME_CANARY_PATH_MAC
elif options_browser.endswith('Chromium'):
browser = options_browser
else:
logging.warning('Invalid value for --browser')
logging.warning(
'Supported values: %s, or a full path to a browser executable.',
', '.join(SUPPORTED_BROWSERS))
return None
if not os.path.exists(browser):
logging.warning("Can't locate browser at %s", browser)
logging.warning('Please pass full path to the executable in --browser')
return None
return browser
def LocateBrowser(options_browser):
if sys.platform == 'win32':
return LocateBrowserWin(options_browser)
if sys.platform == 'darwin':
return LocateBrowserMac(options_browser)
logging.warning('This script only runs on Windows/Mac.')
return None
def CreateWebDriver(browser, user_data_dir, url, fullscreen,
extra_browser_args):
if browser == 'edge' or browser.endswith('msedge.exe'):
options = webdriver.EdgeOptions()
# Set use_chromium to true or an error will be triggered that the latest
# MSEdgeDriver doesn't support an older version (non-chrome based) of
# MSEdge.
options.use_chromium = True
options.binary_location = browser
for arg in extra_browser_args:
options.add_argument(arg)
logging.debug(' '.join(options.arguments))
driver = webdriver.Edge(options=options)
else:
options = webdriver.ChromeOptions()
options.binary_location = browser
options.add_argument('--user-data-dir=%s' % user_data_dir)
options.add_argument('--no-first-run')
options.add_argument('--no-default-browser-check')
options.add_argument('--autoplay-policy=no-user-gesture-required')
options.add_argument('--start-maximized')
for arg in extra_browser_args:
options.add_argument(arg)
logging.debug(' '.join(options.arguments))
driver = webdriver.Chrome(options=options)
driver.implicitly_wait(30)
if url is not None:
driver.get(url)
if fullscreen:
try:
video_el = driver.find_element_by_tag_name('video')
actions = webdriver.ActionChains(driver)
actions.move_to_element(video_el)
actions.double_click(video_el)
actions.perform()
except exceptions.InvalidSelectorException:
logging.warning('Could not locate video element to make fullscreen')
return driver
# pylint: disable=too-many-arguments
def MeasurePowerOnce(browser, logfile, duration, delay, resolution, url,
fullscreen, extra_browser_args):
logging.debug('Logging into %s', logfile)
user_data_dir = tempfile.mkdtemp()
driver = CreateWebDriver(browser, user_data_dir, url, fullscreen,
extra_browser_args)
ipg_utils.RunIPG(duration + delay, resolution, logfile)
driver.quit()
try:
shutil.rmtree(user_data_dir)
except Exception as err: # pylint: disable=broad-except
logging.warning('Failed to remove temporary folder: %s', user_data_dir)
logging.warning('Please kill browser and remove it manually to avoid leak')
logging.debug(err)
results = ipg_utils.AnalyzeIPGLogFile(logfile, delay)
return results
# pylint: enable=too-many-arguments
def ParseArgs():
parser = argparse.ArgumentParser()
parser.add_argument('--browser',
help=('select which browser to run. Options include: ' +
', '.join(SUPPORTED_BROWSERS) +
', or a full path to a browser executable. ' +
'By default, stable is selected.'))
parser.add_argument('--duration',
default=60,
type=int,
help='specify how many seconds Intel Power Gadget '
'measures. By default, 60 seconds is selected.')
parser.add_argument('--delay',
default=10,
type=int,
help='specify how many seconds we skip in the data '
'Intel Power Gadget collects. This time is for starting '
'video play, switching to fullscreen mode, etc. '
'By default, 10 seconds is selected.')
parser.add_argument('--resolution',
default=100,
type=int,
help='specify how often Intel Power Gadget samples '
'data in milliseconds. By default, 100 ms is selected.')
parser.add_argument('--logdir',
                      help='specify where Intel Power Gadget stores its log. '
                      'By default, it is the current path.')
parser.add_argument('--logname',
help='specify the prefix for Intel Power Gadget log '
'filename. By default, it is PowerLog.')
parser.add_argument('-v',
'--verbose',
action='store_true',
default=False,
help='print out debug information.')
parser.add_argument('--repeat',
default=1,
type=int,
help='specify how many times to run the measurements.')
parser.add_argument('--url',
help='specify the webpage URL the browser launches with.')
parser.add_argument(
'--extra-browser-args',
dest='extra_browser_args',
help='specify extra command line switches for the browser '
'that are separated by spaces (quoted).')
parser.add_argument(
'--extra-browser-args-filename',
dest='extra_browser_args_filename',
metavar='FILE',
help='specify extra command line switches for the browser '
'in a text file that are separated by whitespace.')
parser.add_argument('--fullscreen',
action='store_true',
default=False,
help='specify whether video should be made fullscreen.')
return parser.parse_args()
def main():
options = ParseArgs()
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
browser = LocateBrowser(options.browser)
if not browser:
return
# TODO(zmo): Add code to disable a bunch of Windows services that might
# affect power consumption.
log_prefix = options.logname or 'PowerLog'
all_results = []
extra_browser_args = []
if options.extra_browser_args:
extra_browser_args = options.extra_browser_args.split()
if options.extra_browser_args_filename:
if not os.path.isfile(options.extra_browser_args_filename):
logging.error("Can't locate file at %s",
options.extra_browser_args_filename)
else:
with open(options.extra_browser_args_filename, 'r') as f:
extra_browser_args.extend(f.read().split())
for run in range(1, options.repeat + 1):
logfile = ipg_utils.GenerateIPGLogFilename(log_prefix, options.logdir, run,
options.repeat, True)
print('Iteration #%d out of %d' % (run, options.repeat))
results = MeasurePowerOnce(browser, logfile, options.duration,
options.delay, options.resolution, options.url,
options.fullscreen, extra_browser_args)
print(results)
all_results.append(results)
now = datetime.datetime.now()
results_filename = '%s_%s_results.csv' % (log_prefix,
now.strftime('%Y%m%d%H%M%S'))
try:
    with open(results_filename, 'w', newline='') as results_csv:  # text mode for csv.DictWriter on Python 3
labels = sorted(all_results[0].keys())
w = csv.DictWriter(results_csv, fieldnames=labels)
w.writeheader()
w.writerows(all_results)
except Exception as err: # pylint: disable=broad-except
logging.warning('Failed to write results file %s', results_filename)
logging.debug(err)
if __name__ == '__main__':
sys.exit(main())
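# Editor's note: besides the command line shown in the module docstring, the
# measurement loop can be driven programmatically. A minimal sketch, assuming
# Intel Power Gadget and a matching web driver are installed; the URL and log
# file name below are placeholders:
#
#   import measure_power_intel as mpi
#   browser = mpi.LocateBrowser('canary')
#   results = mpi.MeasurePowerOnce(browser, 'PowerLog_0.csv', duration=30,
#                                  delay=5, resolution=100,
#                                  url='https://example.com', fullscreen=False,
#                                  extra_browser_args=['--no-sandbox'])
#   print(results)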
|
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class V1PodSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Swagger model
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'volumes': 'list[V1Volume]',
'containers': 'list[V1Container]',
'restart_policy': 'str',
'termination_grace_period_seconds': 'int',
'active_deadline_seconds': 'int',
'dns_policy': 'str',
'node_selector': 'str',
'service_account_name': 'str',
'service_account': 'str',
'node_name': 'str',
'host_network': 'bool',
'image_pull_secrets': 'list[V1LocalObjectReference]'
}
self.attribute_map = {
'volumes': 'volumes',
'containers': 'containers',
'restart_policy': 'restartPolicy',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'active_deadline_seconds': 'activeDeadlineSeconds',
'dns_policy': 'dnsPolicy',
'node_selector': 'nodeSelector',
'service_account_name': 'serviceAccountName',
'service_account': 'serviceAccount',
'node_name': 'nodeName',
'host_network': 'hostNetwork',
'image_pull_secrets': 'imagePullSecrets'
}
self._volumes = None
self._containers = None
self._restart_policy = None
self._termination_grace_period_seconds = None
self._active_deadline_seconds = None
self._dns_policy = None
self._node_selector = None
self._service_account_name = None
self._service_account = None
self._node_name = None
self._host_network = None
self._image_pull_secrets = None
@property
def volumes(self):
"""
Gets the volumes of this V1PodSpec.
list of volumes that can be mounted by containers belonging to the pod; see http://releases.k8s.io/v1.0.4/docs/volumes.md
:return: The volumes of this V1PodSpec.
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""
Sets the volumes of this V1PodSpec.
list of volumes that can be mounted by containers belonging to the pod; see http://releases.k8s.io/v1.0.4/docs/volumes.md
:param volumes: The volumes of this V1PodSpec.
:type: list[V1Volume]
"""
self._volumes = volumes
@property
def containers(self):
"""
Gets the containers of this V1PodSpec.
list of containers belonging to the pod; cannot be updated; containers cannot currently be added or removed; there must be at least one container in a Pod; see http://releases.k8s.io/v1.0.4/docs/containers.md
:return: The containers of this V1PodSpec.
:rtype: list[V1Container]
"""
return self._containers
@containers.setter
def containers(self, containers):
"""
Sets the containers of this V1PodSpec.
list of containers belonging to the pod; cannot be updated; containers cannot currently be added or removed; there must be at least one container in a Pod; see http://releases.k8s.io/v1.0.4/docs/containers.md
:param containers: The containers of this V1PodSpec.
:type: list[V1Container]
"""
self._containers = containers
@property
def restart_policy(self):
"""
Gets the restart_policy of this V1PodSpec.
restart policy for all containers within the pod; one of Always, OnFailure, Never; defaults to Always; see http://releases.k8s.io/v1.0.4/docs/pod-states.md#restartpolicy
:return: The restart_policy of this V1PodSpec.
:rtype: str
"""
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
"""
Sets the restart_policy of this V1PodSpec.
restart policy for all containers within the pod; one of Always, OnFailure, Never; defaults to Always; see http://releases.k8s.io/v1.0.4/docs/pod-states.md#restartpolicy
:param restart_policy: The restart_policy of this V1PodSpec.
:type: str
"""
self._restart_policy = restart_policy
@property
def termination_grace_period_seconds(self):
"""
Gets the termination_grace_period_seconds of this V1PodSpec.
optional duration in seconds the pod needs to terminate gracefully; may be decreased in delete request; value must be non-negative integer; the value zero indicates delete immediately; if this value is not set, the default grace period will be used instead; the grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal; set this value longer than the expected cleanup time for your process
:return: The termination_grace_period_seconds of this V1PodSpec.
:rtype: int
"""
return self._termination_grace_period_seconds
@termination_grace_period_seconds.setter
def termination_grace_period_seconds(self, termination_grace_period_seconds):
"""
Sets the termination_grace_period_seconds of this V1PodSpec.
optional duration in seconds the pod needs to terminate gracefully; may be decreased in delete request; value must be non-negative integer; the value zero indicates delete immediately; if this value is not set, the default grace period will be used instead; the grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal; set this value longer than the expected cleanup time for your process
:param termination_grace_period_seconds: The termination_grace_period_seconds of this V1PodSpec.
:type: int
"""
self._termination_grace_period_seconds = termination_grace_period_seconds
@property
def active_deadline_seconds(self):
"""
Gets the active_deadline_seconds of this V1PodSpec.
:return: The active_deadline_seconds of this V1PodSpec.
:rtype: int
"""
return self._active_deadline_seconds
@active_deadline_seconds.setter
def active_deadline_seconds(self, active_deadline_seconds):
"""
Sets the active_deadline_seconds of this V1PodSpec.
:param active_deadline_seconds: The active_deadline_seconds of this V1PodSpec.
:type: int
"""
self._active_deadline_seconds = active_deadline_seconds
@property
def dns_policy(self):
"""
Gets the dns_policy of this V1PodSpec.
DNS policy for containers within the pod; one of 'ClusterFirst' or 'Default'
:return: The dns_policy of this V1PodSpec.
:rtype: str
"""
return self._dns_policy
@dns_policy.setter
def dns_policy(self, dns_policy):
"""
Sets the dns_policy of this V1PodSpec.
DNS policy for containers within the pod; one of 'ClusterFirst' or 'Default'
:param dns_policy: The dns_policy of this V1PodSpec.
:type: str
"""
self._dns_policy = dns_policy
@property
def node_selector(self):
"""
Gets the node_selector of this V1PodSpec.
selector which must match a node's labels for the pod to be scheduled on that node; see http://releases.k8s.io/v1.0.4/examples/node-selection/README.md
:return: The node_selector of this V1PodSpec.
:rtype: str
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""
Sets the node_selector of this V1PodSpec.
selector which must match a node's labels for the pod to be scheduled on that node; see http://releases.k8s.io/v1.0.4/examples/node-selection/README.md
:param node_selector: The node_selector of this V1PodSpec.
:type: str
"""
self._node_selector = node_selector
@property
def service_account_name(self):
"""
Gets the service_account_name of this V1PodSpec.
name of the ServiceAccount to use to run this pod; see http://releases.k8s.io/v1.0.4/docs/service_accounts.md
:return: The service_account_name of this V1PodSpec.
:rtype: str
"""
return self._service_account_name
@service_account_name.setter
def service_account_name(self, service_account_name):
"""
Sets the service_account_name of this V1PodSpec.
name of the ServiceAccount to use to run this pod; see http://releases.k8s.io/v1.0.4/docs/service_accounts.md
:param service_account_name: The service_account_name of this V1PodSpec.
:type: str
"""
self._service_account_name = service_account_name
@property
def service_account(self):
"""
Gets the service_account of this V1PodSpec.
deprecated; use serviceAccountName instead
:return: The service_account of this V1PodSpec.
:rtype: str
"""
return self._service_account
@service_account.setter
def service_account(self, service_account):
"""
Sets the service_account of this V1PodSpec.
deprecated; use serviceAccountName instead
:param service_account: The service_account of this V1PodSpec.
:type: str
"""
self._service_account = service_account
@property
def node_name(self):
"""
Gets the node_name of this V1PodSpec.
node requested for this pod
:return: The node_name of this V1PodSpec.
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""
Sets the node_name of this V1PodSpec.
node requested for this pod
:param node_name: The node_name of this V1PodSpec.
:type: str
"""
self._node_name = node_name
@property
def host_network(self):
"""
Gets the host_network of this V1PodSpec.
host networking requested for this pod
:return: The host_network of this V1PodSpec.
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""
Sets the host_network of this V1PodSpec.
host networking requested for this pod
:param host_network: The host_network of this V1PodSpec.
:type: bool
"""
self._host_network = host_network
@property
def image_pull_secrets(self):
"""
Gets the image_pull_secrets of this V1PodSpec.
list of references to secrets in the same namespace available for pulling the container images; see http://releases.k8s.io/v1.0.4/docs/images.md#specifying-imagepullsecrets-on-a-pod
:return: The image_pull_secrets of this V1PodSpec.
:rtype: list[V1LocalObjectReference]
"""
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
"""
Sets the image_pull_secrets of this V1PodSpec.
list of references to secrets in the same namespace available for pulling the container images; see http://releases.k8s.io/v1.0.4/docs/images.md#specifying-imagepullsecrets-on-a-pod
:param image_pull_secrets: The image_pull_secrets of this V1PodSpec.
:type: list[V1LocalObjectReference]
"""
self._image_pull_secrets = image_pull_secrets
def to_dict(self):
"""
Return model properties dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Return model properties str
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
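def _example_pod_spec():  # pragma: no cover
    """Editor's usage sketch, not generated code: populate a V1PodSpec through
    its setters and serialize it with to_dict(). The container entry is a plain
    dict used as a stand-in for a V1Container instance."""
    spec = V1PodSpec()
    spec.restart_policy = 'Always'
    spec.dns_policy = 'ClusterFirst'
    spec.containers = [{'name': 'web', 'image': 'nginx:1.7.9'}]
    # to_dict() keys the result by the Python attribute names in swagger_types;
    # nested objects with their own to_dict() are recursed, plain dicts pass through.
    return spec.to_dict()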
|
|
# -*- coding: utf-8 -*-
"""
molvs.charge
~~~~~~~~~~~~
This module implements tools for manipulating charges on molecules. In particular, :class:`~molvs.charge.Reionizer`,
which competitively reionizes acids such that the strongest acids ionize first, and :class:`~molvs.charge.Uncharger`,
which attempts to neutralize ionized acids and bases on a molecule.
:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
import copy
import logging
from rdkit import Chem
from .utils import memoized_property
log = logging.getLogger(__name__)
class AcidBasePair(object):
"""An acid and its conjugate base, defined by SMARTS.
A strength-ordered list of AcidBasePairs can be used to ensure the strongest acids in a molecule ionize first.
"""
def __init__(self, name, acid, base):
"""Initialize an AcidBasePair with the following parameters:
:param string name: A name for this AcidBasePair.
:param string acid: SMARTS pattern for the protonated acid.
:param string base: SMARTS pattern for the conjugate ionized base.
"""
log.debug(f'Initializing AcidBasePair: {name}')
self.name = name
self.acid_str = acid
self.base_str = base
@memoized_property
def acid(self):
log.debug(f'Loading AcidBasePair acid: {self.name}')
return Chem.MolFromSmarts(self.acid_str)
@memoized_property
def base(self):
log.debug(f'Loading AcidBasePair base: {self.name}')
return Chem.MolFromSmarts(self.base_str)
def __repr__(self):
return 'AcidBasePair({!r}, {!r}, {!r})'.format(self.name, self.acid_str, self.base_str)
def __str__(self):
return self.name
#: The default list of AcidBasePairs, sorted from strongest to weakest. This list is derived from the Food and Drug
#: Administration Substance Registration System Standard Operating Procedure guide.
ACID_BASE_PAIRS = (
AcidBasePair('-OSO3H', 'OS(=O)(=O)[OH]', 'OS(=O)(=O)[O-]'),
AcidBasePair('-SO3H', '[!O]S(=O)(=O)[OH]', '[!O]S(=O)(=O)[O-]'),
AcidBasePair('-OSO2H', 'O[SD3](=O)[OH]', 'O[SD3](=O)[O-]'),
AcidBasePair('-SO2H', '[!O][SD3](=O)[OH]', '[!O][SD3](=O)[O-]'),
AcidBasePair('-OPO3H2', 'OP(=O)([OH])[OH]', 'OP(=O)([OH])[O-]'),
AcidBasePair('-PO3H2', '[!O]P(=O)([OH])[OH]', '[!O]P(=O)([OH])[O-]'),
AcidBasePair('-CO2H', 'C(=O)[OH]', 'C(=O)[O-]'),
AcidBasePair('thiophenol', 'c[SH]', 'c[S-]'),
AcidBasePair('(-OPO3H)-', 'OP(=O)([O-])[OH]', 'OP(=O)([O-])[O-]'),
AcidBasePair('(-PO3H)-', '[!O]P(=O)([O-])[OH]', '[!O]P(=O)([O-])[O-]'),
AcidBasePair('phthalimide', 'O=C2c1ccccc1C(=O)[NH]2', 'O=C2c1ccccc1C(=O)[N-]2'),
AcidBasePair('CO3H (peracetyl)', 'C(=O)O[OH]', 'C(=O)O[O-]'),
AcidBasePair('alpha-carbon-hydrogen-nitro group', 'O=N(O)[CH]', 'O=N(O)[C-]'),
AcidBasePair('-SO2NH2', 'S(=O)(=O)[NH2]', 'S(=O)(=O)[NH-]'),
AcidBasePair('-OBO2H2', 'OB([OH])[OH]', 'OB([OH])[O-]'),
AcidBasePair('-BO2H2', '[!O]B([OH])[OH]', '[!O]B([OH])[O-]'),
AcidBasePair('phenol', 'c[OH]', 'c[O-]'),
AcidBasePair('SH (aliphatic)', 'C[SH]', 'C[S-]'),
AcidBasePair('(-OBO2H)-', 'OB([O-])[OH]', 'OB([O-])[O-]'),
AcidBasePair('(-BO2H)-', '[!O]B([O-])[OH]', '[!O]B([O-])[O-]'),
AcidBasePair('cyclopentadiene', 'C1=CC=C[CH2]1', 'c1ccc[cH-]1'),
AcidBasePair('-CONH2', 'C(=O)[NH2]', 'C(=O)[NH-]'),
AcidBasePair('imidazole', 'c1cnc[nH]1', 'c1cnc[n-]1'),
AcidBasePair('-OH (aliphatic alcohol)', '[CX4][OH]', '[CX4][O-]'),
AcidBasePair('alpha-carbon-hydrogen-keto group', 'O=C([!O])[C!H0+0]', 'O=C([!O])[C-]'),
AcidBasePair('alpha-carbon-hydrogen-acetyl ester group', 'OC(=O)[C!H0+0]', 'OC(=O)[C-]'),
AcidBasePair('sp carbon hydrogen', 'C#[CH]', 'C#[C-]'),
AcidBasePair('alpha-carbon-hydrogen-sulfone group', 'CS(=O)(=O)[C!H0+0]', 'CS(=O)(=O)[C-]'),
AcidBasePair('alpha-carbon-hydrogen-sulfoxide group', 'C[SD3](=O)[C!H0+0]', 'C[SD3](=O)[C-]'),
AcidBasePair('-NH2', '[CX4][NH2]', '[CX4][NH-]'),
AcidBasePair('benzyl hydrogen', 'c[CX4H2]', 'c[CX3H-]'),
AcidBasePair('sp2-carbon hydrogen', '[CX3]=[CX3!H0+0]', '[CX3]=[CX2-]'),
AcidBasePair('sp3-carbon hydrogen', '[CX4!H0+0]', '[CX3-]'),
)
class ChargeCorrection(object):
"""An atom that should have a certain charge applied, defined by a SMARTS pattern."""
def __init__(self, name, smarts, charge):
"""Initialize a ChargeCorrection with the following parameters:
:param string name: A name for this ForcedAtomCharge.
:param string smarts: SMARTS pattern to match. Charge is applied to the first atom.
:param int charge: The charge to apply.
"""
log.debug(f'Initializing ChargeCorrection: {name}')
self.name = name
self.smarts_str = smarts
self.charge = charge
@memoized_property
def smarts(self):
log.debug(f'Loading ChargeCorrection smarts: {self.name}')
return Chem.MolFromSmarts(self.smarts_str)
def __repr__(self):
return 'ChargeCorrection({!r}, {!r}, {!r})'.format(self.name, self.smarts_str, self.charge)
def __str__(self):
return self.name
#: The default list of ChargeCorrections.
CHARGE_CORRECTIONS = (
ChargeCorrection('[Li,Na,K]', '[Li,Na,K;X0+0]', 1),
ChargeCorrection('[Mg,Ca]', '[Mg,Ca;X0+0]', 2),
ChargeCorrection('[Cl]', '[Cl;X0+0]', -1),
# TODO: Extend to other incorrectly charged atoms
)
class Reionizer(object):
"""A class to fix charges and reionize a molecule such that the strongest acids ionize first."""
def __init__(self, acid_base_pairs=ACID_BASE_PAIRS, charge_corrections=CHARGE_CORRECTIONS):
"""Initialize a Reionizer with the following parameter:
:param acid_base_pairs: A list of :class:`AcidBasePairs <molvs.charge.AcidBasePair>` to reionize, sorted from
strongest to weakest.
:param charge_corrections: A list of :class:`ChargeCorrections <molvs.charge.ChargeCorrection>`.
"""
log.debug('Initializing Reionizer')
self.acid_base_pairs = acid_base_pairs
self.charge_corrections = charge_corrections
def __call__(self, mol):
"""Calling a Reionizer instance like a function is the same as calling its reionize(mol) method."""
return self.reionize(mol)
def reionize(self, mol):
"""Enforce charges on certain atoms, then perform competitive reionization.
First, charge corrections are applied to ensure, for example, that free metals are correctly ionized. Then, if
a molecule with multiple acid groups is partially ionized, ensure the strongest acids ionize first.
The algorithm works as follows:
- Use SMARTS to find the strongest protonated acid and the weakest ionized acid.
- If the ionized acid is weaker than the protonated acid, swap proton and repeat.
:param mol: The molecule to reionize.
:type mol: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
:return: The reionized molecule.
:rtype: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
"""
log.debug('Running Reionizer')
start_charge = Chem.GetFormalCharge(mol)
# Apply forced charge corrections
for cc in self.charge_corrections:
for match in mol.GetSubstructMatches(cc.smarts):
atom = mol.GetAtomWithIdx(match[0])
log.info('Applying charge correction %s (%s %+d)',
cc.name, atom.GetSymbol(), cc.charge)
atom.SetFormalCharge(cc.charge)
current_charge = Chem.GetFormalCharge(mol)
charge_diff = Chem.GetFormalCharge(mol) - start_charge
# If molecule is now neutral, assume everything is now fixed
# But otherwise, if charge has become more positive, look for additional protonated acid groups to ionize
        if current_charge != 0:
while charge_diff > 0:
ppos, poccur = self._strongest_protonated(mol)
if ppos is None:
break
log.info(f'Ionizing {self.acid_base_pairs[ppos].name} to balance previous charge corrections')
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
if patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
patom.UpdatePropertyCache()
charge_diff -= 1
already_moved = set()
while True:
ppos, poccur = self._strongest_protonated(mol)
ipos, ioccur = self._weakest_ionized(mol)
if ioccur and poccur and ppos < ipos:
if poccur[-1] == ioccur[-1]:
# Bad! H wouldn't be moved, resulting in infinite loop.
log.warning('Aborted reionization due to unexpected situation')
break
key = tuple(sorted([poccur[-1], ioccur[-1]]))
if key in already_moved:
log.warning(
'Aborting reionization to avoid infinite loop due to it being ambiguous where to put a Hydrogen')
break
already_moved.add(key)
log.info(f'Moved proton from {self.acid_base_pairs[ppos].name} to {self.acid_base_pairs[ipos].name}')
# Remove hydrogen from strongest protonated
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
# If no implicit Hs to autoremove, and at least 1 explicit H to remove, reduce explicit count by 1
if patom.GetNumImplicitHs() == 0 and patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
# TODO: Remove any chiral label on patom?
patom.UpdatePropertyCache()
# Add hydrogen to weakest ionized
iatom = mol.GetAtomWithIdx(ioccur[-1])
iatom.SetFormalCharge(iatom.GetFormalCharge() + 1)
# Increase explicit H count if no implicit, or aromatic N or P, or non default valence state
if (iatom.GetNoImplicit() or
((patom.GetAtomicNum() == 7 or patom.GetAtomicNum() == 15) and patom.GetIsAromatic()) or
iatom.GetTotalValence() not in list(Chem.GetPeriodicTable().GetValenceList(iatom.GetAtomicNum()))):
iatom.SetNumExplicitHs(iatom.GetNumExplicitHs() + 1)
iatom.UpdatePropertyCache()
else:
break
# TODO: Canonical ionization position if multiple equivalent positions?
Chem.SanitizeMol(mol)
return mol
def _strongest_protonated(self, mol):
for position, pair in enumerate(self.acid_base_pairs):
for occurrence in mol.GetSubstructMatches(pair.acid):
return position, occurrence
return None, None
def _weakest_ionized(self, mol):
for position, pair in enumerate(reversed(self.acid_base_pairs)):
for occurrence in mol.GetSubstructMatches(pair.base):
return len(self.acid_base_pairs) - position - 1, occurrence
return None, None
class Uncharger(object):
"""Class for neutralizing ionized acids and bases.
This class uncharges molecules by adding and/or removing hydrogens. For zwitterions, hydrogens are moved to
eliminate charges where possible. However, in cases where there is a positive charge that is not neutralizable, an
attempt is made to also preserve the corresponding negative charge.
The method is derived from the neutralise module in `Francis Atkinson's standardiser tool
<https://github.com/flatkinson/standardiser>`_, which is released under the Apache License v2.0.
"""
def __init__(self):
log.debug('Initializing Uncharger')
#: Neutralizable positive charge (with hydrogens attached)
self._pos_h = Chem.MolFromSmarts('[+!H0!$(*~[-])]')
#: Non-neutralizable positive charge (no hydrogens attached)
self._pos_quat = Chem.MolFromSmarts('[+H0!$(*~[-])]')
#: Negative charge, not bonded to a positive charge with no hydrogens
self._neg = Chem.MolFromSmarts('[-!$(*~[+H0])]')
#: Negative oxygen bonded to [C,P,S]=O, negative aromatic nitrogen?
self._neg_acid = Chem.MolFromSmarts('[$([O-][C,P,S]=O),$([n-]1nnnc1),$(n1[n-]nnc1)]')
def __call__(self, mol):
"""Calling an Uncharger instance like a function is the same as calling its uncharge(mol) method."""
return self.uncharge(mol)
def uncharge(self, mol):
"""Neutralize molecule by adding/removing hydrogens. Attempts to preserve zwitterions.
:param mol: The molecule to uncharge.
:type mol: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
:return: The uncharged molecule.
:rtype: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
"""
log.debug('Running Uncharger')
mol = copy.deepcopy(mol)
# Get atom ids for matches
p = [x[0] for x in mol.GetSubstructMatches(self._pos_h)]
q = [x[0] for x in mol.GetSubstructMatches(self._pos_quat)]
n = [x[0] for x in mol.GetSubstructMatches(self._neg)]
a = [x[0] for x in mol.GetSubstructMatches(self._neg_acid)]
# Neutralize negative charges
if q:
# Surplus negative charges more than non-neutralizable positive charges
neg_surplus = len(n) - len(q)
if a and neg_surplus > 0:
# zwitterion with more negative charges than quaternary positive centres
while neg_surplus > 0 and a:
# Add hydrogen to first negative acid atom, increase formal charge
# Until quaternary positive == negative total or no more negative acid
atom = mol.GetAtomWithIdx(a.pop(0))
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetFormalCharge(atom.GetFormalCharge() + 1)
neg_surplus -= 1
log.info('Removed negative charge')
else:
for atom in [mol.GetAtomWithIdx(x) for x in n]:
while atom.GetFormalCharge() < 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetFormalCharge(atom.GetFormalCharge() + 1)
log.info('Removed negative charge')
# Neutralize positive charges
for atom in [mol.GetAtomWithIdx(x) for x in p]:
# Remove hydrogen and reduce formal change until neutral or no more hydrogens
while atom.GetFormalCharge() > 0 and atom.GetNumExplicitHs() > 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() - 1)
atom.SetFormalCharge(atom.GetFormalCharge() - 1)
log.info('Removed positive charge')
return mol
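# Editor's usage sketch (not part of the original module), assuming RDKit is
# installed; the SMILES strings are arbitrary examples.
def _example_reionize_and_uncharge():  # pragma: no cover
    # Uncharger: neutralize an ionized carboxylate by adding a hydrogen.
    acetate = Chem.MolFromSmiles('CC(=O)[O-]')
    neutral = Uncharger()(acetate)
    # Reionizer: move the proton so the stronger acid (the carboxylic acid)
    # ends up ionized instead of the weaker phenol.
    partially_ionized = Chem.MolFromSmiles('OC(=O)c1ccccc1[O-]')
    reionized = Reionizer()(partially_ionized)
    return Chem.MolToSmiles(neutral), Chem.MolToSmiles(reionized)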
|
|
from plenum.common.constants import NAME, NONCE
from plenum.common.signer_did import DidIdentity
from plenum.common.types import f
from plenum.common.util import prettyDateDifference, friendlyToRaw
from plenum.common.verifier import DidVerifier
from anoncreds.protocol.types import AvailableClaim
from indy_common.exceptions import InvalidConnectionException, \
RemoteEndpointNotFound, NotFound
class constant:
TRUST_ANCHOR = "Trust Anchor"
SIGNER_IDENTIFIER = "Identifier"
SIGNER_VER_KEY = "Verification Key"
SIGNER_VER_KEY_EMPTY = '<empty>'
REMOTE_IDENTIFIER = "Remote"
REMOTE_VER_KEY = "Remote Verification Key"
REMOTE_VER_KEY_SAME_AS_ID = '<same as Remote>'
REMOTE_END_POINT = "Remote endpoint"
SIGNATURE = "Signature"
CLAIM_REQUESTS = "Claim Requests"
AVAILABLE_CLAIMS = "Available Claims"
RECEIVED_CLAIMS = "Received Claims"
CONNECTION_NONCE = "Nonce"
CONNECTION_STATUS = "Request status"
CONNECTION_LAST_SYNCED = "Last Synced"
CONNECTION_LAST_SEQ_NO = "Last Sync no"
CONNECTION_STATUS_ACCEPTED = "Accepted"
CONNECTION_NOT_SYNCHRONIZED = "<this connection has not yet been synchronized>"
UNKNOWN_WAITING_FOR_SYNC = "<unknown, waiting for sync>"
CONNECTION_ITEM_PREFIX = '\n '
NOT_AVAILABLE = "Not Available"
NOT_ASSIGNED = "not yet assigned"
class Connection:
def __init__(self,
name,
localIdentifier=None,
localVerkey=None,
trustAnchor=None,
remoteIdentifier=None,
remoteEndPoint=None,
remotePubkey=None,
request_nonce=None,
proofRequests=None,
internalId=None,
remote_verkey=None):
self.name = name
self.localIdentifier = localIdentifier
self.localVerkey = localVerkey
self.trustAnchor = trustAnchor
self.remoteIdentifier = remoteIdentifier
self.remoteEndPoint = remoteEndPoint
self.remotePubkey = remotePubkey
self.request_nonce = request_nonce
# for optionally storing a reference to an identifier in another system
# for example, a college may already have a student ID for a particular
# person, and that student ID can be put in this field
self.internalId = internalId
self.proofRequests = proofRequests or [] # type: List[ProofRequest]
self.verifiedClaimProofs = []
self.availableClaims = [] # type: List[AvailableClaim]
self.remoteVerkey = remote_verkey
self.connection_status = None
self.connection_last_synced = None
self.connection_last_sync_no = None
def __repr__(self):
return self.key
@property
def key(self):
return self.name
@property
def isRemoteEndpointAvailable(self):
return self.remoteEndPoint and self.remoteEndPoint != \
constant.NOT_AVAILABLE
@property
def isAccepted(self):
return self.connection_status == constant.CONNECTION_STATUS_ACCEPTED
def __str__(self):
localIdr = self.localIdentifier if self.localIdentifier \
else constant.NOT_ASSIGNED
trustAnchor = self.trustAnchor or ""
trustAnchorStatus = '(not yet written to Indy)'
if self.remoteVerkey is not None:
if self.remoteIdentifier == self.remoteVerkey:
remoteVerKey = constant.REMOTE_VER_KEY_SAME_AS_ID
else:
remoteVerKey = self.remoteVerkey
else:
remoteVerKey = constant.UNKNOWN_WAITING_FOR_SYNC
remoteEndPoint = self.remoteEndPoint or \
constant.UNKNOWN_WAITING_FOR_SYNC
if isinstance(remoteEndPoint, tuple):
remoteEndPoint = "{}:{}".format(*remoteEndPoint)
connectionStatus = 'not verified, remote verkey unknown'
connection_last_synced = prettyDateDifference(
self.connection_last_synced) or constant.CONNECTION_NOT_SYNCHRONIZED
if connection_last_synced != constant.CONNECTION_NOT_SYNCHRONIZED and \
remoteEndPoint == constant.UNKNOWN_WAITING_FOR_SYNC:
remoteEndPoint = constant.NOT_AVAILABLE
if self.isAccepted:
trustAnchorStatus = '(confirmed)'
if self.remoteVerkey is None:
remoteVerKey = constant.REMOTE_VER_KEY_SAME_AS_ID
connectionStatus = self.connection_status
# TODO: The verkey would be same as the local identifier until we
# support key rotation
# TODO: This should be set as verkey in case of DID but need it from
# wallet
verKey = self.localVerkey if self.localVerkey else constant.SIGNER_VER_KEY_EMPTY
fixed_connection_heading = "Connection"
if not self.isAccepted:
fixed_connection_heading += " (not yet accepted)"
# TODO: Refactor to use string interpolation
# try:
fixed_connection_items = \
'\n' \
'Name: ' + self.name + '\n' \
'DID: ' + localIdr + '\n' \
'Trust anchor: ' + trustAnchor + ' ' + trustAnchorStatus + '\n' \
'Verification key: ' + verKey + '\n' \
'Signing key: <hidden>' '\n' \
'Remote: ' + (self.remoteIdentifier or
constant.UNKNOWN_WAITING_FOR_SYNC) + '\n' \
'Remote Verification key: ' + remoteVerKey + '\n' \
'Remote endpoint: ' + remoteEndPoint + '\n' \
'Request nonce: ' + self.request_nonce + '\n' \
'Request status: ' + connectionStatus + '\n'
optional_connection_items = ""
if len(self.proofRequests) > 0:
optional_connection_items += "Proof Request(s): {}". \
format(", ".join([cr.name for cr in self.proofRequests])) \
+ '\n'
if self.availableClaims:
optional_connection_items += self.avail_claims_str()
if self.connection_last_sync_no:
optional_connection_items += 'Last sync seq no: ' + \
self.connection_last_sync_no + '\n'
fixedEndingLines = 'Last synced: ' + connection_last_synced
connection_items = fixed_connection_items + \
optional_connection_items + fixedEndingLines
indented_connection_items = constant.CONNECTION_ITEM_PREFIX.join(
connection_items.splitlines())
return fixed_connection_heading + indented_connection_items
def avail_claims_str(self):
claim_names = [name for name, _, _ in self.availableClaims]
return "Available Claim(s): {}".\
format(", ".join(claim_names)) + '\n'
@staticmethod
def validate(request_data):
def checkIfFieldPresent(msg, searchInName, fieldName):
if not msg.get(fieldName):
raise InvalidConnectionException(
"Field not found in {}: {}".format(
searchInName, fieldName))
checkIfFieldPresent(request_data, 'given input', 'sig')
checkIfFieldPresent(request_data, 'given input', 'connection-request')
connection_request = request_data.get("connection-request")
connection_request_req_fields = [f.IDENTIFIER.nm, NAME, NONCE]
for fn in connection_request_req_fields:
checkIfFieldPresent(connection_request, 'connection-request', fn)
def getRemoteEndpoint(self, required=False):
if not self.remoteEndPoint and required:
raise RemoteEndpointNotFound
if isinstance(self.remoteEndPoint, tuple):
return self.remoteEndPoint
elif isinstance(self.remoteEndPoint, str):
ip, port = self.remoteEndPoint.split(":")
return ip, int(port)
elif self.remoteEndPoint is None:
return None
else:
raise ValueError('Cannot convert endpoint {} to HA'.
format(self.remoteEndPoint))
@property
def remoteVerkey(self):
if not hasattr(self, '_remoteVerkey'):
return None
if self._remoteVerkey is None:
return None
        # This property should be used to fetch the verkey rather than the raw
        # _remoteVerkey attribute; it's a more consistent name and takes care
        # of abbreviated verkeys.
i = DidIdentity(self.remoteIdentifier, verkey=self._remoteVerkey)
return i.verkey
@property
def full_remote_verkey(self):
verkey = self.remoteVerkey
if verkey is None:
return None
i = DidIdentity(self.remoteIdentifier, verkey=verkey)
full_verkey = i.full_verkey
return full_verkey
@remoteVerkey.setter
def remoteVerkey(self, new_val):
self._remoteVerkey = new_val
def find_available_claims(self, name=None, version=None, origin=None):
return [ac for ac in self.availableClaims
if (not name or name == ac.name) and
(not version or version == ac.version) and
(not origin or origin == ac.origin)]
def find_available_claim(self, name=None, version=None, origin=None,
max_one=True, required=True):
_ = self.find_available_claims(name, version, origin)
assert not max_one or len(_) <= 1, \
'more than one matching available claim found'
if required and len(_) == 0:
raise NotFound
return _[0] if _ else None
def find_proof_requests(self, name=None, version=None):
return [pr for pr in self.proofRequests
if (not name or name == pr.name) and
(not version or version == pr.version)]
def find_proof_request(self, name=None, version=None,
max_one=True, required=True):
_ = self.find_proof_requests(name, version)
assert not max_one or len(_) <= 1, \
            'more than one matching proof request found'
if required and len(_) == 0:
raise NotFound
return _[0] if _ else None
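# Editor's usage sketch (not part of the original module): build a minimal
# Connection and validate a connection-request payload. The DID, endpoint and
# nonce values are made-up placeholders, and the lowercase keys are assumed to
# match the plenum NAME/NONCE/f.IDENTIFIER constants.
def _example_connection():  # pragma: no cover
    conn = Connection('Acme Corp',
                      remoteIdentifier='CzkavE58zgX7rUMrzSinLr',
                      remoteEndPoint='127.0.0.1:6789',
                      request_nonce='b1134a647eb818069c089e7694f63e6d')
    # getRemoteEndpoint() normalizes a "host:port" string into an (ip, port) tuple.
    host, port = conn.getRemoteEndpoint(required=True)
    # validate() raises InvalidConnectionException if required fields are missing.
    Connection.validate({
        'sig': '<signature>',
        'connection-request': {
            'identifier': conn.remoteIdentifier,
            'name': conn.name,
            'nonce': conn.request_nonce,
        },
    })
    return host, port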
|
|
# -*- coding: utf-8 -*-
import logging
import pprint
import re
from cssselect import HTMLTranslator
import lxml.html
from lxml.html.clean import Cleaner
logger = logging.getLogger(__name__)
class Parser():
    """Default Parser."""
no_results_selector = []
effective_query_selector = []
num_results_search_selectors = []
page_number_selectors = []
search_types = []
    def __init__(self, config=None, html='', query=''):
        """Create new Parser instance and parse all information."""
        self.config = config or {}
        self.searchtype = self.config.get('search_type', 'normal')
assert self.searchtype in self.search_types, 'search type "{}" is not supported in {}'.format(
self.searchtype,
self.__class__.__name__
)
self.query = query
self.html = html
self.dom = None
self.search_results = {}
self.num_results_for_query = ''
self.num_results = 0
self.effective_query = ''
self.page_number = -1
self.no_results = False
self.related_keywords = {}
# to be set by the implementing sub classes
self.search_engine = ''
# short alias because we use it so extensively
self.css_to_xpath = HTMLTranslator().css_to_xpath
if self.html:
self.parse()
def parse(self, html=None):
"""Public function to start parsing the search engine results.
Args:
html: The raw html data to extract the SERP entries from.
"""
if html:
self.html = html.encode('utf-8').decode('utf-8')
# lets do the actual parsing
self._parse()
# Apply subclass specific behaviour after parsing has happened
# This is needed because different parsers need to clean/modify
# the parsed data uniquely.
self.after_parsing()
    def _parse_lxml(self, cleaner=None):
        try:
            parser = lxml.html.HTMLParser(encoding='utf-8')
            self.dom = lxml.html.document_fromstring(self.html, parser=parser)
            if cleaner:
                self.dom = cleaner.clean_html(self.dom)
            self.dom.resolve_base_href()
        except Exception as e:
            # maybe wrong encoding
            logger.error(e)
def _parse(self, cleaner=None):
"""Internal parse the dom according to the provided css selectors.
Raises: Exception
if no css selectors for the searchtype could be found.
"""
self.num_results = 0
self._parse_lxml(cleaner)
# try to parse the number of results.
attr_name = self.searchtype + '_search_selectors'
selector_dict = getattr(self, attr_name, None)
# get the appropriate css selectors for the num_results for the keyword
num_results_selector = getattr(
self,
'num_results_search_selectors',
None
)
self.num_results_for_query = self.first_match(
num_results_selector,
self.dom
)
if not self.num_results_for_query:
            logger.debug('''{}: Cannot parse num_results from serp page
                with selectors {}
                '''.format(self.__class__.__name__, num_results_selector))
# get the current page we are at.
try:
self.page_number = int(
self.first_match(self.page_number_selectors, self.dom)
)
except ValueError:
self.page_number = -1
        # check whether the search engine rewrote the query (no results for the original query)
self.effective_query = self.first_match(
self.effective_query_selector,
self.dom
)
if self.effective_query:
logger.debug('''{}: There was no search hit for the search query.
Search engine used {} instead.
'''.format(self.__class__.__name__, self.effective_query))
else:
self.effective_query = ''
# the element that notifies the user about no results.
self.no_results_text = self.first_match(
self.no_results_selector,
self.dom
)
# get the stuff that is of interest in SERP pages.
        if not isinstance(selector_dict, dict):
raise Exception('''There is no such attribute: {}. No selectors found
'''.format(attr_name))
for result_type, selector_class in selector_dict.items():
self.search_results[result_type] = []
self.related_keywords[result_type] = []
for _, selectors in selector_class.items():
if 'result_container' in selectors and selectors['result_container']:
css = '{container} {result_container}'.format(**selectors)
else:
css = selectors['container']
# logger.info('try {}: '.format(css))
results = self.dom.xpath(
self.css_to_xpath(css)
)
# logger.info('results {}: '.format(results))
to_extract = set(selectors.keys()) - {'container', 'result_container'}
                selectors_to_use = {key: selectors[key] for key in to_extract}
for index, result in enumerate(results):
# Let's add primitive support for CSS3 pseudo selectors
serp_result = {}
# key are for example 'link', 'snippet', 'visible-url', ...
# selector is the selector to grab these items
for key, selector in selectors_to_use.items():
serp_result[key] = self.advanced_css(selector, result)
serp_result['rank'] = index + 1
# only add items that have not None links.
# Avoid duplicates. Detect them by the link.
# If statement below: Lazy evaluation.
# The more probable case first.
if 'link' in serp_result and serp_result['link'] and \
not [e for e in self.search_results[result_type]
if e['link'] == serp_result['link']]:
self.search_results[result_type].append(serp_result)
self.num_results += 1
if 'keyword' in serp_result and serp_result['keyword']:
self.related_keywords[result_type].append(serp_result)
def advanced_css(self, selector, element):
"""Evaluate the :text and ::attr(attr-name) additionally.
Args:
selector: A css selector.
element: The element on which to apply the selector.
Returns:
The targeted element.
"""
value = None
if selector.endswith('::text'):
try:
value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].text_content()
except IndexError:
pass
else:
match = re.search(r'::attr\((?P<attr>.*)\)$', selector)
if match:
attr = match.group('attr')
try:
value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].get(attr)
except IndexError:
pass
else:
try:
value = element.xpath(self.css_to_xpath(selector))[0].text_content()
except IndexError:
pass
return value
def first_match(self, selectors, element):
"""Get the first match.
Args:
selectors: The selectors to test for a match.
element: The element on which to apply the selectors.
Returns:
The very first match or False if all selectors didn't match anything.
"""
assert isinstance(selectors, list), 'selectors must be of type list!'
for selector in selectors:
if selector:
try:
match = self.advanced_css(selector, element=element)
if match:
return match
except IndexError:
pass
return False
def after_parsing(self):
"""Subclass specific behaviour after parsing happened.
Override in subclass to add search engine specific behaviour.
Commonly used to clean the results.
"""
def __str__(self):
"""Return a nicely formatted overview of the results."""
return pprint.pformat(self.search_results)
@property
def cleaned_html(self):
# Try to parse the provided HTML string using lxml
# strip all unnecessary information to save space
cleaner = Cleaner()
cleaner.scripts = True
cleaner.javascript = True
cleaner.comments = True
cleaner.style = True
        assert self.dom is not None, 'The html needs to be parsed to get the cleaned html'
        self.dom = cleaner.clean_html(self.dom)
return lxml.html.tostring(self.dom)
def iter_serp_items(self):
"""Yields the key and index of any item in the serp results that has a link value"""
for key, value in self.search_results.items():
if isinstance(value, list):
for i, item in enumerate(value):
if isinstance(item, dict) and item['link']:
yield (key, i)
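# Illustrative sketch (not part of the original module): what advanced_css()
# does with the '::text' and '::attr(...)' suffixes, demonstrated on a tiny
# made-up HTML snippet with the same lxml/cssselect primitives imported above.
# Guarded so it only runs when the file is executed directly.
if __name__ == '__main__':
    example_html = '<div class="r"><a href="http://example.com">Example</a></div>'
    example_dom = lxml.html.document_fromstring(example_html)
    css_to_xpath = HTMLTranslator().css_to_xpath
    # Equivalent of the selector 'div.r a::text': text content of the first match.
    print(example_dom.xpath(css_to_xpath('div.r a'))[0].text_content())
    # Equivalent of the selector 'div.r a::attr(href)': one attribute of the first match.
    print(example_dom.xpath(css_to_xpath('div.r a'))[0].get('href'))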
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for the PNASNet classification networks.
Paper: https://arxiv.org/abs/1712.00559
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.nasnet import nasnet
from nets.nasnet import nasnet_utils
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
def large_imagenet_config():
"""Large ImageNet configuration based on PNASNet-5."""
return tf.contrib.training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
num_conv_filters=216,
drop_path_keep_prob=0.6,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def mobile_imagenet_config():
"""Mobile ImageNet configuration based on PNASNet-5."""
return tf.contrib.training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=9,
filter_scaling_rate=2.0,
num_conv_filters=54,
drop_path_keep_prob=1.0,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def pnasnet_large_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Large ImageNet model."""
return nasnet.nasnet_large_arg_scope(
weight_decay, batch_norm_decay, batch_norm_epsilon)
def pnasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Mobile ImageNet model."""
return nasnet.nasnet_mobile_arg_scope(weight_decay, batch_norm_decay,
batch_norm_epsilon)
def _build_pnasnet_base(images,
normal_cell,
num_classes,
hparams,
is_training,
final_endpoint=None):
"""Constructs a PNASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
# pylint: disable=protected-access
stem = lambda: nasnet._imagenet_stem(images, hparams, normal_cell)
# pylint: enable=protected-access
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net):
return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
is_reduction = cell_num in reduction_indices
stride = 2 if is_reduction else 1
if is_reduction: filter_scaling *= hparams.filter_scaling_rate
if hparams.skip_reduction_layer_input or not is_reduction:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
# pylint: disable=protected-access
nasnet._build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
# pylint: enable=protected-access
# Final softmax layer
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
def build_pnasnet_large(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Large model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else large_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the
  # total number of cells is equal to the number of normal cells plus the number
# of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d,
slim.batch_norm, slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_large.default_image_size = 331
def build_pnasnet_mobile(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Mobile model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else mobile_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the
  # total number of cells is equal to the number of normal cells plus the number
# of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_mobile.default_image_size = 224
class PNasNetNormalCell(nasnet_utils.NasNetABaseCell):
"""PNASNet Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
# Configuration for the PNASNet-5 model.
operations = [
'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3',
'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3',
'separable_3x3_2', 'none'
]
used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]
hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]
super(PNasNetNormalCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps,
use_bounded_activation)
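# Illustrative usage sketch (not part of the library): building the large model
# under its default arg scope, assuming a TF 1.x environment where tf.contrib
# and slim (imported above) are available.
if __name__ == '__main__':
  images = tf.placeholder(tf.float32, [None, 331, 331, 3])
  with arg_scope(pnasnet_large_arg_scope()):
    logits, end_points = build_pnasnet_large(
        images, num_classes=1001, is_training=False)
  # `logits` has shape [batch_size, 1001]; `end_points` maps endpoint names
  # (Stem, Cell_0, ..., Predictions) to the corresponding tensors.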
|
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import re
from dataclasses import dataclass
from typing import Iterable
from urllib.parse import quote_plus as url_quote_plus
from pants.engine.collection import DeduplicatedCollection
from pants.engine.target import Target
from pants.jvm.target_types import (
JvmArtifactArtifactField,
JvmArtifactExcludeDependenciesField,
JvmArtifactFieldSet,
JvmArtifactGroupField,
JvmArtifactJarSourceField,
JvmArtifactUrlField,
JvmArtifactVersionField,
)
from pants.util.ordered_set import FrozenOrderedSet
class InvalidCoordinateString(Exception):
"""The coordinate string being passed is invalid or malformed."""
def __init__(self, coords: str) -> None:
super().__init__(f"Received invalid artifact coordinates: {coords}")
@dataclass(frozen=True, order=True)
class Coordinate:
"""A single Maven-style coordinate for a JVM dependency.
Coursier uses at least two string serializations of coordinates:
1. A format that is accepted by the Coursier CLI which uses trailing attributes to specify
optional fields like `packaging`/`type`, `classifier`, `url`, etc. See `to_coord_arg_str`.
2. A format in the JSON report, which uses token counts to specify optional fields. We
additionally use this format in our own lockfile. See `to_coord_str` and `from_coord_str`.
"""
REGEX = re.compile("([^: ]+):([^: ]+)(:([^: ]*)(:([^: ]+))?)?:([^: ]+)")
group: str
artifact: str
version: str
packaging: str = "jar"
classifier: str | None = None
# True to enforce that the exact declared version of a coordinate is fetched, rather than
# allowing dependency resolution to adjust the version when conflicts occur.
strict: bool = True
@staticmethod
def from_json_dict(data: dict) -> Coordinate:
return Coordinate(
group=data["group"],
artifact=data["artifact"],
version=data["version"],
packaging=data.get("packaging", "jar"),
classifier=data.get("classifier", None),
)
def to_json_dict(self) -> dict:
ret = {
"group": self.group,
"artifact": self.artifact,
"version": self.version,
"packaging": self.packaging,
"classifier": self.classifier,
}
return ret
@classmethod
def from_coord_str(cls, s: str) -> Coordinate:
"""Parses from a coordinate string with optional `packaging` and `classifier` coordinates.
See the classdoc for more information on the format.
Using Aether's implementation as reference
http://www.javased.com/index.php?source_dir=aether-core/aether-api/src/main/java/org/eclipse/aether/artifact/DefaultArtifact.java
${organisation}:${artifact}[:${packaging}[:${classifier}]]:${version}
See also: `to_coord_str`.
"""
parts = Coordinate.REGEX.match(s)
if parts is not None:
packaging_part = parts.group(4)
return cls(
group=parts.group(1),
artifact=parts.group(2),
packaging=packaging_part if packaging_part is not None else "jar",
classifier=parts.group(6),
version=parts.group(7),
)
else:
raise InvalidCoordinateString(s)
    def as_requirement(self) -> ArtifactRequirement:
        """Creates an `ArtifactRequirement` from this `Coordinate`."""
return ArtifactRequirement(coordinate=self)
def to_coord_str(self, versioned: bool = True) -> str:
"""Renders the coordinate in Coursier's JSON-report format, which does not use attributes.
See also: `from_coord_str`.
"""
unversioned = f"{self.group}:{self.artifact}"
if self.classifier is not None:
unversioned += f":{self.packaging}:{self.classifier}"
elif self.packaging != "jar":
unversioned += f":{self.packaging}"
version_suffix = ""
if versioned:
version_suffix = f":{self.version}"
return f"{unversioned}{version_suffix}"
def to_coord_arg_str(self, extra_attrs: dict[str, str] | None = None) -> str:
"""Renders the coordinate in Coursier's CLI input format.
The CLI input format uses trailing key-val attributes to specify `packaging`, `url`, etc.
See https://github.com/coursier/coursier/blob/b5d5429a909426f4465a9599d25c678189a54549/modules/coursier/shared/src/test/scala/coursier/parse/DependencyParserTests.scala#L7
"""
attrs = dict(extra_attrs or {})
if self.packaging != "jar":
# NB: Coursier refers to `packaging` as `type` internally.
attrs["type"] = self.packaging
if self.classifier:
attrs["classifier"] = self.classifier
attrs_sep_str = "," if attrs else ""
attrs_str = ",".join((f"{k}={v}" for k, v in attrs.items()))
return f"{self.group}:{self.artifact}:{self.version}{attrs_sep_str}{attrs_str}"
class Coordinates(DeduplicatedCollection[Coordinate]):
"""An ordered list of `Coordinate`s."""
@dataclass(frozen=True, order=True)
class ArtifactRequirement:
    """A single Maven-style coordinate for a JVM dependency, along with information about how to
    fetch the dependency if it is not to be fetched from a Maven repository."""
coordinate: Coordinate
url: str | None = None
jar: JvmArtifactJarSourceField | None = None
excludes: frozenset[str] | None = None
@classmethod
def from_jvm_artifact_target(cls, target: Target) -> ArtifactRequirement:
if not JvmArtifactFieldSet.is_applicable(target):
raise AssertionError(
"`ArtifactRequirement.from_jvm_artifact_target()` only works on targets with "
"`JvmArtifactFieldSet` fields present."
)
return ArtifactRequirement(
coordinate=Coordinate(
group=target[JvmArtifactGroupField].value,
artifact=target[JvmArtifactArtifactField].value,
version=target[JvmArtifactVersionField].value,
),
url=target[JvmArtifactUrlField].value,
jar=(
target[JvmArtifactJarSourceField]
if target[JvmArtifactJarSourceField].value
else None
),
excludes=frozenset(target[JvmArtifactExcludeDependenciesField].value or []) or None,
)
def with_extra_excludes(self, *excludes: str) -> ArtifactRequirement:
"""Creates a copy of this `ArtifactRequirement` with `excludes` provided.
Mostly useful for testing (`Coordinate(...).as_requirement().with_extra_excludes(...)`).
"""
return dataclasses.replace(
self, excludes=self.excludes.union(excludes) if self.excludes else frozenset(excludes)
)
def to_coord_arg_str(self) -> str:
return self.coordinate.to_coord_arg_str(
{"url": url_quote_plus(self.url)} if self.url else {}
)
def to_metadata_str(self) -> str:
attrs = {
"url": self.url or "not_provided",
"jar": self.jar.address.spec if self.jar else "not_provided",
}
if self.excludes:
attrs["excludes"] = ",".join(self.excludes)
return self.coordinate.to_coord_arg_str(attrs)
# TODO: Consider whether to carry classpath scope in some fashion via ArtifactRequirements.
class ArtifactRequirements(DeduplicatedCollection[ArtifactRequirement]):
"""An ordered list of Coordinates used as requirements."""
@classmethod
def from_coordinates(cls, coordinates: Iterable[Coordinate]) -> ArtifactRequirements:
return ArtifactRequirements(coord.as_requirement() for coord in coordinates)
@dataclass(frozen=True)
class GatherJvmCoordinatesRequest:
"""A request to turn strings of coordinates (`group:artifact:version`) and/or addresses to
`jvm_artifact` targets into `ArtifactRequirements`."""
artifact_inputs: FrozenOrderedSet[str]
option_name: str
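# Illustrative sketch (not part of Pants itself): round-tripping a made-up
# coordinate string through the two serialisations described in `Coordinate`'s
# docstring. Assumes a Pants development environment where this module imports
# cleanly; guarded so it only runs when executed directly.
if __name__ == "__main__":
    example = Coordinate.from_coord_str("com.example:acme-lib:jar:sources:1.2.3")
    # Coursier JSON-report format: the classifier forces the packaging to appear.
    assert example.to_coord_str() == "com.example:acme-lib:jar:sources:1.2.3"
    # Coursier CLI format: optional fields become trailing key=value attributes.
    assert example.to_coord_arg_str() == "com.example:acme-lib:1.2.3,classifier=sources"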
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
import mord
def fit_classifier_with_crossvalidation(X, y, basemod, cv, param_grid,
                                        scoring='r2', verbose=False):
    """Fit a classifier with hyperparameters set via cross-validation.
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
basemod : an sklearn model class instance
This is the basic model-type we'll be optimizing.
cv : int
Number of cross-validation folds.
param_grid : dict
A dict whose keys name appropriate parameters for `basemod` and
whose values are lists of values to try.
    scoring : value to optimize for (default: 'r2')
What we optimize for. Best to choose "accuracy" or "r2".
The F1 variants are meaningless for this problem since so few
models predict in every category. "roc_auc", "average_precision",
"log_loss" are unsupported.
Prints
------
To standard output:
The best parameters found.
        The best cross-validation score obtained.
Returns
-------
An instance of the same class as `basemod`.
A trained model instance, the best model found.
"""
# Find the best model within param_grid:
crossvalidator = GridSearchCV(basemod, param_grid, cv=cv, scoring=scoring)
crossvalidator.fit(X, y)
    # Report some information:
    if verbose:
        for combination in crossvalidator.grid_scores_:
            print combination
    print "Best params:", crossvalidator.best_params_
    print "Best score: %0.03f" % crossvalidator.best_score_
# Return the best model found:
return crossvalidator.best_estimator_
def fit_maxent(X, y, C = 1.0):
    """A classification model of the dataset. L2 regularized.
    C : float, optional (default=1.0)
        Inverse of regularization strength; must be a positive float.
        Like in support vector machines, smaller values specify
        stronger regularization.
"""
basemod = LogisticRegression(penalty='l2', C = C)
basemod.fit(X,y)
return basemod
def fit_maxent_balanced(X, y, C = 1.0):
    """A classification model of the dataset. L2 regularized & forces balanced classes.
    C : float, optional (default=1.0)
        Inverse of regularization strength; must be a positive float.
        Like in support vector machines, smaller values specify
        stronger regularization.
"""
basemod = LogisticRegression(penalty='l2', class_weight='balanced', C = C)
basemod.fit(X,y)
return basemod
def fit_maxent_with_crossvalidation(X, y, C = 1.0):
    """A classification model of the dataset with hyperparameter
    cross-validation. Maximum entropy/logistic regression variant.
    Some notes:
    * 'fit_intercept': whether to include the class bias feature.
    * 'C': weight for the regularization term (smaller is more regularized).
    * 'penalty': type of regularization -- roughly, 'l1' encourages small
    sparse models, and 'l2' encourages the weights to conform to a
    Gaussian prior distribution.
Other arguments can be cross-validated; see
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
Returns
-------
sklearn.linear_model.LogisticRegression
A trained model instance, the best model found.
"""
basemod = LogisticRegression(penalty='l2', C = C)
cv = 5
param_grid = {'fit_intercept': [True, False],
'C': [0.4, 0.6, 0.8, 1.0, 2.0, 3.0],
'penalty': ['l1','l2']}
return fit_classifier_with_crossvalidation(X, y, basemod, cv, param_grid,
verbose=False)
def fit_logistic_it_with_crossvalidation(X, y, alpha = 1.0):
    """An ordinal model of the dataset with hyperparameter
    cross-validation. Immediate-Threshold (logistic/threshold) variant.
    Parameters & returns as per other training functions.
    alpha: float :
        Regularization parameter. Zero is no regularization,
        higher values increase the squared l2 regularization.
"""
basemod = mord.LogisticIT(alpha = alpha)
cv = 5
param_grid = {'alpha': [0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 2.0, 3.0]}
return fit_classifier_with_crossvalidation(X, y, basemod, cv, param_grid,
verbose=False)
def fit_logistic_at(X, y, alpha = 1.0):
    """An ordinal model of the dataset without hyperparameter
    cross-validation -- uses defaults.
    All-Threshold (logistic/threshold) variant, recommended over
    Intermediate-Threshold variant in Rennie and Srebro 2005.
    Parameters & returns as per other training functions.
    alpha: float :
        Regularization parameter. Zero is no regularization,
        higher values increase the squared l2 regularization.
"""
basemod = mord.LogisticAT(alpha = alpha)
basemod.fit(X,y)
return basemod
def fit_logistic_at_6(X, y):
return fit_logistic_at(X, y, 0.5)
def fit_logistic_at_with_crossvalidation(X, y, alpha = 1.0):
    """An ordinal model of the dataset with hyperparameter
    cross-validation. All-Threshold (logistic/threshold) variant.
    Recommended over Intermediate-Threshold variant in Rennie and Srebro 2005.
    Parameters & returns as per other training functions.
    alpha: float :
        Regularization parameter. Zero is no regularization,
        higher values increase the squared l2 regularization.
"""
basemod = mord.LogisticAT(alpha = alpha)
cv = 3
param_grid = {'alpha': [0.2, 0.4, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 6.0, 8.0, 12.0]}
return fit_classifier_with_crossvalidation(X, y, basemod, cv, param_grid,
verbose=False)
def fit_logistic_or_with_crossvalidation(X, y, alpha = 1.0):
    """An ordinal model of the dataset with hyperparameter
    cross-validation. Ordinal Ridge (regression) variant.
    Parameters & returns as per other training functions.
    alpha: float :
        Regularization parameter. Zero is no regularization,
        higher values increase the squared l2 regularization.
"""
basemod = mord.OrdinalRidge(alpha = alpha)
cv = 5
param_grid = {'fit_intercept': [True, False],
'alpha': [0.2, 0.4, 0.6, 0.8, 1.0, 2.0, 3.0],
'normalize': [True, False]}
return fit_classifier_with_crossvalidation(X, y, basemod, cv, param_grid,
verbose=False)
def fit_logistic_mcl_with_crossvalidation(X, y, alpha = 1.0):
    """An ordinal model of the dataset with hyperparameter
    cross-validation. Multiclass Logistic (logistic/classification) variant.
    Parameters & returns as per other training functions.
    alpha: float :
        Regularization parameter. Zero is no regularization,
        higher values increase the squared l2 regularization.
"""
basemod = mord.MulticlassLogistic(alpha = alpha)
cv = 5
param_grid = {'alpha': [0.2, 0.4, 0.6, 0.8, 1.0, 2.0, 3.0]}
return fit_classifier_with_crossvalidation(X, y, basemod, cv, param_grid,
verbose=False)
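# Minimal usage sketch (not part of the original module): fitting one of the
# models above on a tiny made-up dataset. Assumes the same Python 2 /
# sklearn.grid_search / mord environment that the imports at the top require.
if __name__ == '__main__':
    X = [[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]
    y = [1, 2, 2, 1]
    model = fit_maxent(X, y, C=1.0)
    print model.predict(X)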
|
|
"""Tests for gree component."""
from datetime import timedelta
from greeclimate.device import HorizontalSwing, VerticalSwing
from greeclimate.exceptions import DeviceNotBoundError, DeviceTimeoutError
import pytest
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HVAC_MODE,
ATTR_PRESET_MODE,
ATTR_SWING_MODE,
DOMAIN,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
PRESET_SLEEP,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.components.gree.climate import (
FAN_MODES_REVERSE,
HVAC_MODES_REVERSE,
SUPPORTED_FEATURES,
)
from homeassistant.components.gree.const import (
DOMAIN as GREE_DOMAIN,
FAN_MEDIUM_HIGH,
FAN_MEDIUM_LOW,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .common import build_device_mock
from tests.async_mock import DEFAULT as DEFAULT_MOCK, AsyncMock, patch
from tests.common import MockConfigEntry, async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.fake_device_1"
@pytest.fixture
def mock_now():
"""Fixture for dtutil.now."""
return dt_util.utcnow()
async def async_setup_gree(hass):
"""Set up the gree platform."""
MockConfigEntry(domain=GREE_DOMAIN).add_to_hass(hass)
await async_setup_component(hass, GREE_DOMAIN, {GREE_DOMAIN: {"climate": {}}})
await hass.async_block_till_done()
async def test_discovery_called_once(hass, discovery, device):
"""Test discovery is only ever called once."""
await async_setup_gree(hass)
assert discovery.call_count == 1
await async_setup_gree(hass)
assert discovery.call_count == 1
async def test_discovery_setup(hass, discovery, device):
"""Test setup of platform."""
MockDevice1 = build_device_mock(
name="fake-device-1", ipAddress="1.1.1.1", mac="aabbcc112233"
)
MockDevice2 = build_device_mock(
name="fake-device-2", ipAddress="2.2.2.2", mac="bbccdd223344"
)
discovery.return_value = [MockDevice1.device_info, MockDevice2.device_info]
device.side_effect = [MockDevice1, MockDevice2]
await async_setup_gree(hass)
await hass.async_block_till_done()
assert discovery.call_count == 1
assert len(hass.states.async_all(DOMAIN)) == 2
async def test_discovery_setup_connection_error(hass, discovery, device):
"""Test gree integration is setup."""
MockDevice1 = build_device_mock(name="fake-device-1")
MockDevice1.bind = AsyncMock(side_effect=DeviceNotBoundError)
MockDevice2 = build_device_mock(name="fake-device-2")
MockDevice2.bind = AsyncMock(side_effect=DeviceNotBoundError)
device.side_effect = [MockDevice1, MockDevice2]
await async_setup_gree(hass)
await hass.async_block_till_done()
assert discovery.call_count == 1
assert not hass.states.async_all(DOMAIN)
async def test_update_connection_failure(hass, discovery, device, mock_now):
"""Testing update hvac connection failure exception."""
device().update_state.side_effect = [
DEFAULT_MOCK,
DeviceTimeoutError,
DeviceTimeoutError,
]
await async_setup_gree(hass)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# First update to make the device available
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
next_update = mock_now + timedelta(minutes=10)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
next_update = mock_now + timedelta(minutes=15)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# Then two more update failures to make the device unavailable
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state == STATE_UNAVAILABLE
async def test_update_connection_failure_recovery(hass, discovery, device, mock_now):
"""Testing update hvac connection failure recovery."""
device().update_state.side_effect = [
DeviceTimeoutError,
DeviceTimeoutError,
DEFAULT_MOCK,
]
await async_setup_gree(hass)
# First update becomes unavailable
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state == STATE_UNAVAILABLE
# Second update restores the connection
next_update = mock_now + timedelta(minutes=10)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
async def test_update_unhandled_exception(hass, discovery, device, mock_now):
"""Testing update hvac connection unhandled response exception."""
device().update_state.side_effect = [DEFAULT_MOCK, Exception]
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
next_update = mock_now + timedelta(minutes=10)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state == STATE_UNAVAILABLE
async def test_send_command_device_timeout(hass, discovery, device, mock_now):
"""Test for sending power on command to the device with a device timeout."""
await async_setup_gree(hass)
# First update to make the device available
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == "fake-device-1"
assert state.state != STATE_UNAVAILABLE
device().push_state_update.side_effect = DeviceTimeoutError
# Send failure should not raise exceptions or change device state
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state != STATE_UNAVAILABLE
async def test_send_power_on(hass, discovery, device, mock_now):
"""Test for sending power on command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == HVAC_MODE_AUTO
async def test_send_power_on_device_timeout(hass, discovery, device, mock_now):
"""Test for sending power on command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == HVAC_MODE_AUTO
async def test_send_target_temperature(hass, discovery, device, mock_now):
"""Test for sending target temperature command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_TEMPERATURE: 25.1},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_TEMPERATURE) == 25
async def test_send_target_temperature_device_timeout(
hass, discovery, device, mock_now
):
"""Test for sending target temperature command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_TEMPERATURE: 25.1},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_TEMPERATURE) == 25
async def test_update_target_temperature(hass, discovery, device, mock_now):
"""Test for updating target temperature from the device."""
device().target_temperature = 32
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_TEMPERATURE) == 32
@pytest.mark.parametrize(
"preset", (PRESET_AWAY, PRESET_ECO, PRESET_SLEEP, PRESET_BOOST, PRESET_NONE)
)
async def test_send_preset_mode(hass, discovery, device, mock_now, preset):
"""Test for sending preset mode command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: preset},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) == preset
async def test_send_invalid_preset_mode(hass, discovery, device, mock_now):
"""Test for sending preset mode command to the device."""
await async_setup_gree(hass)
with pytest.raises(ValueError):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: "invalid"},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) != "invalid"
@pytest.mark.parametrize(
"preset", (PRESET_AWAY, PRESET_ECO, PRESET_SLEEP, PRESET_BOOST, PRESET_NONE)
)
async def test_send_preset_mode_device_timeout(
hass, discovery, device, mock_now, preset
):
"""Test for sending preset mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: preset},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) == preset
@pytest.mark.parametrize(
"preset", (PRESET_AWAY, PRESET_ECO, PRESET_SLEEP, PRESET_BOOST, PRESET_NONE)
)
async def test_update_preset_mode(hass, discovery, device, mock_now, preset):
"""Test for updating preset mode from the device."""
device().steady_heat = preset == PRESET_AWAY
device().power_save = preset == PRESET_ECO
device().sleep = preset == PRESET_SLEEP
device().turbo = preset == PRESET_BOOST
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_PRESET_MODE) == preset
@pytest.mark.parametrize(
"hvac_mode",
(
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
),
)
async def test_send_hvac_mode(hass, discovery, device, mock_now, hvac_mode):
"""Test for sending hvac mode command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: hvac_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == hvac_mode
@pytest.mark.parametrize(
"hvac_mode",
(HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT),
)
async def test_send_hvac_mode_device_timeout(
hass, discovery, device, mock_now, hvac_mode
):
"""Test for sending hvac mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: hvac_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == hvac_mode
@pytest.mark.parametrize(
"hvac_mode",
(
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
),
)
async def test_update_hvac_mode(hass, discovery, device, mock_now, hvac_mode):
"""Test for updating hvac mode from the device."""
device().power = hvac_mode != HVAC_MODE_OFF
device().mode = HVAC_MODES_REVERSE.get(hvac_mode)
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == hvac_mode
@pytest.mark.parametrize(
"fan_mode",
(FAN_AUTO, FAN_LOW, FAN_MEDIUM_LOW, FAN_MEDIUM, FAN_MEDIUM_HIGH, FAN_HIGH),
)
async def test_send_fan_mode(hass, discovery, device, mock_now, fan_mode):
"""Test for sending fan mode command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_FAN_MODE: fan_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) == fan_mode
async def test_send_invalid_fan_mode(hass, discovery, device, mock_now):
"""Test for sending fan mode command to the device."""
await async_setup_gree(hass)
with pytest.raises(ValueError):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_FAN_MODE: "invalid"},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) != "invalid"
@pytest.mark.parametrize(
"fan_mode",
(FAN_AUTO, FAN_LOW, FAN_MEDIUM_LOW, FAN_MEDIUM, FAN_MEDIUM_HIGH, FAN_HIGH),
)
async def test_send_fan_mode_device_timeout(
hass, discovery, device, mock_now, fan_mode
):
"""Test for sending fan mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_FAN_MODE: fan_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) == fan_mode
@pytest.mark.parametrize(
"fan_mode",
(FAN_AUTO, FAN_LOW, FAN_MEDIUM_LOW, FAN_MEDIUM, FAN_MEDIUM_HIGH, FAN_HIGH),
)
async def test_update_fan_mode(hass, discovery, device, mock_now, fan_mode):
"""Test for updating fan mode from the device."""
device().fan_speed = FAN_MODES_REVERSE.get(fan_mode)
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_FAN_MODE) == fan_mode
@pytest.mark.parametrize(
"swing_mode", (SWING_OFF, SWING_BOTH, SWING_VERTICAL, SWING_HORIZONTAL)
)
async def test_send_swing_mode(hass, discovery, device, mock_now, swing_mode):
"""Test for sending swing mode command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_SWING_MODE: swing_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) == swing_mode
async def test_send_invalid_swing_mode(hass, discovery, device, mock_now):
"""Test for sending swing mode command to the device."""
await async_setup_gree(hass)
with pytest.raises(ValueError):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_SWING_MODE: "invalid"},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) != "invalid"
@pytest.mark.parametrize(
"swing_mode", (SWING_OFF, SWING_BOTH, SWING_VERTICAL, SWING_HORIZONTAL)
)
async def test_send_swing_mode_device_timeout(
hass, discovery, device, mock_now, swing_mode
):
"""Test for sending swing mode command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_SWING_MODE: swing_mode},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) == swing_mode
@pytest.mark.parametrize(
"swing_mode", (SWING_OFF, SWING_BOTH, SWING_VERTICAL, SWING_HORIZONTAL)
)
async def test_update_swing_mode(hass, discovery, device, mock_now, swing_mode):
"""Test for updating swing mode from the device."""
device().horizontal_swing = (
HorizontalSwing.FullSwing
if swing_mode in (SWING_BOTH, SWING_HORIZONTAL)
else HorizontalSwing.Default
)
device().vertical_swing = (
VerticalSwing.FullSwing
if swing_mode in (SWING_BOTH, SWING_VERTICAL)
else VerticalSwing.Default
)
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.attributes.get(ATTR_SWING_MODE) == swing_mode
async def test_name(hass, discovery, device):
"""Test for name property."""
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state.attributes[ATTR_FRIENDLY_NAME] == "fake-device-1"
async def test_supported_features_with_turnon(hass, discovery, device):
"""Test for supported_features property."""
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORTED_FEATURES
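# Illustrative helper (not part of the test suite): the "advance the clock and
# let the coordinator refresh" pattern repeated in the tests above could be
# factored out like this. The helper name is hypothetical.
async def _advance_time(hass, mock_now, minutes):
    """Advance utcnow by `minutes` and let pending updates complete."""
    next_update = mock_now + timedelta(minutes=minutes)
    with patch("homeassistant.util.dt.utcnow", return_value=next_update):
        async_fire_time_changed(hass, next_update)
        await hass.async_block_till_done()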
|
|
"""
Here is probably the place to write the docs, since the test-cases
show how the type behave.
Later...
"""
from ctypes import *
import sys, unittest
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
if sys.platform == "win32":
windll = WinDLL(_ctypes_test.__file__)
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
_fields_ = [("left", c_int), ("top", c_int),
("right", c_int), ("bottom", c_int)]
class FunctionTestCase(unittest.TestCase):
def test_mro(self):
        # in Python 2.3, this raises TypeError: MRO conflict among base classes,
# in Python 2.2 it works.
#
# But in early versions of _ctypes.c, the result of tp_new
# wasn't checked, and it even crashed Python.
# Found by Greg Chapman.
try:
class X(object, Array):
_length_ = 5
_type_ = "i"
except TypeError:
pass
from _ctypes import _Pointer
try:
class X(object, _Pointer):
pass
except TypeError:
pass
from _ctypes import _SimpleCData
try:
class X(object, _SimpleCData):
_type_ = "i"
except TypeError:
pass
try:
class X(object, Structure):
_fields_ = []
except TypeError:
pass
def test_wchar_parm(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(1, u"x", 3, 4, 5.0, 6.0)
self.assertEqual(result, 139)
self.assertEqual(type(result), int)
def test_wchar_result(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_wchar
result = f(0, 0, 0, 0, 0, 0)
self.assertEqual(result, u'\x00')
def test_voidresult(self):
f = dll._testfunc_v
f.restype = None
f.argtypes = [c_int, c_int, POINTER(c_int)]
result = c_int()
self.assertEqual(None, f(1, 2, byref(result)))
self.assertEqual(result.value, 3)
def test_intresult(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_int
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), int)
# If we declare the function to return a short,
# is the high part split off?
f.restype = c_short
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(1, 2, 3, 0x10004, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
# You cannot assign character format codes as restype any longer
self.assertRaises(TypeError, setattr, f, "restype", "i")
def test_floatresult(self):
f = dll._testfunc_f_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_float
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_doubleresult(self):
f = dll._testfunc_d_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_double
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_longdoubleresult(self):
f = dll._testfunc_D_bhilfD
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_longdouble]
f.restype = c_longdouble
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_longlongresult(self):
try:
c_longlong
except NameError:
return
f = dll._testfunc_q_bhilfd
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
f = dll._testfunc_q_bhilfdq
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double, c_longlong]
result = f(1, 2, 3, 4, 5.0, 6.0, 21)
self.assertEqual(result, 42)
def test_stringresult(self):
f = dll._testfunc_p_p
f.argtypes = None
f.restype = c_char_p
result = f("123")
self.assertEqual(result, "123")
result = f(None)
self.assertEqual(result, None)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(pointer(v))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
        # This one works...
result = f(pointer(v))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(p)
self.assertEqual(result.contents.value, 99)
arg = byref(v)
result = f(arg)
self.assertNotEqual(result.contents, v.value)
self.assertRaises(ArgumentError, f, byref(c_short(22)))
# It is dangerous, however, because you don't control the lifetime
# of the pointer:
result = f(byref(c_int(99)))
self.assertNotEqual(result.contents, 99)
def test_errors(self):
f = dll._testfunc_p_p
f.restype = c_int
class X(Structure):
_fields_ = [("y", c_int)]
self.assertRaises(TypeError, f, X()) #cannot convert parameter
################################################################
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(2**18, cb)
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
AnotherCallback = WINFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, -10, cb)
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertTrue(isinstance(value, (int, long)))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, f(1000000000000, cb))
    def test_dll_attr_errors(self):
self.assertRaises(AttributeError, getattr, dll, "_xxx_yyy")
self.assertRaises(ValueError, c_int.in_dll, dll, "_xxx_yyy")
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(inp)
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
if sys.platform == "win32":
def test_struct_return_2H_stdcall(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
windll.s_ret_2h_func.restype = S2H
windll.s_ret_2h_func.argtypes = [S2H]
s2h = windll.s_ret_2h_func(S2H(99, 88))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(inp)
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
if sys.platform == "win32":
def test_struct_return_8H_stdcall(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
windll.s_ret_8i_func.restype = S8I
windll.s_ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = windll.s_ret_8i_func(inp)
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_sf1651235(self):
# see http://www.python.org/sf/1651235
proto = CFUNCTYPE(c_int, RECT, POINT)
def callback(*args):
return 0
callback = proto(callback)
self.assertRaises(ArgumentError, lambda: callback((1, 2, 3, 4), POINT()))
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import print_function, unicode_literals
from future.builtins import input, int
from optparse import make_option
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from django.contrib.auth import get_user_model
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError
from django.utils.encoding import force_text
from django.utils.html import strip_tags
from zhiliao.blog.models import BlogPost, BlogCategory
from zhiliao.conf import settings
from zhiliao.core.models import CONTENT_STATUS_DRAFT
from zhiliao.core.models import CONTENT_STATUS_PUBLISHED
from zhiliao.generic.models import AssignedKeyword, Keyword, ThreadedComment
from zhiliao.pages.models import RichTextPage
from zhiliao.utils.html import decode_entities
User = get_user_model()
class BaseImporterCommand(BaseCommand):
"""
Base importer command for blogging platform specific management
commands to subclass when importing blog posts into Mezzanine.
The ``handle_import`` method should be overridden to provide the
import mechanism specific to the blogging platform being dealt with.
"""
option_list = BaseCommand.option_list + (
make_option("-m", "--mezzanine-user", dest="mezzanine_user",
help="Mezzanine username to assign the imported blog posts to."),
make_option("--noinput", action="store_false", dest="interactive",
default=True, help="Do NOT prompt for input of any kind. "
"Fields will be truncated if too long."),
make_option("-n", "--navigation", action="store_true",
dest="in_navigation", help="Add any imported pages to navigation"),
make_option("-f", "--footer", action="store_true", dest="in_footer",
help="Add any imported pages to footer navigation"),
)
def __init__(self, **kwargs):
self.posts = []
self.pages = []
super(BaseImporterCommand, self).__init__(**kwargs)
def add_post(self, title=None, content=None, old_url=None, pub_date=None,
tags=None, categories=None, comments=None):
"""
Adds a post to the post list for processing.
- ``title`` and ``content`` are strings for the post.
- ``old_url`` is a string that a redirect will be created for.
- ``pub_date`` is assumed to be a ``datetime`` object.
- ``tags`` and ``categories`` are sequences of strings.
- ``comments`` is a sequence of dicts - each dict should be the
return value of ``add_comment``.
"""
if not title:
title = strip_tags(content).split(". ")[0]
title = decode_entities(title)
if categories is None:
categories = []
if tags is None:
tags = []
if comments is None:
comments = []
self.posts.append({
"title": force_text(title),
"publish_date": pub_date,
"content": force_text(content),
"categories": categories,
"tags": tags,
"comments": comments,
"old_url": old_url,
})
return self.posts[-1]
def add_page(self, title=None, content=None, old_url=None,
tags=None, old_id=None, old_parent_id=None):
"""
Adds a page to the list of pages to be imported - used by the
Wordpress importer.
"""
if not title:
text = decode_entities(strip_tags(content)).replace("\n", " ")
title = text.split(". ")[0]
if tags is None:
tags = []
self.pages.append({
"title": title,
"content": content,
"tags": tags,
"old_url": old_url,
"old_id": old_id,
"old_parent_id": old_parent_id,
})
def add_comment(self, post=None, name=None, email=None, pub_date=None,
website=None, body=None):
"""
Adds a comment to the post provided.
"""
if post is None:
if not self.posts:
raise CommandError("Cannot add comments without posts")
post = self.posts[-1]
post["comments"].append({
"user_name": name,
"user_email": email,
"submit_date": pub_date,
"user_url": website,
"comment": body,
})
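# Illustrative sketch (not part of the original importer): a platform-specific
# subclass's ``handle_import`` would typically feed the helpers above, e.g.
#
#     def handle_import(self, options):
#         for entry in some_feed:  # ``some_feed`` is a hypothetical parsed export
#             post = self.add_post(title=entry["title"], content=entry["body"],
#                                  pub_date=entry["date"], tags=entry["tags"],
#                                  old_url=entry["url"])
#             for c in entry["comments"]:
#                 self.add_comment(post=post, name=c["author"], email=c["email"],
#                                  pub_date=c["date"], body=c["text"])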
def trunc(self, model, prompt, **fields):
"""
Truncates field values for the given model. Prompts for a new
value if truncation occurs and prompting is enabled.
"""
for field_name, value in fields.items():
field = model._meta.get_field(field_name)
max_length = getattr(field, "max_length", None)
if not max_length:
continue
elif not prompt:
fields[field_name] = value[:max_length]
continue
while len(value) > max_length:
encoded_value = value.encode("utf-8")
new_value = input("The value for the field %s.%s exceeds "
"its maximum length of %s chars: %s\n\nEnter a new value "
"for it, or press return to have it truncated: " %
(model.__name__, field_name, max_length, encoded_value))
value = new_value if new_value else value[:max_length]
fields[field_name] = value
return fields
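# Illustrative behaviour of ``trunc`` (the max_length of 500 below is an
# assumption for the sake of the example, not taken from this file):
#     self.trunc(BlogPost, False, title="x" * 600)
# silently returns {"title": "x" * 500} when ``BlogPost.title`` has
# max_length=500, while passing prompt=True asks the user for a shorter value.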
def handle(self, *args, **options):
"""
Processes the converted data and saves it into the Mezzanine database.
Attributes:
mezzanine_user: the user to assign the imported posts and comments to
date_format: the format used for post and comment dates
"""
mezzanine_user = options.get("mezzanine_user")
site = Site.objects.get_current()
verbosity = int(options.get("verbosity", 1))
prompt = options.get("interactive")
# Validate the Mezzanine user.
if mezzanine_user is None:
raise CommandError("No Mezzanine user has been specified")
try:
mezzanine_user = User.objects.get(username=mezzanine_user)
except User.DoesNotExist:
raise CommandError("Invalid Mezzanine user: %s" % mezzanine_user)
# Run the subclassed ``handle_import`` and save posts, tags,
# categories, and comments to the DB.
self.handle_import(options)
for post_data in self.posts:
categories = post_data.pop("categories")
tags = post_data.pop("tags")
comments = post_data.pop("comments")
old_url = post_data.pop("old_url")
post_data = self.trunc(BlogPost, prompt, **post_data)
initial = {
"title": post_data.pop("title"),
"user": mezzanine_user,
}
if post_data["publish_date"] is None:
post_data["status"] = CONTENT_STATUS_DRAFT
post, created = BlogPost.objects.get_or_create(**initial)
for k, v in post_data.items():
setattr(post, k, v)
post.save()
if created and verbosity >= 1:
print("Imported post: %s" % post)
for name in categories:
cat = self.trunc(BlogCategory, prompt, title=name)
if not cat["title"]:
continue
cat, created = BlogCategory.objects.get_or_create(**cat)
if created and verbosity >= 1:
print("Imported category: %s" % cat)
post.categories.add(cat)
for comment in comments:
comment = self.trunc(ThreadedComment, prompt, **comment)
comment["site"] = site
post.comments.add(ThreadedComment(**comment))
if verbosity >= 1:
print("Imported comment by: %s" % comment["user_name"])
self.add_meta(post, tags, prompt, verbosity, old_url)
# Create any pages imported (Wordpress can include pages)
in_menus = []
footer = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES
if menu[-1] == "pages/menus/footer.html"]
if options["in_navigation"]:
in_menus = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES]
if footer and not options["in_footer"]:
in_menus.remove(footer[0])
elif footer and options["in_footer"]:
in_menus = footer
parents = []
for page in self.pages:
tags = page.pop("tags")
old_url = page.pop("old_url")
old_id = page.pop("old_id")
old_parent_id = page.pop("old_parent_id")
page = self.trunc(RichTextPage, prompt, **page)
page["status"] = CONTENT_STATUS_PUBLISHED
page["in_menus"] = in_menus
page, created = RichTextPage.objects.get_or_create(**page)
if created and verbosity >= 1:
print("Imported page: %s" % page)
self.add_meta(page, tags, prompt, verbosity, old_url)
parents.append({
'old_id': old_id,
'old_parent_id': old_parent_id,
'page': page,
})
for obj in parents:
if obj['old_parent_id']:
for parent in parents:
if parent['old_id'] == obj['old_parent_id']:
obj['page'].parent = parent['page']
obj['page'].save()
break
def add_meta(self, obj, tags, prompt, verbosity, old_url=None):
"""
Adds tags and a redirect for the given obj, which is a blog
post or a page.
"""
for tag in tags:
keyword = self.trunc(Keyword, prompt, title=tag)
keyword, created = Keyword.objects.get_or_create_iexact(**keyword)
obj.keywords.add(AssignedKeyword(keyword=keyword))
if created and verbosity >= 1:
print("Imported tag: %s" % keyword)
if old_url is not None:
old_path = urlparse(old_url).path
if not old_path.strip("/"):
return
redirect = self.trunc(Redirect, prompt, old_path=old_path)
redirect['site'] = Site.objects.get_current()
redirect, created = Redirect.objects.get_or_create(**redirect)
redirect.new_path = obj.get_absolute_url()
redirect.save()
if created and verbosity >= 1:
print("Created redirect for: %s" % old_url)
def handle_import(self, options):
"""
Should be overridden by subclasses - performs the conversion from
the originating data source into the lists of posts and comments
ready for processing.
"""
raise NotImplementedError
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.api import utils
from tempest.common.utils.data_utils import rand_name
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import skip_because
class ListServerFiltersTestJSON(base.BaseComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(ListServerFiltersTestJSON, cls).setUpClass()
cls.client = cls.servers_client
# Check to see if the alternate image ref actually exists...
images_client = cls.images_client
resp, images = images_client.list_images()
if cls.image_ref != cls.image_ref_alt and \
any([image for image in images
if image['id'] == cls.image_ref_alt]):
cls.multiple_images = True
else:
cls.image_ref_alt = cls.image_ref
# Do some sanity checks here. If one of the images does
# not exist, fail early since the tests won't work...
try:
cls.images_client.get_image(cls.image_ref)
except exceptions.NotFound:
raise RuntimeError("Image %s (image_ref) was not found!" %
cls.image_ref)
try:
cls.images_client.get_image(cls.image_ref_alt)
except exceptions.NotFound:
raise RuntimeError("Image %s (image_ref_alt) was not found!" %
cls.image_ref_alt)
cls.s1_name = rand_name(cls.__name__ + '-instance')
resp, cls.s1 = cls.create_server(name=cls.s1_name,
image_id=cls.image_ref,
flavor=cls.flavor_ref,
wait_until='ACTIVE')
cls.s2_name = rand_name(cls.__name__ + '-instance')
resp, cls.s2 = cls.create_server(name=cls.s2_name,
image_id=cls.image_ref_alt,
flavor=cls.flavor_ref,
wait_until='ACTIVE')
cls.s3_name = rand_name(cls.__name__ + '-instance')
resp, cls.s3 = cls.create_server(name=cls.s3_name,
image_id=cls.image_ref,
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
cls.fixed_network_name = cls.config.compute.fixed_network_name
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@attr(type='gate')
def test_list_servers_filter_by_image(self):
# Filter the list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@attr(type='gate')
def test_list_servers_filter_by_flavor(self):
# Filter the list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@attr(type='gate')
def test_list_servers_filter_by_server_name(self):
# Filter the list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@attr(type='gate')
def test_list_servers_filter_by_server_status(self):
# Filter the list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@attr(type='gate')
def test_list_servers_filter_by_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 1}
resp, servers = self.client.list_servers(params)
# when _interface='xml', servers also contains an extra element for servers_links
self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@attr(type='gate')
def test_list_servers_detailed_filter_by_image(self):
# Filter the detailed list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@attr(type='gate')
def test_list_servers_detailed_filter_by_flavor(self):
# Filter the detailed list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@attr(type='gate')
def test_list_servers_detailed_filter_by_server_name(self):
# Filter the detailed list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@attr(type='gate')
def test_list_servers_detailed_filter_by_server_status(self):
# Filter the detailed list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers])
@attr(type='gate')
def test_list_servers_filtered_by_name_wildcard(self):
# List all servers that contains '-instance' in name
params = {'name': '-instance'}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
# Take a random part of the name and search for it
part_name = self.s1_name[6:-1]
params = {'name': part_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@skip_because(bug="1170718")
@attr(type='gate')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
# Only one server should be listed here
resp, self.s1 = self.client.get_server(self.s1['id'])
ip = self.s1['addresses'][self.fixed_network_name][0]['addr']
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@skip_because(bug="1182883",
condition=config.TempestConfig().service_available.neutron)
@attr(type='gate')
def test_list_servers_filtered_by_ip_regex(self):
# Filter servers by regex ip
# List all servers filtered by part of an ip address.
# All servers should be listed here
resp, self.s1 = self.client.get_server(self.s1['id'])
ip = self.s1['addresses'][self.fixed_network_name][0]['addr'][0:-3]
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
@attr(type='gate')
def test_list_servers_detailed_limit_results(self):
# Verify only the expected number of detailed results are returned
params = {'limit': 1}
resp, servers = self.client.list_servers_with_detail(params)
self.assertEqual(1, len(servers['servers']))
class ListServerFiltersTestXML(ListServerFiltersTestJSON):
_interface = 'xml'
|
|
# Copyright (C) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Goodness Weigher.
"""
from cinder.scheduler.weights import goodness
from cinder import test
from cinder.tests.unit.scheduler import fakes
class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_with_no_goodness_function(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'foo': '50'
}
})
weight_properties = {}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(0, weight)
def test_goodness_weigher_passing_host(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '100'
}
})
host_state_2 = fakes.FakeBackendState('host2', {
'host': 'host2.example.com',
'capabilities': {
'goodness_function': '0'
}
})
host_state_3 = fakes.FakeBackendState('host3', {
'host': 'host3.example.com',
'capabilities': {
'goodness_function': '100 / 2'
}
})
weight_properties = {}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(100, weight)
weight = weigher._weigh_object(host_state_2, weight_properties)
self.assertEqual(0, weight)
weight = weigher._weigh_object(host_state_3, weight_properties)
self.assertEqual(50, weight)
def test_goodness_weigher_capabilities_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'foo': 50,
'goodness_function': '10 + capabilities.foo'
}
})
weight_properties = {}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(60, weight)
def test_goodness_weigher_extra_specs_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + extra.foo'
}
})
weight_properties = {
'volume_type': {
'extra_specs': {
'foo': 50
}
}
}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(60, weight)
def test_goodness_weigher_volume_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + volume.foo'
}
})
weight_properties = {
'request_spec': {
'volume_properties': {
'foo': 50
}
}
}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(60, weight)
def test_goodness_weigher_qos_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + qos.foo'
}
})
weight_properties = {
'qos_specs': {
'foo': 50
}
}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(60, weight)
def test_goodness_weigher_stats_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': 'stats.free_capacity_gb > 20'
},
'free_capacity_gb': 50
})
weight_properties = {}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(100, weight)
def test_goodness_weigher_invalid_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + stats.my_val'
},
'foo': 50
})
weight_properties = {}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(0, weight)
def test_goodness_weigher_host_rating_out_of_bounds(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '-10'
}
})
host_state_2 = fakes.FakeBackendState('host2', {
'host': 'host2.example.com',
'capabilities': {
'goodness_function': '200'
}
})
weight_properties = {}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(0, weight)
weight = weigher._weigh_object(host_state_2, weight_properties)
self.assertEqual(0, weight)
def test_goodness_weigher_invalid_goodness_function(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '50 / 0'
}
})
weight_properties = {}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(0, weight)
def test_goodness_weigher_untyped_volume(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '67'
}
})
weight_properties = {
'volume_type': None,
}
weight = weigher._weigh_object(host_state, weight_properties)
self.assertEqual(67, weight)
|
|
# coding=utf-8
import os
import sys
import time
import logging
import traceback
import configobj
import inspect
# Path Fix
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), "../")))
import diamond
from diamond.collector import Collector
from diamond.handler.Handler import Handler
from diamond.scheduler import ThreadedScheduler
from diamond.util import load_class_from_name
class Server(object):
"""
Server class loads and starts Handlers and Collectors
"""
def __init__(self, config):
# Initialize Logging
self.log = logging.getLogger('diamond')
# Initialize Members
self.config = config
self.running = False
self.handlers = []
self.modules = {}
self.tasks = {}
self.collector_paths = []
# Initialize Scheduler
self.scheduler = ThreadedScheduler()
def load_config(self):
"""
Load the full config / merge split configs if configured
"""
configfile = os.path.abspath(self.config['configfile'])
config = configobj.ConfigObj(configfile)
config['configfile'] = self.config['configfile']
try:
for cfgfile in os.listdir(config['configs']['path']):
if cfgfile.endswith(config['configs']['extension']):
newconfig = configobj.ConfigObj(
config['configs']['path'] + cfgfile)
config.merge(newconfig)
except KeyError:
pass
if 'server' not in config:
raise Exception('Failed to reload config file %s!' % configfile)
self.config = config
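# Illustrative sketch of the split-config layout the merge loop above expects
# (the path and extension values are assumptions, not taken from this file):
#
#     [configs]
#     path = /etc/diamond/configs/
#     extension = .conf
#
# Every file under ``path`` whose name ends with ``extension`` is merged into
# the running configuration. Note that ``path`` is concatenated directly with
# the file name, so it should end with a trailing slash.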
def load_handler(self, fqcn):
"""
Load Handler class named fqcn
"""
# Load class
cls = load_class_from_name(fqcn)
# Check if cls is subclass of Handler
if cls == Handler or not issubclass(cls, Handler):
raise TypeError("%s is not a valid Handler" % fqcn)
# Log
self.log.debug("Loaded Handler: %s", fqcn)
return cls
def load_handlers(self):
"""
Load handlers
"""
if isinstance(self.config['server']['handlers'], basestring):
handlers = [self.config['server']['handlers']]
self.config['server']['handlers'] = handlers
for h in self.config['server']['handlers']:
try:
# Load Handler Class
cls = self.load_handler(h)
# Initialize Handler config
handler_config = configobj.ConfigObj()
# Merge default Handler default config
handler_config.merge(self.config['handlers']['default'])
# Check if Handler config exists
if cls.__name__ in self.config['handlers']:
# Merge Handler config section
handler_config.merge(self.config['handlers'][cls.__name__])
# Check for config file in config directory
configfile = os.path.join(
self.config['server']['handlers_config_path'],
cls.__name__) + '.conf'
if os.path.exists(configfile):
# Merge Collector config file
handler_config.merge(configobj.ConfigObj(configfile))
# Initialize Handler class
self.handlers.append(cls(handler_config))
except (ImportError, SyntaxError):
# Log Error
self.log.debug("Failed to load handler %s. %s", h,
traceback.format_exc())
continue
def load_collector(self, fqcn):
"""
Load Collector class named fqcn
"""
# Load class
cls = load_class_from_name(fqcn)
# Check if cls is subclass of Collector
if cls == Collector or not issubclass(cls, Collector):
raise TypeError("%s is not a valid Collector" % fqcn)
# Log
self.log.debug("Loaded Collector: %s", fqcn)
return cls
def load_include_path(self, paths):
"""
Scan for and add paths to the include path
"""
for path in paths:
# Verify the path is valid
if not os.path.isdir(path):
continue
# Add path to the system path, to avoid name clashes
# with mysql-connector for example ...
sys.path.insert(1, path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
self.load_include_path([fpath])
def load_collectors(self, paths, filter=None):
"""
Scan for collectors to load from path
"""
# Initialize return value
collectors = {}
for path in paths:
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return collectors
# Log
self.log.debug("Loading Collectors from: %s", path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
subcollectors = self.load_collectors([fpath])
for key in subcollectors:
collectors[key] = subcollectors[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath)
and len(f) > 3
and f[-3:] == '.py'
and f[0:4] != 'test'
and f[0] != '.'):
# Check filter
if filter and os.path.join(path, f) != filter:
continue
modname = f[:-3]
# Stat module file to get mtime
st = os.stat(os.path.join(path, f))
mtime = st.st_mtime
# Check if module has been loaded before
if modname in self.modules:
# Check if the file mtime is newer than at the last check
if mtime <= self.modules[modname]:
# Module hasn't changed
# Log
self.log.debug("Found %s, but it hasn't changed.",
modname)
continue
try:
# Import the module
mod = __import__(modname, globals(), locals(), ['*'])
except (ImportError, SyntaxError):
# Log error
self.log.error("Failed to import module: %s. %s",
modname,
traceback.format_exc())
continue
# Update module mtime
self.modules[modname] = mtime
# Log
self.log.debug("Loaded Module: %s", modname)
# Find all classes defined in the module
for attrname in dir(mod):
attr = getattr(mod, attrname)
# Only attempt to load attributes that are in fact classes,
# are Collectors, and are not the base Collector class itself
if (inspect.isclass(attr)
and issubclass(attr, Collector)
and attr != Collector):
if attrname.startswith('parent_'):
continue
# Get class name
fqcn = '.'.join([modname, attrname])
try:
# Load Collector class
cls = self.load_collector(fqcn)
# Add Collector class
collectors[cls.__name__] = cls
except Exception:
# Log error
self.log.error(
"Failed to load Collector: %s. %s",
fqcn, traceback.format_exc())
continue
# Return Collector classes
return collectors
def init_collector(self, cls):
"""
Initialize collector
"""
collector = None
try:
# Initialize Collector
collector = cls(self.config, self.handlers)
# Log
self.log.debug("Initialized Collector: %s", cls.__name__)
except Exception:
# Log error
self.log.error("Failed to initialize Collector: %s. %s",
cls.__name__, traceback.format_exc())
# Return collector
return collector
def schedule_collector(self, c, interval_task=True):
"""
Schedule collector
"""
# Make sure the collector was actually initialized
if c is None:
self.log.warn("Skipped loading invalid Collector: %s",
c.__class__.__name__)
return
if c.config['enabled'] is not True:
self.log.debug("Skipped loading disabled Collector: %s",
c.__class__.__name__)
return
# Get collector schedule
for name, schedule in c.get_schedule().items():
# Get scheduler args
func, args, splay, interval = schedule
# Check if a Collector with the same name has already been scheduled
if name in self.tasks:
self.scheduler.cancel(self.tasks[name])
# Log
self.log.debug("Canceled task: %s", name)
method = diamond.scheduler.method.sequential
if 'method' in c.config:
if c.config['method'] == 'Threaded':
method = diamond.scheduler.method.threaded
elif c.config['method'] == 'Forked':
method = diamond.scheduler.method.forked
# Schedule Collector
if interval_task:
task = self.scheduler.add_interval_task(func,
name,
splay,
interval,
method,
args,
None,
True)
else:
task = self.scheduler.add_single_task(func,
name,
splay,
method,
args,
None)
# Log
self.log.debug("Scheduled task: %s", name)
# Add task to list
self.tasks[name] = task
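# Illustrative note (shape inferred from the unpacking above, example values
# are assumptions): each entry returned by ``c.get_schedule()`` maps a task
# name to a ``(func, args, splay, interval)`` tuple, e.g.
#     {"MyCollector": (c._run, (), 5, 60)}
# meaning "run ``c._run()`` every 60 seconds, with up to 5 seconds of splay".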
def run(self):
"""
Load handler and collector classes and then start collectors
"""
# Set Running Flag
self.running = True
# Load config
self.load_config()
# Load handlers
if 'handlers_path' in self.config['server']:
handlers_path = self.config['server']['handlers_path']
self.load_include_path([handlers_path])
self.load_handlers()
# Load collectors
# Make it a list if it is not one already
if isinstance(self.config['server']['collectors_path'], basestring):
collectors_path = self.config['server']['collectors_path']
collectors_path = collectors_path.split(',')
self.config['server']['collectors_path'] = collectors_path
for path in self.config['server']['collectors_path']:
self.collector_paths.append(path.strip())
self.load_include_path(self.collector_paths)
collectors = self.load_collectors(self.collector_paths)
# Setup Collectors
for cls in collectors.values():
# Initialize Collector
c = self.init_collector(cls)
# Schedule Collector
self.schedule_collector(c)
# Start main loop
self.mainloop()
def run_one(self, file):
"""
Run given collector once and then exit
"""
# Set Running Flag
self.running = True
# Load handlers
if 'handlers_path' in self.config['server']:
handlers_path = self.config['server']['handlers_path']
self.load_include_path([handlers_path])
self.load_handlers()
# Overrides collector config dir
collector_config_path = os.path.abspath(os.path.dirname(file))
self.config['server']['collectors_config_path'] = collector_config_path
# Load config
self.load_config()
# Load collectors
if os.path.dirname(file) == '':
tmp_path = self.config['server']['collectors_path']
filter_out = True
else:
tmp_path = os.path.dirname(file)
filter_out = False
self.collector_paths.append(tmp_path)
self.load_include_path(self.collector_paths)
collectors = self.load_collectors(self.collector_paths, file)
# if file is a full path, rather than a collector name, only the
# collector(s) in that path are instantiated, and there's no need to
# filter extraneous ones from the collectors dictionary
if filter_out:
for item in list(collectors.keys()):
if item.lower() not in file.lower():
del collectors[item]
# Setup Collectors
for cls in collectors.values():
# Initialize Collector
c = self.init_collector(cls)
# Schedule collector
self.schedule_collector(c, False)
# Start main loop
self.mainloop(False)
def mainloop(self, reload=True):
# Start scheduler
self.scheduler.start()
# Log
self.log.info('Started task scheduler.')
# Initialize reload timer
time_since_reload = 0
# Main Loop
while self.running:
time.sleep(1)
time_since_reload += 1
# Check if its time to reload collectors
if (reload
and time_since_reload
> int(self.config['server']['collectors_reload_interval'])):
self.log.debug("Reloading config.")
self.load_config()
# Log
self.log.debug("Reloading collectors.")
# Load collectors
collectors = self.load_collectors(self.collector_paths)
# Setup any Collectors that were loaded
for cls in collectors.values():
# Initialize Collector
c = self.init_collector(cls)
# Schedule Collector
self.schedule_collector(c)
# Reset reload timer
time_since_reload = 0
# If the queue is empty and we will not reload collectors, exit
if not reload and len(self.scheduler.sched._queue) == 0:
self.running = False
# Log
self.log.debug('Stopping task scheduler.')
# Stop scheduler
self.scheduler.stop()
# Log
self.log.info('Stopped task scheduler.')
# Log
self.log.debug("Exiting.")
def stop(self):
"""
Close all connections and terminate threads.
"""
# Set Running Flag
self.running = False
|
|
import codecs
import io
import csv
import datetime
import decimal
import hashlib
import os
import random
import re
import uuid
import binascii
import pystache
import pytz
import simplejson
import sqlparse
from flask import current_app
from funcy import select_values
from redash import settings
from sqlalchemy.orm.query import Query
from .human_time import parse_human_time
COMMENTS_REGEX = re.compile(r"/\*.*?\*/")
WRITER_ENCODING = os.environ.get("REDASH_CSV_WRITER_ENCODING", "utf-8")
WRITER_ERRORS = os.environ.get("REDASH_CSV_WRITER_ERRORS", "strict")
def utcnow():
"""Return datetime.now value with timezone specified.
Without the timezone data, a timestamp stored to the database is interpreted in the server's current timezone,
which leads to errors in calculations.
"""
return datetime.datetime.now(pytz.utc)
def dt_from_timestamp(timestamp, tz_aware=True):
timestamp = datetime.datetime.utcfromtimestamp(float(timestamp))
if tz_aware:
timestamp = timestamp.replace(tzinfo=pytz.utc)
return timestamp
def slugify(s):
return re.sub(r"[^a-z0-9_\-]+", "-", s.lower())
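# Illustrative usage: slugify("Hello World!") -> "hello-world-"
# (text is lowercased first; every run of disallowed characters, including the
# trailing "!", collapses to a single dash).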
def gen_query_hash(sql):
"""Return hash of the given query after stripping all comments, line breaks
and multiple spaces, and lower casing all text.
TODO: possible issue - the following queries will get the same id:
1. SELECT 1 FROM table WHERE column='Value';
2. SELECT 1 FROM table where column='value';
"""
sql = COMMENTS_REGEX.sub("", sql)
sql = "".join(sql.split()).lower()
return hashlib.md5(sql.encode("utf-8")).hexdigest()
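# Illustrative property: formatting does not change the hash, e.g.
#     gen_query_hash("SELECT 1 /* note */") == gen_query_hash("select  1")
# since both normalize to "select1" before hashing.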
def generate_token(length):
chars = "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"
rand = random.SystemRandom()
return "".join(rand.choice(chars) for x in range(length))
class JSONEncoder(simplejson.JSONEncoder):
"""Adapter for `simplejson.dumps`."""
def default(self, o):
# Some SQLAlchemy collections are lazy.
if isinstance(o, Query):
result = list(o)
elif isinstance(o, decimal.Decimal):
result = float(o)
elif isinstance(o, (datetime.timedelta, uuid.UUID)):
result = str(o)
# See "Date Time String Format" in the ECMA-262 specification.
elif isinstance(o, datetime.datetime):
result = o.isoformat()
if o.microsecond:
result = result[:23] + result[26:]
if result.endswith("+00:00"):
result = result[:-6] + "Z"
elif isinstance(o, datetime.date):
result = o.isoformat()
elif isinstance(o, datetime.time):
if o.utcoffset() is not None:
raise ValueError("JSON can't represent timezone-aware times.")
result = o.isoformat()
if o.microsecond:
result = result[:12]
elif isinstance(o, memoryview):
result = binascii.hexlify(o).decode()
elif isinstance(o, bytes):
result = binascii.hexlify(o).decode()
else:
result = super(JSONEncoder, self).default(o)
return result
def json_loads(data, *args, **kwargs):
"""A custom JSON loading function which passes all parameters to the
simplejson.loads function."""
return simplejson.loads(data, *args, **kwargs)
def json_dumps(data, *args, **kwargs):
"""A custom JSON dumping function which passes all parameters to the
simplejson.dumps function."""
kwargs.setdefault("cls", JSONEncoder)
kwargs.setdefault("encoding", None)
# Float values of nan or inf in Python should be rendered as null in JSON.
# Using ignore_nan = False would make Python render nan as NaN, leading to a parse error in the front-end.
kwargs.setdefault('ignore_nan', True)
return simplejson.dumps(data, *args, **kwargs)
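# Illustrative usage (relying on the JSONEncoder defaults set above):
#     json_dumps({"when": datetime.datetime(2019, 1, 2, 3, 4, 5)})
#     -> '{"when": "2019-01-02T03:04:05"}'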
def mustache_render(template, context=None, **kwargs):
renderer = pystache.Renderer(escape=lambda u: u)
return renderer.render(template, context, **kwargs)
def build_url(request, host, path):
parts = request.host.split(":")
if len(parts) > 1:
port = parts[1]
if (port, request.scheme) not in (("80", "http"), ("443", "https")):
host = "{}:{}".format(host, port)
return "{}://{}{}".format(request.scheme, host, path)
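# Illustrative usage (the request object is assumed to be Flask-like, with
# host "localhost:5000" and scheme "http"):
#     build_url(request, "example.org", "/queries/1")
#     -> "http://example.org:5000/queries/1"
# Standard ports (80 for http, 443 for https) are dropped from the result.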
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding=WRITER_ENCODING, **kwds):
# Redirect output to a queue
self.queue = io.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def _encode_utf8(self, val):
# ``csv.writer`` writing into ``io.StringIO`` expects text, so raw bytes are
# decoded here rather than encoding text; the whole row is encoded once per
# ``writerow`` call below.
if isinstance(val, bytes):
return val.decode(WRITER_ENCODING, WRITER_ERRORS)
return val
def writerow(self, row):
self.writer.writerow([self._encode_utf8(s) for s in row])
# Fetch the text output accumulated in the queue ...
data = self.queue.getvalue()
# ... and encode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty the queue and reset its position
self.queue.truncate(0)
self.queue.seek(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
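# Illustrative usage of UnicodeWriter (a sketch, assuming the target stream
# accepts the encoded bytes, i.e. is opened in binary mode):
#     with open("out.csv", "wb") as f:
#         writer = UnicodeWriter(f)
#         writer.writerow(["name", "münze"])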
def collect_parameters_from_request(args):
parameters = {}
for k, v in args.items():
if k.startswith("p_"):
parameters[k[2:]] = v
return parameters
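# Illustrative usage: query-string arguments prefixed with "p_" are collected
# with the prefix stripped, e.g.
#     collect_parameters_from_request({"p_start_date": "2020-01-01", "api_key": "x"})
#     -> {"start_date": "2020-01-01"}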
def base_url(org):
if settings.MULTI_ORG:
return "https://{}/{}".format(settings.HOST, org.slug)
return settings.HOST
def filter_none(d):
return select_values(lambda v: v is not None, d)
def to_filename(s):
s = re.sub(r'[<>:"\\/|?*]+', " ", s, flags=re.UNICODE)
s = re.sub(r"\s+", "_", s, flags=re.UNICODE)
return s.strip("_")
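# Illustrative usage: to_filename('weekly report: 2020/01') -> "weekly_report_2020_01"
# (characters that are unsafe in file names become spaces, whitespace runs
# become underscores, and leading/trailing underscores are stripped).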
def deprecated():
def wrapper(K):
setattr(K, "deprecated", True)
return K
return wrapper
def render_template(path, context):
""" Render a template with context, without loading the entire app context.
Using Flask's `render_template` function would load the entire app context, which in turn triggers every
function decorated with the `context_processor` decorator, none of which is needed purely for rendering.
"""
return current_app.jinja_env.get_template(path).render(**context)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import deprecation
import mock
from openstack.cluster.v1 import _proxy
from openstack.cluster.v1 import action
from openstack.cluster.v1 import build_info
from openstack.cluster.v1 import cluster
from openstack.cluster.v1 import cluster_attr
from openstack.cluster.v1 import cluster_policy
from openstack.cluster.v1 import event
from openstack.cluster.v1 import node
from openstack.cluster.v1 import policy
from openstack.cluster.v1 import policy_type
from openstack.cluster.v1 import profile
from openstack.cluster.v1 import profile_type
from openstack.cluster.v1 import receiver
from openstack.cluster.v1 import service
from openstack import proxy2 as proxy_base
from openstack.tests.unit import test_proxy_base2
class TestClusterProxy(test_proxy_base2.TestProxyBase):
def setUp(self):
super(TestClusterProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_build_info_get(self):
self.verify_get(self.proxy.get_build_info, build_info.BuildInfo,
ignore_value=True,
expected_kwargs={'requires_id': False})
def test_profile_types(self):
self.verify_list(self.proxy.profile_types,
profile_type.ProfileType,
paginated=False)
def test_profile_type_get(self):
self.verify_get(self.proxy.get_profile_type,
profile_type.ProfileType)
def test_policy_types(self):
self.verify_list(self.proxy.policy_types, policy_type.PolicyType,
paginated=False)
def test_policy_type_get(self):
self.verify_get(self.proxy.get_policy_type, policy_type.PolicyType)
def test_profile_create(self):
self.verify_create(self.proxy.create_profile, profile.Profile)
def test_profile_validate(self):
self.verify_create(self.proxy.validate_profile,
profile.ProfileValidate)
def test_profile_delete(self):
self.verify_delete(self.proxy.delete_profile, profile.Profile, False)
def test_profile_delete_ignore(self):
self.verify_delete(self.proxy.delete_profile, profile.Profile, True)
def test_profile_find(self):
self.verify_find(self.proxy.find_profile, profile.Profile)
def test_profile_get(self):
self.verify_get(self.proxy.get_profile, profile.Profile)
def test_profiles(self):
self.verify_list(self.proxy.profiles, profile.Profile,
paginated=True,
method_kwargs={'limit': 2},
expected_kwargs={'limit': 2})
def test_profile_update(self):
self.verify_update(self.proxy.update_profile, profile.Profile)
def test_cluster_create(self):
self.verify_create(self.proxy.create_cluster, cluster.Cluster)
def test_cluster_delete(self):
self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, False)
def test_cluster_delete_ignore(self):
self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, True)
def test_cluster_find(self):
self.verify_find(self.proxy.find_cluster, cluster.Cluster)
def test_cluster_get(self):
self.verify_get(self.proxy.get_cluster, cluster.Cluster)
def test_clusters(self):
self.verify_list(self.proxy.clusters, cluster.Cluster,
paginated=True,
method_kwargs={'limit': 2},
expected_kwargs={'limit': 2})
def test_cluster_update(self):
self.verify_update(self.proxy.update_cluster, cluster.Cluster)
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_add_nodes(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.add_nodes",
self.proxy.cluster_add_nodes,
method_args=["FAKE_CLUSTER", ["node1"]],
expected_args=[["node1"]])
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_add_nodes_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.add_nodes",
self.proxy.cluster_add_nodes,
method_args=[mock_cluster, ["node1"]],
expected_args=[["node1"]])
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_del_nodes(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.del_nodes",
self.proxy.cluster_del_nodes,
method_args=["FAKE_CLUSTER", ["node1"]],
expected_args=[["node1"]])
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_del_nodes_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.del_nodes",
self.proxy.cluster_del_nodes,
method_args=[mock_cluster, ["node1"]],
method_kwargs={"key": "value"},
expected_args=[["node1"]],
expected_kwargs={"key": "value"})
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_replace_nodes(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.replace_nodes",
self.proxy.cluster_replace_nodes,
method_args=["FAKE_CLUSTER", {"node1": "node2"}],
expected_args=[{"node1": "node2"}])
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_replace_nodes_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.replace_nodes",
self.proxy.cluster_replace_nodes,
method_args=[mock_cluster, {"node1": "node2"}],
expected_args=[{"node1": "node2"}])
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_scale_out(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.scale_out",
self.proxy.cluster_scale_out,
method_args=["FAKE_CLUSTER", 3],
expected_args=[3])
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_scale_out_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.scale_out",
self.proxy.cluster_scale_out,
method_args=[mock_cluster, 5],
expected_args=[5])
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_scale_in(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.scale_in",
self.proxy.cluster_scale_in,
method_args=["FAKE_CLUSTER", 3],
expected_args=[3])
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_scale_in_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.scale_in",
self.proxy.cluster_scale_in,
method_args=[mock_cluster, 5],
expected_args=[5])
def test_services(self):
self.verify_list(self.proxy.services,
service.Service,
paginated=False)
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_resize(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.resize",
self.proxy.cluster_resize,
method_args=["FAKE_CLUSTER"],
method_kwargs={'k1': 'v1', 'k2': 'v2'},
expected_kwargs={'k1': 'v1', 'k2': 'v2'})
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
def test_cluster_resize_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.resize",
self.proxy.cluster_resize,
method_args=[mock_cluster],
method_kwargs={'k1': 'v1', 'k2': 'v2'},
expected_kwargs={'k1': 'v1', 'k2': 'v2'})
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_attach_policy(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.policy_attach",
self.proxy.cluster_attach_policy,
method_args=["FAKE_CLUSTER", "FAKE_POLICY"],
method_kwargs={"k1": "v1", "k2": "v2"},
expected_args=["FAKE_POLICY"],
expected_kwargs={"k1": "v1", 'k2': "v2"})
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_attach_policy_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.policy_attach",
self.proxy.cluster_attach_policy,
method_args=[mock_cluster, "FAKE_POLICY"],
method_kwargs={"k1": "v1", "k2": "v2"},
expected_args=["FAKE_POLICY"],
expected_kwargs={"k1": "v1", 'k2': "v2"})
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_detach_policy(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.policy_detach",
self.proxy.cluster_detach_policy,
method_args=["FAKE_CLUSTER", "FAKE_POLICY"],
expected_args=["FAKE_POLICY"])
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_detach_policy_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.policy_detach",
self.proxy.cluster_detach_policy,
method_args=[mock_cluster, "FAKE_POLICY"],
expected_args=["FAKE_POLICY"])
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_find')
def test_cluster_update_policy(self, mock_find):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_find.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.policy_update",
self.proxy.cluster_update_policy,
method_args=["FAKE_CLUSTER", "FAKE_POLICY"],
method_kwargs={"k1": "v1", "k2": "v2"},
expected_args=["FAKE_POLICY"],
expected_kwargs={"k1": "v1", 'k2': "v2"})
mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER",
ignore_missing=False)
@deprecation.fail_if_not_removed
def test_cluster_update_policy_with_obj(self):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
self._verify("openstack.cluster.v1.cluster.Cluster.policy_update",
self.proxy.cluster_update_policy,
method_args=[mock_cluster, "FAKE_POLICY"],
method_kwargs={"k1": "v1", "k2": "v2"},
expected_args=["FAKE_POLICY"],
expected_kwargs={"k1": "v1", 'k2': "v2"})
def test_collect_cluster_attrs(self):
self.verify_list(self.proxy.collect_cluster_attrs,
cluster_attr.ClusterAttr, paginated=False,
method_args=['FAKE_ID', 'path.to.attr'],
expected_kwargs={'cluster_id': 'FAKE_ID',
'path': 'path.to.attr'})
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_cluster_check(self, mock_get):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_get.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.check",
self.proxy.check_cluster,
method_args=["FAKE_CLUSTER"])
mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_cluster_recover(self, mock_get):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_get.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.recover",
self.proxy.recover_cluster,
method_args=["FAKE_CLUSTER"])
mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER")
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_cluster_operation(self, mock_get):
mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
mock_get.return_value = mock_cluster
self._verify("openstack.cluster.v1.cluster.Cluster.op",
self.proxy.cluster_operation,
method_args=["FAKE_CLUSTER", "dance"],
expected_args=["dance"])
mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER")
def test_node_create(self):
self.verify_create(self.proxy.create_node, node.Node)
def test_node_delete(self):
self.verify_delete(self.proxy.delete_node, node.Node, False)
def test_node_delete_ignore(self):
self.verify_delete(self.proxy.delete_node, node.Node, True)
def test_node_find(self):
self.verify_find(self.proxy.find_node, node.Node)
def test_node_get(self):
self.verify_get(self.proxy.get_node, node.Node)
def test_node_get_with_details(self):
self._verify2('openstack.proxy2.BaseProxy._get',
self.proxy.get_node,
method_args=['NODE_ID'],
method_kwargs={'details': True},
expected_args=[node.NodeDetail],
expected_kwargs={'node_id': 'NODE_ID',
'requires_id': False})
def test_nodes(self):
self.verify_list(self.proxy.nodes, node.Node,
paginated=True,
method_kwargs={'limit': 2},
expected_kwargs={'limit': 2})
def test_node_update(self):
self.verify_update(self.proxy.update_node, node.Node)
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_node_check(self, mock_get):
mock_node = node.Node.new(id='FAKE_NODE')
mock_get.return_value = mock_node
self._verify("openstack.cluster.v1.node.Node.check",
self.proxy.check_node,
method_args=["FAKE_NODE"])
mock_get.assert_called_once_with(node.Node, "FAKE_NODE")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_node_recover(self, mock_get):
mock_node = node.Node.new(id='FAKE_NODE')
mock_get.return_value = mock_node
self._verify("openstack.cluster.v1.node.Node.recover",
self.proxy.recover_node,
method_args=["FAKE_NODE"])
mock_get.assert_called_once_with(node.Node, "FAKE_NODE")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_node_adopt(self, mock_get):
mock_node = node.Node.new()
mock_get.return_value = mock_node
self._verify("openstack.cluster.v1.node.Node.adopt",
self.proxy.adopt_node,
method_kwargs={"preview": False, "foo": "bar"},
expected_kwargs={"preview": False, "foo": "bar"})
mock_get.assert_called_once_with(node.Node, None)
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_node_adopt_preview(self, mock_get):
mock_node = node.Node.new()
mock_get.return_value = mock_node
self._verify("openstack.cluster.v1.node.Node.adopt",
self.proxy.adopt_node,
method_kwargs={"preview": True, "foo": "bar"},
expected_kwargs={"preview": True, "foo": "bar"})
mock_get.assert_called_once_with(node.Node, None)
@deprecation.fail_if_not_removed
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_node_operation(self, mock_get):
mock_node = node.Node.new(id='FAKE_CLUSTER')
mock_get.return_value = mock_node
self._verify("openstack.cluster.v1.node.Node.op",
self.proxy.node_operation,
method_args=["FAKE_NODE", "dance"],
expected_args=["dance"])
mock_get.assert_called_once_with(node.Node, "FAKE_NODE")
def test_policy_create(self):
self.verify_create(self.proxy.create_policy, policy.Policy)
def test_policy_validate(self):
self.verify_create(self.proxy.validate_policy, policy.PolicyValidate)
def test_policy_delete(self):
self.verify_delete(self.proxy.delete_policy, policy.Policy, False)
def test_policy_delete_ignore(self):
self.verify_delete(self.proxy.delete_policy, policy.Policy, True)
def test_policy_find(self):
self.verify_find(self.proxy.find_policy, policy.Policy)
def test_policy_get(self):
self.verify_get(self.proxy.get_policy, policy.Policy)
def test_policies(self):
self.verify_list(self.proxy.policies, policy.Policy,
paginated=True,
method_kwargs={'limit': 2},
expected_kwargs={'limit': 2})
def test_policy_update(self):
self.verify_update(self.proxy.update_policy, policy.Policy)
def test_cluster_policies(self):
self.verify_list(self.proxy.cluster_policies,
cluster_policy.ClusterPolicy,
paginated=False, method_args=["FAKE_CLUSTER"],
expected_kwargs={"cluster_id": "FAKE_CLUSTER"})
def test_get_cluster_policy(self):
fake_policy = cluster_policy.ClusterPolicy.new(id="FAKE_POLICY")
fake_cluster = cluster.Cluster.new(id='FAKE_CLUSTER')
# ClusterPolicy object as input
self._verify2('openstack.proxy2.BaseProxy._get',
self.proxy.get_cluster_policy,
method_args=[fake_policy, "FAKE_CLUSTER"],
expected_args=[cluster_policy.ClusterPolicy,
fake_policy],
expected_kwargs={'cluster_id': 'FAKE_CLUSTER'},
expected_result=fake_policy)
# Policy ID as input
self._verify2('openstack.proxy2.BaseProxy._get',
self.proxy.get_cluster_policy,
method_args=["FAKE_POLICY", "FAKE_CLUSTER"],
expected_args=[cluster_policy.ClusterPolicy,
"FAKE_POLICY"],
expected_kwargs={"cluster_id": "FAKE_CLUSTER"})
# Cluster object as input
self._verify2('openstack.proxy2.BaseProxy._get',
self.proxy.get_cluster_policy,
method_args=["FAKE_POLICY", fake_cluster],
expected_args=[cluster_policy.ClusterPolicy,
"FAKE_POLICY"],
expected_kwargs={"cluster_id": fake_cluster})
def test_receiver_create(self):
self.verify_create(self.proxy.create_receiver, receiver.Receiver)
def test_receiver_update(self):
self.verify_update(self.proxy.update_receiver, receiver.Receiver)
def test_receiver_delete(self):
self.verify_delete(self.proxy.delete_receiver, receiver.Receiver,
False)
def test_receiver_delete_ignore(self):
self.verify_delete(self.proxy.delete_receiver, receiver.Receiver, True)
def test_receiver_find(self):
self.verify_find(self.proxy.find_receiver, receiver.Receiver)
def test_receiver_get(self):
self.verify_get(self.proxy.get_receiver, receiver.Receiver)
def test_receivers(self):
self.verify_list(self.proxy.receivers, receiver.Receiver,
paginated=True,
method_kwargs={'limit': 2},
expected_kwargs={'limit': 2})
def test_action_get(self):
self.verify_get(self.proxy.get_action, action.Action)
def test_actions(self):
self.verify_list(self.proxy.actions, action.Action,
paginated=True,
method_kwargs={'limit': 2},
expected_kwargs={'limit': 2})
def test_event_get(self):
self.verify_get(self.proxy.get_event, event.Event)
def test_events(self):
self.verify_list(self.proxy.events, event.Event,
paginated=True,
method_kwargs={'limit': 2},
expected_kwargs={'limit': 2})
@mock.patch("openstack.resource2.wait_for_status")
def test_wait_for(self, mock_wait):
mock_resource = mock.Mock()
mock_wait.return_value = mock_resource
self.proxy.wait_for_status(mock_resource, 'ACTIVE')
mock_wait.assert_called_once_with(self.session, mock_resource,
'ACTIVE', [], 2, 120)
@mock.patch("openstack.resource2.wait_for_status")
def test_wait_for_params(self, mock_wait):
mock_resource = mock.Mock()
mock_wait.return_value = mock_resource
self.proxy.wait_for_status(mock_resource, 'ACTIVE', ['ERROR'], 1, 2)
mock_wait.assert_called_once_with(self.session, mock_resource,
'ACTIVE', ['ERROR'], 1, 2)
@mock.patch("openstack.resource2.wait_for_delete")
def test_wait_for_delete(self, mock_wait):
mock_resource = mock.Mock()
mock_wait.return_value = mock_resource
self.proxy.wait_for_delete(mock_resource)
mock_wait.assert_called_once_with(self.session, mock_resource, 2, 120)
@mock.patch("openstack.resource2.wait_for_delete")
def test_wait_for_delete_params(self, mock_wait):
mock_resource = mock.Mock()
mock_wait.return_value = mock_resource
self.proxy.wait_for_delete(mock_resource, 1, 2)
mock_wait.assert_called_once_with(self.session, mock_resource, 1, 2)
|
|
#!/usr/bin/env python2
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, glob, re, urllib, socket
from contextlib import closing
from collections import defaultdict
from hibench_prop_env_mapping import HiBenchEnvPropMappingMandatory, HiBenchEnvPropMapping, HiBenchPropEnvMapping, HiBenchPropEnvMappingMandatory
HibenchConf={}
HibenchConfRef={}
#FIXME: use log helper later
def log(*s):
if len(s)==1: s=s[0]
else: s= " ".join([str(x) for x in s])
sys.stderr.write( str(s) +'\n')
def log_debug(*s):
#log(*s)
pass
# copied from http://stackoverflow.com/questions/3575554/python-subprocess-with-timeout-and-large-output-64k
# Comment: I have a better solution, but I'm too lazy to write.
import fcntl
import os
import subprocess
import time
def nonBlockRead(output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read()
except:
return ''
def execute_cmd(cmdline, timeout):
"""
Execute cmdline, limit execution time to 'timeout' seconds.
Uses the subprocess module and subprocess.PIPE.
Raises TimeoutInterrupt
"""
p = subprocess.Popen(
cmdline,
bufsize = 0, # default value of 0 (unbuffered) is best
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
t_begin = time.time() # Monitor execution time
seconds_passed = 0
stdout = ''
stderr = ''
while p.poll() is None and ( seconds_passed < timeout or timeout == 0): # Monitor process
time.sleep(0.1) # Wait a little
seconds_passed = time.time() - t_begin
stdout += nonBlockRead(p.stdout)
stderr += nonBlockRead(p.stderr)
if seconds_passed >= timeout and timeout>0:
try:
p.stdout.close() # If they are not closed the fds will hang around until
p.stderr.close() # os.fdlimit is exceeded and cause a nasty exception
p.terminate() # Important to close the fds prior to terminating the process!
# NOTE: Are there any other "non-freed" resources?
except:
pass
return ('Timeout', stdout, stderr)
return (p.returncode, stdout, stderr)
def shell(cmd, timeout=5):
retcode, stdout, stderr = execute_cmd(cmd, timeout)
if retcode == 'Timeout':
log("ERROR, execute cmd: '%s' timedout." % cmd)
log(" STDOUT:\n"+stdout)
log(" STDERR:\n"+stderr)
log(" Please check!")
        assert 0, cmd + " timed out after %d seconds" % timeout
return stdout
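# Example (illustrative sketch): how shell() above is typically used. The
# commands and the explicit timeout below are arbitrary examples.
def _example_shell_usage():
    out = shell("echo hello")               # default 5 second timeout
    uname = shell("uname -a", timeout=10)   # explicit timeout in seconds
    return out, uname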
def OneAndOnlyOneFile(filename_pattern):
files = glob.glob(filename_pattern)
if len(files)==1: return files[0]
else:
log('This filename pattern "%s" is required to match only one file.' % filename_pattern)
if len(files)==0:
log("However, there's no file found, please fix it.")
else:
log("However, there's several files found, please remove the redundant files:\n", "\n".join(files))
raise Exception("Need to match one and only one file!")
def load_config(conf_root, workload_root, workload_folder):
abspath = os.path.abspath
conf_root = abspath(conf_root)
workload_root = abspath(workload_root)
workload_folder = abspath(workload_folder)
workload_tail = workload_folder[len(workload_root):][1:]
workload_api = os.path.dirname(workload_tail) if os.path.dirname(workload_tail) else workload_tail
workload_name = os.path.basename(workload_root)
conf_files = sorted(glob.glob(conf_root+"/*.conf")) + \
sorted(glob.glob("%s/conf/*.conf" % (workload_root,))) + \
sorted(glob.glob("%s/%s/*.conf" % (workload_root, workload_api)))
# load values from conf files
for filename in conf_files:
log("Parsing conf: %s" % filename)
with open(filename) as f:
for line in f.readlines():
line = line.strip()
if not line: continue # skip empty lines
if line[0]=='#': continue # skip comments
try:
key, value = re.split("\s", line, 1)
except ValueError:
key = line.strip()
value = ""
HibenchConf[key] = value.strip()
HibenchConfRef[key] = filename
# override values from os environment variable settings
for env_name, prop_name in HiBenchEnvPropMappingMandatory.items() + HiBenchEnvPropMapping.items():
if env_name in os.environ:
env_value = os.getenv(env_name)
HibenchConf[prop_name] = env_value
HibenchConfRef[prop_name] = "OS environment variable:%s" % env_name
# generate ref values
waterfall_config()
# generate auto probe values
generate_optional_value()
# generate ref values again to ensure all values can be found
waterfall_config(force=True)
# check
check_config()
# Export config to file, let bash script to import as local variables.
print export_config(workload_name, workload_api)
def check_config(): # check configures
# Ensure mandatory configures are available
for _, prop_name in HiBenchEnvPropMappingMandatory.items():
assert HibenchConf.get(prop_name, None) is not None, "Mandatory configure missing: %s" % prop_name
# Ensure all ref values in configure has been expanded
for _, prop_name in HiBenchEnvPropMappingMandatory.items() + HiBenchEnvPropMapping.items():
assert "${" not in HibenchConf.get(prop_name, ""), "Unsolved ref key: %s. \n Defined at %s:\n Unsolved value:%s\n" % (prop_name,
HibenchConfRef.get(prop_name, "unknown"),
HibenchConf.get(prop_name, "unknown"))
def waterfall_config(force=False): # replace "${xxx}" to its values
def process_replace(m):
raw_key = m.groups()[0]
key = raw_key[2:-1].strip()
log_debug("key:", key, " value:", HibenchConf.get(key, "RAWKEY:"+raw_key))
if force:
return HibenchConf.get(key, raw_key)
else:
return HibenchConf.get(key, "") or raw_key
p = re.compile("(\$\{\s*[^\s^\$^\}]+\s*\})")
finish = False
while not finish:
finish = True
for key, value in HibenchConf.items():
old_value = value
value = p.sub(process_replace, value)
if value != old_value: # we have updated value, try again
# log("Waterfall conf: %s: %s -> %s" % (key, old_value, value))
HibenchConf[key] = value
finish = False
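# Example (illustrative sketch): what waterfall_config() does. "${key}"
# references inside values are expanded repeatedly until nothing changes.
# The keys below are made up purely for illustration.
def _example_waterfall():
    HibenchConf['hibench.example.base'] = '/opt/hibench'
    HibenchConf['hibench.example.report'] = '${hibench.example.base}/report'
    waterfall_config()
    # HibenchConf['hibench.example.report'] is now '/opt/hibench/report'
    return HibenchConf['hibench.example.report']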
def generate_optional_value(): # get some critical values from environment or make a guess
d = os.path.dirname
join = os.path.join
HibenchConf['hibench.home']=d(d(d(os.path.abspath(__file__))))
del d
HibenchConfRef['hibench.home']="Inferred from relative path of dirname(%s)/../../" % __file__
# probe hadoop version & release.
if not HibenchConf.get("hibench.hadoop.version", "") or not HibenchConf.get("hibench.hadoop.release", ""):
# check hadoop version first
hadoop_version = ""
cmd = HibenchConf['hibench.hadoop.executable'] +' version | head -1 | cut -d \ -f 2'
if not HibenchConf.get("hibench.hadoop.version", ""):
hadoop_version = shell(cmd).strip()
            assert hadoop_version, "ERROR, executing '%s' returned nothing; please confirm the hadoop environment is configured properly." % cmd
if hadoop_version[0] != '1': # hadoop2? or CDH's MR1?
cmd2 = HibenchConf['hibench.hadoop.executable'] + " mradmin 2>&1 | grep yarn"
mradm_result = shell(cmd2).strip()
if mradm_result: # match with keyword "yarn", must be CDH's MR2, do nothing
pass
            else: # didn't match "yarn"; however, it claims to be hadoop2, so it must be CDH's MR1
HibenchConf["hibench.hadoop.version"] = "hadoop1"
HibenchConfRef["hibench.hadoop.version"] = "Probed by: `%s` and `%s`" % (cmd, cmd2)
if not HibenchConf.get("hibench.hadoop.version", ""):
HibenchConf["hibench.hadoop.version"] = "hadoop" + hadoop_version[0]
HibenchConfRef["hibench.hadoop.version"] = "Probed by: " + cmd
assert HibenchConf["hibench.hadoop.version"] in ["hadoop1", "hadoop2"], "Unknown hadoop version (%s). Auto probe failed, please override `hibench.hadoop.version` to explicitly define this property" % HibenchConf["hibench.hadoop.version"]
# check hadoop release
if not HibenchConf.get("hibench.hadoop.release", ""):
if not hadoop_version:
hadoop_version = shell(cmd).strip()
HibenchConf["hibench.hadoop.release"] = \
"cdh4" if "cdh4" in hadoop_version else \
"cdh5" if "cdh5" in hadoop_version else \
"apache" if "hadoop" in HibenchConf["hibench.hadoop.version"] else \
"UNKNOWN"
HibenchConfRef["hibench.hadoop.release"] = "Inferred by: hadoop version, which is:\"%s\"" % hadoop_version
assert HibenchConf["hibench.hadoop.release"] in ["cdh4", "cdh5", "apache"], "Unknown hadoop release. Auto probe failed, please override `hibench.hadoop.release` to explicitly define this property"
# probe spark version
if not HibenchConf.get("hibench.spark.version", ""):
spark_home = HibenchConf.get("hibench.spark.home", "")
assert spark_home, "`hibench.spark.home` undefined, please fix it and retry"
try:
release_file = join(spark_home, "RELEASE")
with open(release_file) as f:
spark_version_raw = f.readlines()[0]
#spark_version_raw="Spark 1.2.2-SNAPSHOT (git revision f9d8c5e) built for Hadoop 1.0.4\n" # version sample
spark_version = spark_version_raw.split()[1].strip()
HibenchConfRef["hibench.spark.version"] = "Probed from file %s, parsed by value:%s" % (release_file, spark_version_raw)
except IOError as e: # no release file, fall back to hard way
log("Probing spark verison, may last long at first time...")
shell_cmd = '( cd %s; mvn help:evaluate -Dexpression=project.version 2> /dev/null | grep -v "INFO" | tail -n 1)' % spark_home
spark_version = shell(shell_cmd, timeout = 600).strip()
HibenchConfRef["hibench.spark.version"] = "Probed by shell command: %s, value: %s" % (shell_cmd, spark_version)
assert spark_version, "Spark version probe failed, please override `hibench.spark.version` to explicitly define this property"
HibenchConf["hibench.spark.version"] = "spark" + spark_version[:3]
# probe hadoop example jars
if not HibenchConf.get("hibench.hadoop.examples.jar", ""):
if HibenchConf["hibench.hadoop.version"] == "hadoop1": # MR1
if HibenchConf['hibench.hadoop.release'] == 'apache': # Apache release
HibenchConf["hibench.hadoop.examples.jar"] = OneAndOnlyOneFile(HibenchConf['hibench.hadoop.home']+"/hadoop-examples*.jar")
HibenchConfRef["hibench.hadoop.examples.jar"]= "Inferred by: " + HibenchConf['hibench.hadoop.home']+"/hadoop-examples*.jar"
elif HibenchConf['hibench.hadoop.release'].startswith('cdh'): # CDH release
HibenchConf["hibench.hadoop.examples.jar"] = OneAndOnlyOneFile(HibenchConf['hibench.hadoop.home']+"/share/hadoop/mapreduce1/hadoop-examples*.jar")
HibenchConfRef["hibench.hadoop.examples.jar"]= "Inferred by: " + HibenchConf['hibench.hadoop.home']+"/share/hadoop/mapreduce1/hadoop-examples*.jar"
else: # MR2
if HibenchConf['hibench.hadoop.release'] == 'apache': # Apache release
HibenchConf["hibench.hadoop.examples.jar"] = OneAndOnlyOneFile(HibenchConf['hibench.hadoop.home'] + "/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar")
HibenchConfRef["hibench.hadoop.examples.jar"]= "Inferred by: " + HibenchConf['hibench.hadoop.home']+"/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar"
elif HibenchConf['hibench.hadoop.release'].startswith('cdh'): # CDH release
HibenchConf["hibench.hadoop.examples.jar"] = OneAndOnlyOneFile(HibenchConf['hibench.hadoop.home'] + "/share/hadoop/mapreduce2/hadoop-mapreduce-examples-*.jar")
HibenchConfRef["hibench.hadoop.examples.jar"]= "Inferred by: " + HibenchConf['hibench.hadoop.home']+"/share/hadoop/mapreduce2/hadoop-mapreduce-examples-*.jar"
# probe hadoop examples test jars (for sleep in hadoop2 only)
if not HibenchConf.get("hibench.hadoop.examples.test.jar", ""):
if HibenchConf["hibench.hadoop.version"] == "hadoop1" and HibenchConf["hibench.hadoop.release"] == "apache":
HibenchConf["hibench.hadoop.examples.test.jar"] = "dummy"
HibenchConfRef["hibench.hadoop.examples.test.jar"]= "Dummy value, not available in hadoop1"
else:
if HibenchConf['hibench.hadoop.release'] == 'apache':
HibenchConf["hibench.hadoop.examples.test.jar"] = OneAndOnlyOneFile(HibenchConf['hibench.hadoop.home'] + "/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient*-tests.jar")
HibenchConfRef["hibench.hadoop.examples.test.jar"]= "Inferred by: " + HibenchConf['hibench.hadoop.home']+"/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient*-tests.jar"
elif HibenchConf['hibench.hadoop.release'].startswith('cdh'):
if HibenchConf["hibench.hadoop.version"] == "hadoop2":
HibenchConf["hibench.hadoop.examples.test.jar"] = OneAndOnlyOneFile(HibenchConf['hibench.hadoop.home'] + "/share/hadoop/mapreduce2/hadoop-mapreduce-client-jobclient*-tests.jar")
HibenchConfRef["hibench.hadoop.examples.test.jar"]= "Inferred by: " + HibenchConf['hibench.hadoop.home']+"/share/hadoop/mapreduce2/hadoop-mapreduce-client-jobclient*-tests.jar"
elif HibenchConf["hibench.hadoop.version"] == "hadoop1":
HibenchConf["hibench.hadoop.examples.test.jar"] = OneAndOnlyOneFile(HibenchConf['hibench.hadoop.home'] + "/share/hadoop/mapreduce1/hadoop-examples-*.jar")
HibenchConfRef["hibench.hadoop.examples.test.jar"]= "Inferred by: " + HibenchConf['hibench.hadoop.home']+"/share/hadoop/mapreduce1/hadoop-mapreduce-client-jobclient*-tests.jar"
# set hibench.sleep.job.jar
if not HibenchConf.get('hibench.sleep.job.jar', ''):
if HibenchConf['hibench.hadoop.release'] == 'apache' and HibenchConf["hibench.hadoop.version"] == "hadoop1":
HibenchConf["hibench.sleep.job.jar"] = HibenchConf['hibench.hadoop.examples.jar']
HibenchConfRef["hibench.sleep.job.jar"] = "Refer to `hibench.hadoop.examples.jar` according to the evidence of `hibench.hadoop.release` and `hibench.hadoop.version`"
else:
# log("probe sleep jar:", HibenchConf['hibench.hadoop.examples.test.jar'])
HibenchConf["hibench.sleep.job.jar"] = HibenchConf['hibench.hadoop.examples.test.jar']
HibenchConfRef["hibench.sleep.job.jar"] = "Refer to `hibench.hadoop.examples.test.jar` according to the evidence of `hibench.hadoop.release` and `hibench.hadoop.version`"
# probe hadoop configuration files
if not HibenchConf.get("hibench.hadoop.configure.dir", ""):
if HibenchConf["hibench.hadoop.release"] == "apache": # Apache release
HibenchConf["hibench.hadoop.configure.dir"] = join(HibenchConf["hibench.hadoop.home"], "conf") if HibenchConf["hibench.hadoop.version"] == "hadoop1" \
else join(HibenchConf["hibench.hadoop.home"], "etc", "hadoop")
HibenchConfRef["hibench.hadoop.configure.dir"] = "Inferred by: 'hibench.hadoop.version' & 'hibench.hadoop.release'"
elif HibenchConf["hibench.hadoop.release"].startswith("cdh"): # CDH release
HibenchConf["hibench.hadoop.configure.dir"] = join(HibenchConf["hibench.hadoop.home"], "etc", "hadoop-mapreduce1") if HibenchConf["hibench.hadoop.version"] == "hadoop1" \
else join(HibenchConf["hibench.hadoop.home"], "etc", "hadoop")
HibenchConfRef["hibench.hadoop.configure.dir"] = "Inferred by: 'hibench.hadoop.version' & 'hibench.hadoop.release'"
# set hadoop mapper/reducer property names
if not HibenchConf.get("hibench.hadoop.mapper.name", ""):
HibenchConf["hibench.hadoop.mapper.name"] = "mapred.map.tasks" if HibenchConf["hibench.hadoop.version"] == "hadoop1" else "mapreduce.job.maps"
HibenchConfRef["hibench.hadoop.mapper.name"] = "Inferred by: 'hibench.hadoop.version'"
if not HibenchConf.get("hibench.hadoop.reducer.name", ""):
HibenchConf["hibench.hadoop.reducer.name"] = "mapred.reduce.tasks" if HibenchConf["hibench.hadoop.version"] == "hadoop1" else "mapreduce.job.reduces"
HibenchConfRef["hibench.hadoop.reducer.name"] = "Inferred by: 'hibench.hadoop.version'"
# probe masters, slaves hostnames
# determine running mode according to spark master configuration
if not (HibenchConf.get("hibench.masters.hostnames", "") or HibenchConf.get("hibench.slaves.hostnames", "")): # no pre-defined hostnames, let's probe
spark_master = HibenchConf['hibench.spark.master']
if spark_master.startswith("local"): # local mode
HibenchConf['hibench.masters.hostnames'] = '' # no master
HibenchConf['hibench.slaves.hostnames'] = 'localhost' # localhost as slaves
HibenchConfRef['hibench.masters.hostnames'] = HibenchConfRef['hibench.slaves.hostnames'] = "Probed by the evidence of 'hibench.spark.master=%s'" % spark_master
elif spark_master.startswith("spark"): # spark standalone mode
            HibenchConf['hibench.masters.hostnames'] = spark_master[len("spark://"):].split(":")[0]
HibenchConfRef['hibench.masters.hostnames'] = "Probed by the evidence of 'hibench.spark.master=%s'" % spark_master
try:
log(spark_master, HibenchConf['hibench.masters.hostnames'])
with closing(urllib.urlopen('http://%s:8080' % HibenchConf['hibench.masters.hostnames'])) as page:
worker_hostnames=[re.findall("http:\/\/([a-zA-Z\-\._0-9]+):8081", x)[0] for x in page.readlines() if "8081" in x and "worker" in x]
HibenchConf['hibench.slaves.hostnames'] = " ".join(worker_hostnames)
HibenchConfRef['hibench.slaves.hostnames'] = "Probed by parsing "+ 'http://%s:8080' % HibenchConf['hibench.masters.hostnames']
except Exception as e:
assert 0, "Get workers from spark master's web UI page failed, reason:%s\nPlease check your configurations, network settings, proxy settings, or set `hibench.masters.hostnames` and `hibench.slaves.hostnames` manually to bypass auto-probe" % e
elif spark_master.startswith("yarn"): # yarn mode
yarn_executable = os.path.join(os.path.dirname(HibenchConf['hibench.hadoop.executable']), "yarn")
cmd = "( " + yarn_executable + " node -list 2> /dev/null | grep RUNNING )"
try:
worker_hostnames = [line.split(":")[0] for line in shell(cmd).split("\n")]
HibenchConf['hibench.slaves.hostnames'] = " ".join(worker_hostnames)
HibenchConfRef['hibench.slaves.hostnames'] = "Probed by parsing results from: "+cmd
# parse yarn resource manager from hadoop conf
yarn_site_file = os.path.join(HibenchConf["hibench.hadoop.configure.dir"], "yarn-site.xml")
                with open(yarn_site_file) as f:
                    yarn_site_content = f.read()
                    match_address = re.findall("\<property\>\s*\<name\>\s*yarn.resourcemanager.address\s*\<\/name\>\s*\<value\>([a-zA-Z\-\._0-9]+)(:\d+)\<\/value\>", yarn_site_content)
                    match_hostname = re.findall("\<property\>\s*\<name\>\s*yarn.resourcemanager.hostname\s*\<\/name\>\s*\<value\>([a-zA-Z\-\._0-9]+)(:\d+)\<\/value\>", yarn_site_content)
                    if match_address:
                        resourcemanager_hostname = match_address[0][0]
                        HibenchConf['hibench.masters.hostnames'] = resourcemanager_hostname
                        HibenchConfRef['hibench.masters.hostnames'] = "Parsed from "+ yarn_site_file
                    elif match_hostname:
                        resourcemanager_hostname = match_hostname[0][0]
                        HibenchConf['hibench.masters.hostnames'] = resourcemanager_hostname
                        HibenchConfRef['hibench.masters.hostnames'] = "Parsed from "+ yarn_site_file
                    else:
                        assert 0, "Unknown resourcemanager, please check `hibench.hadoop.configure.dir` and \"yarn-site.xml\" file"
except Exception as e:
assert 0, "Get workers from yarn web UI page failed, reason:%s\nplease set `hibench.masters.hostnames` and `hibench.slaves.hostnames` manually" % e
# reset hostnames according to gethostbyaddr
names = set(HibenchConf['hibench.masters.hostnames'].split() + HibenchConf['hibench.slaves.hostnames'].split())
new_name_mapping={}
for name in names:
try:
new_name_mapping[name] = socket.gethostbyaddr(name)[0]
except: # host name lookup failure?
new_name_mapping[name] = name
HibenchConf['hibench.masters.hostnames'] = repr(" ".join([new_name_mapping[x] for x in HibenchConf['hibench.masters.hostnames'].split()]))
HibenchConf['hibench.slaves.hostnames'] = repr(" ".join([new_name_mapping[x] for x in HibenchConf['hibench.slaves.hostnames'].split()]))
# probe map.java_opts red.java_opts
cmd1 = """cat %s | grep "mapreduce.map.java.opts" | awk -F\< '{print $5}' | awk -F\> '{print $NF}'""" % os.path.join(HibenchConf['hibench.hadoop.configure.dir'], 'mapred-site.xml')
cmd2 = """cat %s | grep "mapreduce.reduce.java.opts" | awk -F\< '{print $5}' | awk -F\> '{print $NF}'""" % os.path.join(HibenchConf['hibench.hadoop.configure.dir'], 'mapred-site.xml')
HibenchConf['hibench.dfsioe.map.java_opts'] = shell(cmd1)
HibenchConfRef['hibench.dfsioe.map.java_opts'] = "Probed by shell command:'%s'" % cmd1
HibenchConf['hibench.dfsioe.red.java_opts'] = shell(cmd2)
HibenchConfRef['hibench.dfsioe.red.java_opts'] = "Probed by shell command:'%s'" % cmd2
def export_config(workload_name, workload_tail):
join = os.path.join
report_dir = HibenchConf['hibench.report.dir']
conf_dir = join(report_dir, workload_name, workload_tail, 'conf')
conf_filename= join(conf_dir, "%s.conf" % workload_name)
spark_conf_dir = join(conf_dir, "sparkbench")
spark_prop_conf_filename = join(spark_conf_dir, "spark.conf")
sparkbench_prop_conf_filename = join(spark_conf_dir, "sparkbench.conf")
if not os.path.exists(spark_conf_dir): os.makedirs(spark_conf_dir)
if not os.path.exists(conf_dir): os.makedirs(conf_dir)
# generate configure for hibench
sources=defaultdict(list)
for env_name, prop_name in HiBenchEnvPropMappingMandatory.items() + HiBenchEnvPropMapping.items():
source = HibenchConfRef.get(prop_name, 'None')
sources[source].append('%s=%s' % (env_name, HibenchConf.get(prop_name, '')))
with open(conf_filename, 'w') as f:
for source in sorted(sources.keys()):
f.write("# Source: %s\n" % source)
f.write("\n".join(sorted(sources[source])))
f.write("\n\n")
f.write("#Source: add for internal usage\n")
f.write("SPARKBENCH_PROPERTIES_FILES=%s\n" % sparkbench_prop_conf_filename)
f.write("SPARK_PROP_CONF=%s\n" % spark_prop_conf_filename)
f.write("WORKLOAD_RESULT_FOLDER=%s\n" % join(conf_dir, ".."))
f.write("HIBENCH_WORKLOAD_CONF=%s\n" % conf_filename)
f.write("export HADOOP_EXECUTABLE\n")
f.write("export HADOOP_CONF_DIR\n")
# generate properties for spark & sparkbench
sources=defaultdict(list)
for prop_name, prop_value in HibenchConf.items():
source = HibenchConfRef.get(prop_name, 'None')
sources[source].append('%s\t%s' % (prop_name, prop_value))
# generate configure for sparkbench
with open(spark_prop_conf_filename, 'w') as f:
for source in sorted(sources.keys()):
items = [x for x in sources[source] if x.startswith("spark.")]
if items:
f.write("# Source: %s\n" % source)
f.write("\n".join(sorted(items)))
f.write("\n\n")
# generate configure for spark
with open(sparkbench_prop_conf_filename, 'w') as f:
for source in sorted(sources.keys()):
items = [x for x in sources[source] if x.startswith("sparkbench.") or x.startswith("hibench.")]
if items:
f.write("# Source: %s\n" % source)
f.write("\n".join(sorted(items)))
f.write("\n\n")
return conf_filename
if __name__=="__main__":
if len(sys.argv)<4:
raise Exception("Please supply <conf root path>, <workload root path>, <workload folder path>")
conf_root, workload_root, workload_folder = sys.argv[1], sys.argv[2], sys.argv[3]
load_config(conf_root, workload_root, workload_folder)
|
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with CoreFoundation
CFRunLoops. This includes Cocoa's NSRunLoop.
In order to use this support, simply do the following::
| from twisted.internet import cfreactor
| cfreactor.install()
Then use the twisted.internet APIs as usual. The other methods here are not
intended to be called directly under normal use. However, install can take
a runLoop kwarg, and run will take a withRunLoop arg if you need to explicitly
pass a CFRunLoop for some reason. Otherwise it will make a pretty good guess
as to which runLoop you want (the current NSRunLoop if PyObjC is imported,
otherwise the current CFRunLoop. Either way, if one doesn't exist, it will
be created).
Maintainer: Bob Ippolito
"""
__all__ = ['install']
import sys
# hints for py2app
import Carbon.CF
import traceback
import cfsupport as cf
from zope.interface import implements
from twisted.python import log, threadable, failure
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import posixbase, error
from weakref import WeakKeyDictionary
from Foundation import NSRunLoop
from AppKit import NSApp
# cache two extremely common "failures" without traceback info
_faildict = {
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost()),
}
class SelectableSocketWrapper(object):
_objCache = WeakKeyDictionary()
cf = None
def socketWrapperForReactorAndObject(klass, reactor, obj):
_objCache = klass._objCache
if obj in _objCache:
return _objCache[obj]
v = _objCache[obj] = klass(reactor, obj)
return v
socketWrapperForReactorAndObject = classmethod(socketWrapperForReactorAndObject)
def __init__(self, reactor, obj):
if self.cf:
raise ValueError, "This socket wrapper is already initialized"
self.reactor = reactor
self.obj = obj
obj._orig_ssw_connectionLost = obj.connectionLost
obj.connectionLost = self.objConnectionLost
self.fd = obj.fileno()
self.writing = False
self.reading = False
self.wouldRead = False
self.wouldWrite = False
self.cf = cf.PyCFSocket(obj.fileno(), self.doRead, self.doWrite, self.doConnect)
self.cf.stopWriting()
reactor.getRunLoop().addSocket(self.cf)
def __repr__(self):
return 'SSW(fd=%r r=%r w=%r x=%08x o=%08x)' % (self.fd, int(self.reading), int(self.writing), id(self), id(self.obj))
def objConnectionLost(self, *args, **kwargs):
obj = self.obj
self.reactor.removeReader(obj)
self.reactor.removeWriter(obj)
obj.connectionLost = obj._orig_ssw_connectionLost
obj.connectionLost(*args, **kwargs)
try:
del self._objCache[obj]
except:
pass
self.obj = None
self.cf = None
def doConnect(self, why):
pass
def startReading(self):
self.cf.startReading()
self.reading = True
if self.wouldRead:
if not self.reactor.running:
self.reactor.callLater(0, self.doRead)
else:
self.doRead()
self.wouldRead = False
return self
def stopReading(self):
self.cf.stopReading()
self.reading = False
self.wouldRead = False
return self
def startWriting(self):
self.cf.startWriting()
self.writing = True
if self.wouldWrite:
if not self.reactor.running:
self.reactor.callLater(0, self.doWrite)
else:
self.doWrite()
self.wouldWrite = False
return self
def stopWriting(self):
self.cf.stopWriting()
self.writing = False
self.wouldWrite = False
def _finishReadOrWrite(self, fn, faildict=_faildict):
try:
why = fn()
except:
why = sys.exc_info()[1]
log.err()
if why:
try:
f = faildict.get(why.__class__) or failure.Failure(why)
self.objConnectionLost(f)
except:
log.err()
if self.reactor.running:
self.reactor.simulate()
def doRead(self):
obj = self.obj
if not obj:
return
if not self.reading:
self.wouldRead = True
if self.reactor.running:
self.reactor.simulate()
return
self._finishReadOrWrite(obj.doRead)
def doWrite(self):
obj = self.obj
if not obj:
return
if not self.writing:
self.wouldWrite = True
if self.reactor.running:
self.reactor.simulate()
return
self._finishReadOrWrite(obj.doWrite)
def __hash__(self):
return hash(self.fd)
class CFReactor(posixbase.PosixReactorBase):
implements(IReactorFDSet)
    # how long to poll if we don't care about signals
longIntervalOfTime = 60.0
# how long we should poll if we do care about signals
shortIntervalOfTime = 1.0
# don't set this
pollInterval = longIntervalOfTime
def __init__(self, runLoop=None):
self.readers = {}
self.writers = {}
self.running = 0
self.crashing = False
self._doRunUntilCurrent = True
self.timer = None
self.runLoop = None
self.nsRunLoop = None
self.didStartRunLoop = False
if runLoop is not None:
self.getRunLoop(runLoop)
posixbase.PosixReactorBase.__init__(self)
def getRunLoop(self, runLoop=None):
if self.runLoop is None:
self.nsRunLoop = runLoop or NSRunLoop.currentRunLoop()
self.runLoop = cf.PyCFRunLoop(self.nsRunLoop.getCFRunLoop())
return self.runLoop
def addReader(self, reader):
self.readers[reader] = SelectableSocketWrapper.socketWrapperForReactorAndObject(self, reader).startReading()
def addWriter(self, writer):
self.writers[writer] = SelectableSocketWrapper.socketWrapperForReactorAndObject(self, writer).startWriting()
def removeReader(self, reader):
wrapped = self.readers.get(reader, None)
if wrapped is not None:
del self.readers[reader]
wrapped.stopReading()
def removeWriter(self, writer):
wrapped = self.writers.get(writer, None)
if wrapped is not None:
del self.writers[writer]
wrapped.stopWriting()
def getReaders(self):
return self.readers.keys()
def getWriters(self):
return self.writers.keys()
def removeAll(self):
r = self.readers.keys()
for s in self.readers.itervalues():
s.stopReading()
for s in self.writers.itervalues():
s.stopWriting()
self.readers.clear()
self.writers.clear()
return r
def run(self, installSignalHandlers=1, withRunLoop=None):
if self.running:
raise ValueError, "Reactor already running"
if installSignalHandlers:
self.pollInterval = self.shortIntervalOfTime
runLoop = self.getRunLoop(withRunLoop)
self._startup()
self.startRunning(installSignalHandlers=installSignalHandlers)
self.running = True
if NSApp() is None and self.nsRunLoop.currentMode() is None:
# Most of the time the NSRunLoop will have already started,
# but in this case it wasn't.
runLoop.run()
self.crashing = False
self.didStartRunLoop = True
def callLater(self, howlong, *args, **kwargs):
rval = posixbase.PosixReactorBase.callLater(self, howlong, *args, **kwargs)
if self.timer:
timeout = self.timeout()
if timeout is None:
timeout = howlong
sleepUntil = cf.now() + min(timeout, howlong)
if sleepUntil < self.timer.getNextFireDate():
self.timer.setNextFireDate(sleepUntil)
else:
pass
return rval
def iterate(self, howlong=0.0):
if self.running:
raise ValueError, "Can't iterate a running reactor"
self.runUntilCurrent()
self.doIteration(howlong)
def doIteration(self, howlong):
if self.running:
raise ValueError, "Can't iterate a running reactor"
howlong = howlong or 0.01
pi = self.pollInterval
self.pollInterval = howlong
self._doRunUntilCurrent = False
self.run()
self._doRunUntilCurrent = True
self.pollInterval = pi
def simulate(self):
if self.crashing:
return
if not self.running:
raise ValueError, "You can't simulate a stopped reactor"
if self._doRunUntilCurrent:
self.runUntilCurrent()
if self.crashing:
return
if self.timer is None:
return
nap = self.timeout()
if nap is None:
nap = self.pollInterval
else:
nap = min(self.pollInterval, nap)
if self.running:
self.timer.setNextFireDate(cf.now() + nap)
if not self._doRunUntilCurrent:
self.crash()
def _startup(self):
if self.running:
raise ValueError, "Can't bootstrap a running reactor"
self.timer = cf.PyCFRunLoopTimer(cf.now(), self.pollInterval, self.simulate)
self.runLoop.addTimer(self.timer)
def cleanup(self):
pass
def sigInt(self, *args):
self.callLater(0.0, self.stop)
def crash(self):
if not self.running:
raise ValueError, "Can't crash a stopped reactor"
posixbase.PosixReactorBase.crash(self)
self.crashing = True
if self.timer is not None:
self.runLoop.removeTimer(self.timer)
self.timer = None
if self.didStartRunLoop:
self.runLoop.stop()
def stop(self):
if not self.running:
raise ValueError, "Can't stop a stopped reactor"
posixbase.PosixReactorBase.stop(self)
def install(runLoop=None):
"""Configure the twisted mainloop to be run inside CFRunLoop.
"""
reactor = CFReactor(runLoop=runLoop)
reactor.addSystemEventTrigger('after', 'shutdown', reactor.cleanup)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
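# Example (illustrative sketch), following the usage described in the module
# docstring: install this reactor before importing twisted.internet.reactor,
# then run the Twisted mainloop as usual.
def _example_install_and_run():
    install()  # or install(runLoop=someCFRunLoop) to supply one explicitly
    from twisted.internet import reactor
    reactor.run()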
|
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from envisage.ui.tasks.preferences_pane import PreferencesPane
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
from pyface.message_dialog import warning
from traits.api import HasTraits, Float, Enum, Str, Bool, on_trait_change, Property, Button, List, Dict
from traitsui.api import View, Item, UItem, Spring, Label, spring, VGroup, HGroup, EnumEditor, ButtonEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pychron_traits import BorderHGroup, BorderVGroup
from pychron.envisage.resources import icon
from pychron.envisage.tasks.base_preferences_helper import BasePreferencesHelper
from pychron.pychron_constants import PLUSMINUS, NULL_STR, K_DECAY_CONSTANTS, PLUSMINUS_ONE_SIGMA
LAMBDA_K_ATTRS = ('lambda_e', 'lambda_e_error', 'lambda_b', 'lambda_b_error')
ATM_ATTRS = ('ar40_ar36_atm', 'ar40_ar36_atm_error', 'ar40_ar36_atm_citation',
'ar40_ar38_atm', 'ar40_ar38_atm_error', 'ar40_ar38_atm_citation')
class DecayConstantEntry(HasTraits):
name = Str # ('Steiger & Jager')
lambda_e = Float # (5.81e-11)
lambda_e_error = Float # (0)
lambda_b = Float # (4.962e-10)
lambda_b_error = Float # (0)
total_k_decay = Property(depends_on='lambda_e, lambda_b')
def _get_total_k_decay(self):
return self.lambda_e + self.lambda_b
def totuple(self):
return tuple([getattr(self, a) for a in LAMBDA_K_ATTRS])
def traits_view(self):
v = okcancel_view(VGroup(Item('name'),
BorderHGroup(UItem('lambda_e'), Label(PLUSMINUS), UItem('lambda_e_error'),
label='Ar40K epsilon/yr'),
BorderHGroup(UItem('lambda_b'), Label(PLUSMINUS), UItem('lambda_b_error'),
label='Ar40K beta/yr'),
Item('total_k_decay', style='readonly')),
title='Add Decay Constant Entry')
return v
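# Example (illustrative sketch): the tuple layout produced by
# DecayConstantEntry.totuple(), which is what gets stored in
# decay_constant_entries. The numbers are the Steiger & Jager values already
# used as defaults elsewhere in this file.
def _example_decay_entry_tuple():
    e = DecayConstantEntry(name='Example', lambda_e=5.81e-11, lambda_e_error=0,
                           lambda_b=4.962e-10, lambda_b_error=0)
    return e.totuple()  # -> (5.81e-11, 0.0, 4.962e-10, 0.0)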
class AtmConstantsEntry(HasTraits):
name = Str
ar40_ar36_atm = Float
ar40_ar36_atm_error = Float
ar40_ar38_atm = Float
ar40_ar38_atm_error = Float
def totuple(self):
return tuple([getattr(self, a) for a in ATM_ATTRS])
def traits_view(self):
v = okcancel_view(VGroup(Item('name'),
BorderHGroup(UItem('ar40_ar36_atm'), Label(PLUSMINUS), UItem('ar40_ar36_atm_error'),
label='(Ar40/Ar36)atm'),
BorderHGroup(UItem('ar40_ar38_atm'), Label(PLUSMINUS), UItem('ar40_ar38_atm_error'),
label='(Ar40/Ar38)atm')),
title='Add Atm Constant Entry')
return v
class ArArConstantsPreferences(BasePreferencesHelper):
name = 'Constants'
preferences_path = 'pychron.arar.constants'
ar40_ar36_atm = Float(295.5)
ar40_ar36_atm_error = Float(0)
ar40_ar38_atm = Float(1575)
ar40_ar38_atm_error = Float(2)
lambda_e = Float(5.81e-11)
lambda_e_error = Float(0)
lambda_b = Float(4.962e-10)
lambda_b_error = Float(0)
lambda_cl36 = Float(6.308e-9)
lambda_cl36_error = Float(0)
lambda_ar37 = Float(0.01975)
lambda_ar37_error = Float(0)
lambda_ar39 = Float(7.068e-6)
lambda_ar39_error = Float(0)
ar37_ar39_mode = Enum('Normal', 'Fixed')
ar37_ar39 = Float(0.01)
ar37_ar39_error = Float(0.01)
allow_negative_ca_correction = Bool
use_irradiation_endtime = Bool
# ===========================================================================
# spectrometer
# ===========================================================================
abundance_sensitivity = Float(0)
sensitivity = Float(0)
ic_factor = Float(1.0)
ic_factor_error = Float(0.0)
age_units = Enum('a', 'ka', 'Ma', 'Ga')
# citations
ar40_ar36_atm_citation = Str
ar40_ar38_atm_citation = Str
lambda_e_citation = Str
lambda_b_citation = Str
lambda_cl36_citation = Str
lambda_ar37_citation = Str
lambda_ar39_citation = Str
decay_constant_entries = Dict(K_DECAY_CONSTANTS)
add_decay_constant = Button
delete_decay_constant = Button
decay_constant_name = Str(NULL_STR)
decay_constant_names = List([NULL_STR, 'Min et al., 2000', 'Steiger & Jager 1977'])
decay_constant_entry_deletable = Property(depends_on='decay_constant_name', transient=True)
total_k_decay = Property(depends_on='lambda_e, lambda_b')
atm_constant_entries = Dict({'Nier 1950': (295.5, 0.5, 'Nier 1950', 1575.0, 2.0, 'Nier 1950'),
'Lee et al., 2006': (
298.56, 0.31, 'Lee et al., 2006', 1583.87, 3.01, 'Lee et al., 2006')})
atm_constant_name = Str(NULL_STR)
atm_constant_names = List([NULL_STR, 'Nier 1950', 'Lee et al., 2006'])
add_atm_constant = Button
delete_atm_constant = Button
atm_constant_entry_deletable = Property(depends_on='atm_constant_name', transient=True)
def _update_entries(self, new, entries, attrs):
if new in entries:
vs = entries[new]
for a, v in zip(attrs, vs):
setattr(self, a, v)
def _find_entry(self, entries, attrs):
def test_entry(v):
return all([getattr(self, attr) == pvalue
for attr, pvalue in zip(attrs, v)])
return next((k for k, v in entries.items() if test_entry(v)), NULL_STR)
def _find_decay_constant_entry(self):
return self._find_entry(self.decay_constant_entries, LAMBDA_K_ATTRS)
def _find_atm_constant_entry(self):
return self._find_entry(self.atm_constant_entries, ATM_ATTRS)
# handlers
def _delete_atm_constant_fired(self):
dn = self.atm_constant_name
result = confirm(None, 'Are you sure you want to remove "{}"'.format(dn))
if result == YES:
self.atm_constant_names.remove(dn)
self.atm_constant_entries.pop(dn)
self.atm_constant_name = self.atm_constant_names[-1] if self.atm_constant_names else NULL_STR
def _delete_decay_constant_fired(self):
dn = self.decay_constant_name
result = confirm(None, 'Are you sure you want to remove "{}"'.format(dn))
if result == YES:
self.decay_constant_names.remove(dn)
self.decay_constant_entries.pop(dn)
self.decay_constant_name = self.decay_constant_names[-1] if self.decay_constant_names else NULL_STR
def _add_atm_constant_fired(self):
e = AtmConstantsEntry()
for a in ATM_ATTRS:
setattr(e, a, getattr(self, a))
info = e.edit_traits()
name = e.name
if info.result and name:
if name not in self.atm_constant_names:
nv = e.totuple()
for k, v in self.atm_constant_entries.items():
print('k={}, v={}, nv={}'.format(k, v, nv))
exists = next((k for k, v in self.atm_constant_entries.items() if nv == v), None)
if exists:
warning(None,
'Atm constant entry with those values already exists.\nExisting entry named "{}"'.format(
exists))
else:
self.atm_constant_names.append(name)
self.atm_constant_entries[name] = e.totuple()
self.atm_constant_name = name
else:
                warning(None, 'Atm constant entry with that name already exists')
def _add_decay_constant_fired(self):
e = DecayConstantEntry()
for a in LAMBDA_K_ATTRS:
setattr(e, a, getattr(self, a))
info = e.edit_traits()
name = e.name
if info.result and name:
if name not in self.decay_constant_names:
nv = e.totuple()
exists = next((k for k, v in self.decay_constant_entries.items() if nv == v), None)
if exists:
warning(None,
'Decay constant entry with those values already exists.\nExisting entry named "{}"'.format(
exists))
else:
self.decay_constant_names.append(name)
self.decay_constant_entries[name] = e.totuple()
self.decay_constant_name = name
else:
                warning(None, 'Decay constant entry with that name already exists')
def _decay_constant_name_changed(self, new):
self._update_entries(new, self.decay_constant_entries, LAMBDA_K_ATTRS)
def _atm_constant_name_changed(self, new):
self._update_entries(new, self.atm_constant_entries, ATM_ATTRS)
@on_trait_change('ar40_ar36_atm,ar40_ar36_atm_error, ar40_ar38_atm,ar40_ar38_atm_error')
    def _atm_constants_change(self):
d = self._find_atm_constant_entry()
self.atm_constant_name = d
@on_trait_change('lambda_e,lambda_e_error, lambda_b,lambda_b_error')
def _decay_constants_change(self):
d = self._find_decay_constant_entry()
self.decay_constant_name = d
def _get_total_k_decay(self):
return self.lambda_e + self.lambda_b
def _set_total_k_decay(self, v):
pass
def _get_decay_constant_entry_deletable(self):
return self.decay_constant_name not in (NULL_STR, 'Min et al., 2000', 'Steiger & Jager 1977')
def _get_atm_constant_entry_deletable(self):
return self.atm_constant_name not in (NULL_STR, 'Lee et al., 2006', 'Nier 1950')
def _set_atm_constant_entry_deletable(self, v):
pass
def _set_decay_constant_entry_deletable(self, v):
pass
def _get_value(self, name, value):
if name == 'total_k_decay':
return self._get_total_k_decay()
elif name in ('decay_constant_entry_deletable', 'atm_constant_entry_deletable'):
pass
else:
return super(ArArConstantsPreferences, self)._get_value(name, value)
class ArArConstantsPreferencesPane(PreferencesPane):
category = 'Constants'
model_factory = ArArConstantsPreferences
def _get_decay_group(self):
presets = HGroup(Item('decay_constant_name', editor=EnumEditor(name='decay_constant_names')),
UItem('add_decay_constant',
tooltip='add decay constant entry',
style='custom',
editor=ButtonEditor(image=icon('add'))),
UItem('delete_decay_constant',
tooltip='delete current constant entry',
enabled_when='decay_constant_entry_deletable',
style='custom',
editor=ButtonEditor(image=icon('delete'))))
vs = [
('Ar40K epsilon/yr', 'lambda_e', 'lambda_e_error'),
('Ar40K beta/yr', 'lambda_b', 'lambda_b_error'),
('Cl36/d', 'lambda_cl36', 'lambda_cl36_error'),
('Ar37/d', 'lambda_ar37', 'lambda_ar37_error'),
('Ar39/d', 'lambda_ar39', 'lambda_ar39_error')]
items = [HGroup(Label(l), spring, UItem(v), UItem(e)) for l, v, e in vs]
items.append(Item('use_irradiation_endtime', label='Use Irradiation End time',
tooltip='Use irradiation end time for decay calculations instead of the start time. '
'FYI Mass Spec and NMGRL by default use the start time. '
'McDougall and Harrison 1999 and ArArCalc use the end time.'))
decay = BorderVGroup(presets,
HGroup(Item('total_k_decay', style='readonly', label='Total Ar40K/yr')),
HGroup(spring, Label('Value'),
Spring(width=75, springy=False),
Label(PLUSMINUS_ONE_SIGMA),
Spring(width=75, springy=False)),
*items,
label='Decay')
return decay
def _get_ratio_group(self):
presets = HGroup(Item('atm_constant_name', editor=EnumEditor(name='atm_constant_names')),
UItem('add_atm_constant',
tooltip='add atm constant entry',
style='custom',
editor=ButtonEditor(image=icon('add'))),
UItem('delete_atm_constant',
tooltip='delete current constant entry',
enabled_when='atm_constant_entry_deletable',
style='custom',
editor=ButtonEditor(image=icon('delete'))))
ratios = VGroup(
presets,
HGroup(Spring(springy=False, width=125),
Label('Value'),
Spring(springy=False, width=55),
Label(PLUSMINUS_ONE_SIGMA),
Spring(springy=False, width=55),
Label('Citation')),
HGroup(Item('ar40_ar36_atm', label='(40Ar/36Ar)atm'),
Item('ar40_ar36_atm_error', show_label=False),
Item('ar40_ar36_atm_citation', show_label=False),
enabled_when='atm_constant_entry_deletable'),
HGroup(Item('ar40_ar38_atm', label='(40Ar/38Ar)atm'),
Item('ar40_ar38_atm_error', show_label=False),
Item('ar40_ar38_atm_citation', show_label=False),
enabled_when='atm_constant_entry_deletable'),
Item('_'),
HGroup(
Item('ar37_ar39_mode', label='(37Ar/39Ar)K'),
Item('ar37_ar39', show_label=False, enabled_when='ar37_ar39_mode=="Fixed"'),
Item('ar37_ar39_error', show_label=False, enabled_when='ar37_ar39_mode=="Fixed"')),
label='Ratios')
return ratios
def traits_view(self):
ratios = self._get_ratio_group()
decay = self._get_decay_group()
spectrometer = VGroup(
Item('abundance_sensitivity'),
Item('sensitivity',
tooltip='Nominal spectrometer sensitivity saved with analysis'),
label='Spectrometer')
general = VGroup(Item('age_units', label='Age Units'),
Item('allow_negative_ca_correction',
                              tooltip='If checked Ca36 can be negative when correcting Ar36 for Ca interference',
label='Allow Negative Ca Correction'),
label='General')
v = View(general, decay, ratios, spectrometer)
return v
# ============= EOF =============================================
|
|
import struct
import glob
from warnings import warn
from pcapdump import *
from daintree import *
from pcapdlt import *
from kbutils import * #provides serial, usb, USBVER
from zigbeedecode import * #would like to import only within killerbee class
from dot154decode import * #would like to import only within killerbee class
from config import * #to get DEV_ENABLE_* variables
# Utility Functions
def getKillerBee(channel):
'''
    Returns an instance of a KillerBee device, set up on the given channel.
    Error handling for KillerBee creation and setting of the channel is wrapped
    and will raise an Exception() on failure.
    @param channel: The 802.15.4 channel to set on the new device.
    @return: A KillerBee instance initialized to the given channel.
'''
kb = KillerBee()
if kb is None:
raise Exception("Failed to create a KillerBee instance.")
try:
kb.set_channel(channel)
except Exception, e:
raise Exception('Error: Failed to set channel to %d' % channel, e)
return kb
def kb_dev_list(vendor=None, product=None):
'''Deprecated. Use show_dev or call kbutils.devlist.'''
    return kbutils.devlist(vendor=vendor, product=product)
def show_dev(vendor=None, product=None, gps=None, include=None):
'''
A basic function to output the device listing.
Placed here for reuse, as many tool scripts were implementing it.
@param gps: Provide device names in this argument (previously known as
'gps') which you wish to not be enumerated. Aka, exclude these items.
@param include: Provide device names in this argument if you would like only
these to be enumerated. Aka, include only these items.
'''
print("{: >14} {: <20} {: >10}".format("Dev", "Product String", "Serial Number"))
for dev in kbutils.devlist(vendor=vendor, product=product, gps=gps, include=include):
print("{0: >14} {1: <20} {2: >10}".format(dev[0], dev[1], dev[2]))
# KillerBee Class
class KillerBee:
def __init__(self, device=None, datasource=None, gps=None):
'''
Instantiates the KillerBee class.
@type device: String
@param device: Device identifier, either USB vendor:product, serial device node, or IP address
@type datasource: String
@param datasource: A known datasource type that is used
by dblog to record how the data was captured.
@type gps: String
@param gps: Optional serial device identifier for an attached GPS
unit. If provided, or if global variable has previously been set,
        KillerBee skips that device in the initialization process.
@return: None
@rtype: None
'''
global gps_devstring
if gps_devstring is None and gps is not None:
gps_devstring = gps
self.dev = None
self.__bus = None
self.driver = None
# IP devices may be the most straightforward, and we aren't doing
# discovery, just connecting to defined addresses, so we'll check
# first to see if we have an IP address given as our device parameter.
if (device is not None) and kbutils.isIpAddr(device):
from dev_sewio import isSewio
if isSewio(device):
from dev_sewio import SEWIO
self.driver = SEWIO(dev=device) #give it the ip address
else: del isSewio
        # Figure out a device if one is not set, trying USB devices next
if self.driver is None:
if device is None:
result = kbutils.search_usb(None)
if result != None:
if USBVER == 0:
(self.__bus, self.dev) = result
elif USBVER == 1:
#TODO remove self.__bus attribute, not needed in 1.x as all info in self.dev
self.dev = result
# Recognize if device is provided in the USB format (like a 012:456 string):
elif ":" in device:
result = kbutils.search_usb(device)
if result == None:
raise KBInterfaceError("Did not find a USB device matching %s." % device)
else:
if USBVER == 0:
(self.__bus, self.dev) = result
elif USBVER == 1:
#TODO remove self.__bus attribute, not needed in 1.x as all info in self.dev
self.dev = result
if self.dev is not None:
if self.__device_is(RZ_USB_VEND_ID, RZ_USB_PROD_ID):
from dev_rzusbstick import RZUSBSTICK
self.driver = RZUSBSTICK(self.dev, self.__bus)
elif self.__device_is(ZN_USB_VEND_ID, ZN_USB_PROD_ID):
raise KBInterfaceError("Zena firmware not yet implemented.")
else:
raise KBInterfaceError("KillerBee doesn't know how to interact with USB device vendor=%04x, product=%04x.".format(self.dev.idVendor, self.dev.idProduct))
# Figure out a device from serial if one is not set
#TODO be able to try more than one serial device here (merge with devlist code somehow)
# if device == None:
# seriallist = get_serial_ports()
# if len(seriallist) > 0:
# device = seriallist[0]
# If a USB device driver was not loaded, now we try serial devices
if self.driver is None:
# If no device was specified
if device is None:
glob_list = get_serial_ports()
if len(glob_list) > 0:
#TODO be able to check other devices if this one is not correct
device = glob_list[0]
# Recognize if device specified by serial string:
if (device is not None) and kbutils.isSerialDeviceString(device):
self.dev = device
if (self.dev == gps_devstring):
pass
elif (DEV_ENABLE_ZIGDUINO and kbutils.iszigduino(self.dev)):
from dev_zigduino import ZIGDUINO
self.driver = ZIGDUINO(self.dev)
elif (DEV_ENABLE_FREAKDUINO and kbutils.isfreakduino(self.dev)):
from dev_freakduino import FREAKDUINO
self.driver = FREAKDUINO(self.dev)
else:
gfccspi,subtype = isgoodfetccspi(self.dev)
if gfccspi and subtype == 0:
from dev_telosb import TELOSB
self.driver = TELOSB(self.dev)
elif gfccspi and subtype == 1:
from dev_apimote import APIMOTE
self.driver = APIMOTE(self.dev, revision=1)
elif gfccspi and subtype == 2:
from dev_apimote import APIMOTE
self.driver = APIMOTE(self.dev, revision=2)
else:
raise KBInterfaceError("KillerBee doesn't know how to interact with serial device at '%s'." % self.dev)
# Otherwise unrecognized device string type was provided:
else:
raise KBInterfaceError("KillerBee doesn't understand device given by '%s'." % device)
# Start a connection to the remote packet logging server, if able:
if datasource is not None:
try:
import dblog
self.dblog = dblog.DBLogger(datasource)
except Exception as e:
warn("Error initializing DBLogger (%s)." % e)
datasource = None #give up nicely if error connecting, etc.
def __device_is(self, vendorId, productId):
'''
Compares KillerBee class' device data to a known USB vendorId and productId
        @type vendorId: Integer
        @type productId: Integer
@rtype: Boolean
@return: True if KillerBee class has device matching the vendor and product IDs provided.
'''
if self.dev.idVendor == vendorId and self.dev.idProduct == productId: return True
else: return False
def get_dev_info(self):
'''
Returns device information in a list identifying the device. Implemented by the loaded driver.
@rtype: List
@return: List of 3 strings identifying device.
'''
return self.driver.get_dev_info()
def close(self):
'''
Closes the device out.
@return: None
@rtype: None
'''
if self.driver != None: self.driver.close()
if hasattr(self, "dblog") and (self.dblog is not None):
self.dblog.close()
def check_capability(self, capab):
'''
Uses the specified capability to determine if the opened device
is supported. Returns True when supported, else False.
@rtype: Boolean
'''
return self.driver.capabilities.check(capab)
def is_valid_channel(self, channel):
'''
Use the driver's capabilities class to determine if a requested channel number
is within the capabilities of that device.
@rtype: Boolean
'''
return self.driver.capabilities.is_valid_channel(channel)
def get_capabilities(self):
'''
Returns a list of capability information for the device.
@rtype: List
@return: Capability information for the opened device.
'''
return self.driver.capabilities.getlist()
def sniffer_on(self, channel=None):
'''
Turns the sniffer on such that pnext() will start returning observed
data. Will set the command mode to Air Capture if it is not already
set.
@type channel: Integer
@param channel: Sets the channel, optional
@rtype: None
'''
return self.driver.sniffer_on(channel)
def sniffer_off(self):
'''
Turns the sniffer off, freeing the hardware for other functions. It is
not necessary to call this function before closing the interface with
close().
@rtype: None
'''
return self.driver.sniffer_off()
@property
def channel(self):
"""Getter function for the channel that was last set on the device."""
        # Driver must have this variable name set in its set_channel function
return self.driver._channel
def set_channel(self, channel):
'''
        Sets the radio interface to the specified channel. Currently, support is
limited to 2.4 GHz channels 11 - 26.
@type channel: Integer
@param channel: Sets the channel, optional
@rtype: None
'''
if hasattr(self, "dblog"):
self.dblog.set_channel(channel)
self.driver.set_channel(channel)
def is_valid_channel(self, channel):
'''
Based on sniffer capabilities, return if this is an OK channel number.
@rtype: Boolean
'''
return self.driver.capabilities.is_valid_channel(channel)
def inject(self, packet, channel=None, count=1, delay=0):
'''
Injects the specified packet contents.
@type packet: String
@param packet: Packet contents to transmit, without FCS.
@type channel: Integer
@param channel: Sets the channel, optional
@type count: Integer
@param count: Transmits a specified number of frames, def=1
@type delay: Float
        @param delay: Delay between each frame, def=0
@rtype: None
'''
return self.driver.inject(packet, channel, count, delay)
def pnext(self, timeout=100):
'''
Returns packet data as a string, else None.
@type timeout: Integer
@param timeout: Timeout to wait for packet reception in usec
@rtype: List
        @return: Returns None if the timeout expires and no packet is received. When a packet is received, a list is returned, in the form [ String: packet contents | Bool: Valid CRC | Int: Unscaled RSSI ]
'''
return self.driver.pnext(timeout)
def jammer_on(self, channel=None):
'''
Attempts reflexive jamming on all 802.15.4 frames.
Targeted frames must be >12 bytes for reliable jamming in current firmware.
@type channel: Integer
@param channel: Sets the channel, optional.
@rtype: None
'''
return self.driver.jammer_on(channel=channel)
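# Example (illustrative sketch): a minimal sniffing loop built on the KillerBee
# API documented above. Channel 11 is an arbitrary 2.4 GHz channel; adjust it
# to whatever your hardware supports.
def _example_sniff(channel=11, packets=10):
    kb = KillerBee()
    try:
        kb.set_channel(channel)
        kb.sniffer_on()
        captured = []
        while len(captured) < packets:
            frame = kb.pnext()  # returns None when the timeout expires
            if frame is not None:
                captured.append(frame)
        return captured
    finally:
        kb.sniffer_off()
        kb.close()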
|
|
import calendar
import datetime
from decimal import Decimal
from typing import Any, Dict, List, Optional, Tuple
import swagger_client as saltedge_client
from budget.models import Account, Category, Connection, Transaction
from django.db.models import QuerySet, Sum
from django.http.request import QueryDict
from users.models import User
def import_saltedge_connection(
saltedge_connection: saltedge_client.Connection, user: User
) -> Tuple[Connection, bool]:
return Connection.objects.update_or_create(
external_id=int(saltedge_connection.id),
defaults={"provider": saltedge_connection.provider_name, "user": user},
)
def import_saltedge_connections(
saltedge_connections: List[saltedge_client.Connection], user: User
) -> List[Tuple["Connection", bool]]:
output = []
for saltedge_connection in saltedge_connections:
output.append(import_saltedge_connection(saltedge_connection, user))
return output
def import_saltedge_accounts(
saltedge_accounts: List[saltedge_client.Account], user: User
) -> List[Tuple["Account", bool]]:
output = []
for saltedge_account in saltedge_accounts:
alias = (
saltedge_account.extra.account_name
if saltedge_account.extra.account_name
else ""
)
o = Account.objects.update_or_create(
external_id=int(saltedge_account.id),
defaults={
"name": saltedge_account.name,
"alias": alias,
"connection": Connection.objects.get(
external_id=int(saltedge_account.connection_id)
),
"user": user,
},
)
output.append(o)
return output
def import_saltedge_transactions(
saltedge_transactions: List[saltedge_client.Transaction], user: User
) -> List[Tuple["Transaction", bool]]:
output = []
for saltedge_transaction in saltedge_transactions:
o = Transaction.objects.update_or_create(
external_id=int(saltedge_transaction.id),
defaults={
"date": saltedge_transaction.made_on,
"amount": saltedge_transaction.amount,
"description": saltedge_transaction.description,
"account": Account.objects.get(
external_id=saltedge_transaction.account_id
),
"user": user,
},
)
output.append(o)
return output
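# Example (illustrative sketch): the intended import order, since accounts
# reference connections and transactions reference accounts. The saltedge_*
# arguments are assumed to come from the Salt Edge API client.
def _example_import_flow(saltedge_connections, saltedge_accounts,
                         saltedge_transactions, user):
    import_saltedge_connections(saltedge_connections, user)
    import_saltedge_accounts(saltedge_accounts, user)
    return import_saltedge_transactions(saltedge_transactions, user)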
def create_initial_balance(
account: Account,
saltedge_account: saltedge_client.Account,
saltedge_transactions: List[saltedge_client.Transaction],
) -> Transaction:
initial_balance = saltedge_account.balance - sum_saltedge_transactions(
saltedge_transactions
)
oldest_saltedge_transaction = get_oldest_saltedge_transaction(saltedge_transactions)
made_on = (
oldest_saltedge_transaction.made_on
if oldest_saltedge_transaction
else datetime.date.today()
)
return Transaction.objects.create(
date=made_on,
amount=initial_balance,
description="Initial balance",
account=account,
user=account.user,
)
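# Worked example (hedged, illustrative numbers): if the SaltEdge account balance is
# 1000.00 and the imported transactions sum to 900.00, create_initial_balance records
# a single "Initial balance" transaction of 100.00 dated on the oldest imported
# transaction's made_on (or today's date when no transactions were imported).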
def sum_saltedge_transactions(transactions: List[saltedge_client.Transaction]) -> float:
return sum(t.amount for t in transactions)
def get_oldest_saltedge_transaction(
transactions: List[saltedge_client.Transaction],
) -> Optional[saltedge_client.Transaction]:
oldest = None
for transaction in transactions:
if not oldest or transaction.made_on < oldest.made_on:
oldest = transaction
return oldest
def get_date_range_per_month(
from_date: datetime.date, to_date: datetime.date
) -> List[Tuple[datetime.date, datetime.date]]:
date_ranges = []
start_date = from_date
while abs(diff_month(start_date, to_date)) > 0:
end_date = get_month_end(start_date)
date_ranges.append((start_date, end_date))
start_date = get_month_start(add_month(start_date))
date_ranges.append((start_date, to_date))
return date_ranges
def diff_month(from_date: datetime.date, to_date: datetime.date) -> int:
return (from_date.year - to_date.year) * 12 + from_date.month - to_date.month
def add_month(date: datetime.date) -> datetime.date:
year = date.year + date.month // 12
month = date.month % 12 + 1
day = min(date.day, calendar.monthrange(year, month)[1])
return datetime.date(year, month, day)
def get_month_start(date: datetime.date) -> datetime.date:
return datetime.date(date.year, date.month, 1)
def get_month_end(date: datetime.date) -> datetime.date:
return datetime.date(
date.year, date.month, calendar.monthrange(date.year, date.month)[1]
)
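# Example (derived from the helpers above):
#   get_date_range_per_month(datetime.date(2021, 1, 15), datetime.date(2021, 3, 10))
# returns
#   [(date(2021, 1, 15), date(2021, 1, 31)),
#    (date(2021, 2, 1), date(2021, 2, 28)),
#    (date(2021, 3, 1), date(2021, 3, 10))]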
def get_income_report(
accounts: List[Account],
from_date: datetime.date,
to_date: datetime.date,
excluded_categories: Optional[List[Category]] = None,
) -> Dict[str, Any]:
income_records = get_income_record_per_month(
accounts, from_date, to_date, excluded_categories
)
summary = get_income_records_summary(income_records)
return {"records": income_records, "summary": summary}
def get_income_record_per_month(
accounts: List[Account],
from_date: datetime.date,
to_date: datetime.date,
excluded_categories: Optional[List[Category]] = None,
) -> List[Dict[str, Any]]:
records = []
for start, end in get_date_range_per_month(from_date, to_date):
record = get_income_record(accounts, start, end, excluded_categories)
records.append(record)
return records
def get_income_record(
accounts: List[Account],
from_date: datetime.date,
to_date: datetime.date,
excluded_categories: Optional[List[Category]] = None,
) -> Dict[str, Any]:
transactions = get_income_transactions(
accounts, from_date, to_date, excluded_categories
)
revenue = get_revenue(transactions)
expenses = get_expenses(transactions)
income = revenue - expenses
return {
"from": from_date,
"to": to_date,
"revenue": revenue,
"expenses": expenses,
"income": income,
}
def get_income_transactions(
accounts: List[Account],
from_date: datetime.date,
to_date: datetime.date,
excluded_categories: Optional[List[Category]] = None,
) -> QuerySet:
filter_query: Dict[str, Any] = {
"account__in": accounts,
}
filter_query["date__gte"] = from_date
filter_query["date__lte"] = to_date
transactions = Transaction.objects.filter(**filter_query)
if excluded_categories:
transactions = transactions.exclude(category__in=excluded_categories)
return transactions
def get_revenue(transactions: QuerySet) -> Decimal:
revenue_transactions = transactions.filter(amount__gt=0.0)
revenue = revenue_transactions.aggregate(Sum("amount"))["amount__sum"]
return revenue if revenue else Decimal()
def get_expenses(transactions: QuerySet) -> Decimal:
expense_transactions = transactions.filter(amount__lt=0.0)
expenses = expense_transactions.aggregate(Sum("amount"))["amount__sum"]
return abs(expenses) if expenses else Decimal()
def get_income_records_summary(records: List[Dict[str, Any]]) -> Dict[str, Any]:
revenue = Decimal()
expenses = Decimal()
income = Decimal()
for r in records:
revenue += r["revenue"]
expenses += r["expenses"]
income += r["income"]
from_date = records[0]["from"] if records else datetime.date.today()
to_date = records[-1]["to"] if records else datetime.date.today()
return {
"from": from_date,
"to": to_date,
"revenue": revenue,
"expenses": expenses,
"income": income,
}
def get_balance_report(
accounts: List[Account], from_date: datetime.date, to_date: datetime.date,
) -> Dict[str, Any]:
records = get_balance_record_per_month(accounts, from_date, to_date)
summary = get_balance_records_summary(records)
return {"records": records, "summary": summary}
def get_balance_record_per_month(
accounts: List[Account], from_date: datetime.date, to_date: datetime.date,
) -> List[Dict[str, Any]]:
records = []
for start, end in get_date_range_per_month(from_date, to_date):
record = get_balance_record(accounts, start, end)
records.append(record)
return records
def get_balance_record(
accounts: List[Account], from_date: datetime.date, to_date: datetime.date,
) -> Dict[str, Any]:
opening_balance = get_opening_balance(from_date, accounts)
ending_balance = get_ending_balance(to_date, accounts)
difference = ending_balance - opening_balance
return {
"from": from_date,
"to": to_date,
"opening_balance": opening_balance,
"ending_balance": ending_balance,
"difference": difference,
}
def get_opening_balance(date: datetime.date, accounts: List[Account]) -> Decimal:
all_transactions = Transaction.objects.filter(account__in=accounts, date__lt=date)
opening_balance = all_transactions.aggregate(Sum("amount"))["amount__sum"]
return opening_balance if opening_balance else Decimal()
def get_ending_balance(date: datetime.date, accounts: List[Account]) -> Decimal:
all_transactions = Transaction.objects.filter(account__in=accounts, date__lte=date)
ending_balance = all_transactions.aggregate(Sum("amount"))["amount__sum"]
return ending_balance if ending_balance else Decimal()
def get_balance_records_summary(records: List[Dict[str, Any]]) -> Dict[str, Any]:
opening_balance = records[0]["opening_balance"] if records else Decimal()
ending_balance = records[-1]["ending_balance"] if records else Decimal()
difference = ending_balance - opening_balance
from_date = records[0]["from"] if records else datetime.date.today()
to_date = records[-1]["to"] if records else datetime.date.today()
return {
"from": from_date,
"to": to_date,
"opening_balance": opening_balance,
"ending_balance": ending_balance,
"difference": difference,
}
def get_category_balance_report(
categories: List[Category],
accounts: List[Account],
from_date: datetime.date,
to_date: datetime.date,
) -> Dict[str, Any]:
header = get_category_balance_report_header(categories)
records = get_category_balance_record_per_month(
categories, accounts, from_date, to_date
)
summary = get_category_balance_records_summary(records)
return {"header": header, "records": records, "summary": summary}
def get_category_balance_report_header(categories: List[Category]) -> List[str]:
return ["From", "To"] + [c.name for c in categories]
def get_category_balance_record_per_month(
categories: List[Category],
accounts: List[Account],
from_date: datetime.date,
to_date: datetime.date,
) -> List[Dict[str, Any]]:
records = []
for start, end in get_date_range_per_month(from_date, to_date):
record: Dict[str, Any] = {"from": start, "to": end}
for category in categories:
record[category.name] = get_category_balance(category, accounts, start, end)
records.append(record)
return records
def get_category_balance(
category: Category,
accounts: List[Account],
from_date: datetime.date,
to_date: datetime.date,
) -> Decimal:
transactions = Transaction.objects.filter(
category=category, account__in=accounts, date__gte=from_date, date__lte=to_date
)
balance = transactions.aggregate(Sum("amount"))["amount__sum"]
return balance if balance else Decimal()
def get_category_balance_records_summary(
records: List[Dict[str, Any]]
) -> Dict[str, Any]:
balance: Dict[str, Decimal] = {}
for r in records:
for k, v in r.items():
if k == "from" or k == "to":
continue
balance[k] = balance.get(k, Decimal()) + v
from_date = records[0]["from"] if records else datetime.date.today()
to_date = records[-1]["to"] if records else datetime.date.today()
return {"from": from_date, "to": to_date, **balance}
def query_dict_to_filter_query(
query_dict: QueryDict, single_value_keys: List[str], multiple_values_keys: List[str]
) -> Dict[str, Any]:
output = {}
for k, v in query_dict.lists():
if k in single_value_keys and v[0] != "":
output[k] = v[0]
elif k in multiple_values_keys and v[0] != "":
output[k] = v
return output
def filter_query_to_query_dict(
filter_query: Dict[str, Any],
single_value_keys: List[str],
multiple_values_keys: List[str],
) -> QueryDict:
output = QueryDict("", mutable=True)
for k, v in filter_query.items():
if k in single_value_keys:
output[k] = v
elif k in multiple_values_keys:
output.setlist(k, v)
return output
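# Example round trip (hedged, illustrative values): with
#   single_value_keys=["description"] and multiple_values_keys=["categories"],
# a QueryDict("description=rent&categories=1&categories=2") maps to
#   {"description": "rent", "categories": ["1", "2"]}
# and filter_query_to_query_dict() turns that dict back into an equivalent QueryDict.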
def filter_transactions(user: User, **kwargs: Any) -> QuerySet:
query: Dict[str, Any] = {
"user": user,
}
if "from_date" in kwargs:
query["date__gte"] = kwargs["from_date"]
if "to_date" in kwargs:
query["date__lte"] = kwargs["to_date"]
if "min_amount" in kwargs:
query["amount__gte"] = kwargs["min_amount"]
if "max_amount" in kwargs:
query["amount__lte"] = kwargs["max_amount"]
if "categories" in kwargs:
query["category__in"] = kwargs["categories"]
if "description" in kwargs:
query["description__icontains"] = kwargs["description"]
if "accounts" in kwargs:
query["account__in"] = kwargs["accounts"]
return Transaction.objects.filter(**query)
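# Example (hedged): filter_transactions(user, from_date=date(2021, 1, 1), categories=cats)
# builds Transaction.objects.filter(user=user, date__gte=date(2021, 1, 1), category__in=cats);
# any keyword that is not passed simply does not constrain the queryset.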
def query_dict_to_transaction_filter_query(query_dict: QueryDict) -> Dict[str, Any]:
single_value_keys = [
"from_date",
"to_date",
"min_amount",
"max_amount",
"description",
]
multiple_values_keys = ["categories", "accounts"]
return query_dict_to_filter_query(
query_dict, single_value_keys, multiple_values_keys
)
def transaction_filter_query_to_query_dict(filter_query: Dict[str, Any]) -> QueryDict:
single_value_keys = [
"from_date",
"to_date",
"min_amount",
"max_amount",
"description",
]
multiple_values_keys = ["categories", "accounts"]
return filter_query_to_query_dict(
filter_query, single_value_keys, multiple_values_keys
)
def filter_accounts(user: User, **kwargs: Any) -> QuerySet:
query: Dict[str, Any] = {
"user": user,
}
if "name" in kwargs:
query["name__icontains"] = kwargs["name"]
if "alias" in kwargs:
query["alias__icontains"] = kwargs["alias"]
if "account_types" in kwargs:
query["account_type__in"] = kwargs["account_types"]
if "connections" in kwargs:
query["connection__in"] = kwargs["connections"]
return Account.objects.filter(**query)
def query_dict_to_account_filter_query(query_dict: QueryDict) -> Dict[str, Any]:
single_value_keys = [
"name",
"alias",
]
multiple_values_keys = ["account_types", "connections"]
return query_dict_to_filter_query(
query_dict, single_value_keys, multiple_values_keys
)
def account_filter_query_to_query_dict(filter_query: Dict[str, Any]) -> QueryDict:
single_value_keys = [
"name",
"alias",
]
multiple_values_keys = ["account_types", "connections"]
return filter_query_to_query_dict(
filter_query, single_value_keys, multiple_values_keys
)
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from json import dumps
from unittest import main
from tornado.escape import json_decode
from tornado.web import HTTPError
from labman.gui.testing import TestHandlerBase
from labman.db.plate import Plate
from labman.db.user import User
from labman.gui.handlers.plate import (
_get_plate, plate_handler_patch_request, plate_layout_handler_get_request,
plate_map_handler_get_request)
class TestUtils(TestHandlerBase):
def test_get_plate(self):
self.assertEqual(_get_plate('21'), Plate(21))
regex = 'Plate 100 doesn\'t exist'
with self.assertRaisesRegex(HTTPError, regex):
_get_plate(100)
def test_plate_map_handler_get_request(self):
regex = 'Plating process 100 doesn\'t exist'
with self.assertRaisesRegex(HTTPError, regex):
plate_map_handler_get_request(100)
obs = plate_map_handler_get_request(10)
exp_plate_confs = [[1, '96-well deep-well plate', 8, 12],
[2, '96-well microtiter plate', 8, 12],
[3, '384-well microtiter plate', 16, 24]]
exp_contr_desc = [
{'external_id': 'blank',
'description': 'gDNA extraction blanks. Represents an empty '
'extraction well.'},
{'external_id': 'empty',
'description': 'Empty well. Represents an empty well that should '
'not be included in library preparation.'},
{'external_id': 'vibrio.positive.control',
'description': 'Bacterial isolate control (Vibrio fischeri ES114)'
'. Represents an extraction well loaded with '
'Vibrio.'},
{'external_id': 'zymo.mock',
'description': 'Bacterial community control (Zymo Mock D6306). '
'Represents an extraction well loaded with Zymo '
'Mock community.'}]
exp = {'plate_confs': exp_plate_confs, 'plate_id': 21,
'process_id': 10, 'controls_description': exp_contr_desc}
self.assertEqual(obs, exp)
obs = plate_map_handler_get_request(None)
exp = {'plate_confs': exp_plate_confs, 'plate_id': None,
'process_id': None, 'controls_description': exp_contr_desc}
self.assertEqual(obs, exp)
def test_plate_handler_patch_request(self):
tester = Plate(21)
user = User('test@foo.bar')
# Incorrect path parameter
regex = 'Incorrect path parameter'
with self.assertRaisesRegex(HTTPError, regex):
plate_handler_patch_request(user, 21, 'replace', '/name/newname',
'NewName', None)
# Unknown attribute
regex = 'Attribute unknown not recognized'
with self.assertRaisesRegex(HTTPError, regex):
plate_handler_patch_request(user, 21, 'replace', '/unknown/',
'NewName', None)
# Unknown operation
regex = ('Operation add not supported. Current supported '
'operations: replace')
with self.assertRaisesRegex(HTTPError, regex):
plate_handler_patch_request(user, 21, 'add', '/name/',
'NewName', None)
# Plate doesn't exist
regex = 'Plate 100 doesn\'t exist'
with self.assertRaisesRegex(HTTPError, regex):
plate_handler_patch_request(user, 100, 'replace', '/name/',
'NewName', None)
# Test success - Name
plate_handler_patch_request(user, 21, 'replace', '/name/',
'NewName', None)
self.assertEqual(tester.external_id, 'NewName')
tester.external_id = 'Test plate 1'
# Test success - discarded
plate_handler_patch_request(user, 21, 'replace', '/discarded/',
True, None)
self.assertEqual(tester.discarded, True)
tester.discarded = False
def test_plate_layout_handler_get_request(self):
obs = plate_layout_handler_get_request(21)
self.assertEqual(len(obs), 8)
exp = [{'sample': '1.SKB1.640202.21.A1', 'notes': None},
{'sample': '1.SKB2.640194.21.A2', 'notes': None},
{'sample': '1.SKB3.640195.21.A3', 'notes': None},
{'sample': '1.SKB4.640189.21.A4', 'notes': None},
{'sample': '1.SKB5.640181.21.A5', 'notes': None},
{'sample': '1.SKB6.640176.21.A6', 'notes': None},
{'sample': '1.SKB7.640196.21.A7', 'notes': None},
{'sample': '1.SKB8.640193.21.A8', 'notes': None},
{'sample': '1.SKB9.640200.21.A9', 'notes': None},
{'sample': '1.SKD1.640179.21.A10', 'notes': None},
{'sample': '1.SKD2.640178.21.A11', 'notes': None},
{'sample': '1.SKD3.640198.21.A12', 'notes': None}]
self.assertEqual(obs[0], exp)
# The 7th row contains vibrio controls
exp = [{'sample': 'vibrio.positive.control.21.G%s' % i, 'notes': None}
for i in range(1, 13)]
self.assertEqual(obs[6], exp)
# The 8th row contains blanks
exp = [{'sample': 'blank.21.H%s' % i, 'notes': None}
for i in range(1, 12)]
self.assertEqual(obs[7][:-1], exp)
self.assertEqual(obs[7][11], {'sample': 'empty.21.H12', 'notes': None})
regex = 'Plate 100 doesn\'t exist'
with self.assertRaisesRegex(HTTPError, regex):
plate_layout_handler_get_request(100)
class TestPlateHandlers(TestHandlerBase):
def test_get_plate_list_handler(self):
response = self.get('/plate_list')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 26)
self.assertEqual(obs_data[0], [1, 'EMP 16S V4 primer plate 1', None])
response = self.get('/plate_list?plate_type=%5B%22sample%22%5D')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 1)
self.assertEqual(
obs_data[0], [
21, 'Test plate 1',
['Identification of the Microbiomes for Cannabis Soils']])
response = self.get(
'/plate_list?plate_type=%5B%22compressed+gDNA%22%2C+%22'
'normalized+gDNA%22%5D')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 2)
self.assertEqual(
obs_data,
[[24, 'Test compressed gDNA plate 1',
['Identification of the Microbiomes for Cannabis Soils']],
[25, 'Test normalized gDNA plate 1',
['Identification of the Microbiomes for Cannabis Soils']]])
response = self.get(
'/plate_list?plate_type=%5B%22compressed+gDNA%22%2C+%22'
'normalized+gDNA%22%5D&only_quantified=true')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 1)
self.assertEqual(
obs_data,
[[24, 'Test compressed gDNA plate 1',
['Identification of the Microbiomes for Cannabis Soils']]])
def test_get_plate_map_handler(self):
response = self.get('/plate')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/plate?process_id=10')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/plate?process_id=100')
self.assertEqual(response.code, 404)
self.assertNotEqual(response.body, '')
def test_get_plate_name_handler(self):
response = self.get('/platename')
# It is missing the parameter
self.assertEqual(response.code, 400)
# It doesn't exist
response = self.get('/platename?new-name=something')
self.assertEqual(response.code, 404)
# It exists
response = self.get('/platename?new-name=Test%20plate%201')
self.assertEqual(response.code, 200)
def test_get_plate_handler(self):
response = self.get('/plate/21/')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = {'plate_id': 21,
'plate_name': 'Test plate 1',
'discarded': False,
'plate_configuration': [1, '96-well deep-well plate', 8, 12],
'notes': None,
'studies': [1],
'duplicates': [
[1, 1, '1.SKB1.640202.21.A1'],
[2, 1, '1.SKB1.640202.21.B1'],
[3, 1, '1.SKB1.640202.21.C1'],
[4, 1, '1.SKB1.640202.21.D1'],
[5, 1, '1.SKB1.640202.21.E1'],
[6, 1, '1.SKB1.640202.21.F1'],
[1, 2, '1.SKB2.640194.21.A2'],
[2, 2, '1.SKB2.640194.21.B2'],
[3, 2, '1.SKB2.640194.21.C2'],
[4, 2, '1.SKB2.640194.21.D2'],
[5, 2, '1.SKB2.640194.21.E2'],
[6, 2, '1.SKB2.640194.21.F2'],
[1, 3, '1.SKB3.640195.21.A3'],
[2, 3, '1.SKB3.640195.21.B3'],
[3, 3, '1.SKB3.640195.21.C3'],
[4, 3, '1.SKB3.640195.21.D3'],
[5, 3, '1.SKB3.640195.21.E3'],
[6, 3, '1.SKB3.640195.21.F3'],
[1, 4, '1.SKB4.640189.21.A4'],
[2, 4, '1.SKB4.640189.21.B4'],
[3, 4, '1.SKB4.640189.21.C4'],
[4, 4, '1.SKB4.640189.21.D4'],
[5, 4, '1.SKB4.640189.21.E4'],
[6, 4, '1.SKB4.640189.21.F4'],
[1, 5, '1.SKB5.640181.21.A5'],
[2, 5, '1.SKB5.640181.21.B5'],
[3, 5, '1.SKB5.640181.21.C5'],
[4, 5, '1.SKB5.640181.21.D5'],
[5, 5, '1.SKB5.640181.21.E5'],
[6, 5, '1.SKB5.640181.21.F5'],
[1, 6, '1.SKB6.640176.21.A6'],
[2, 6, '1.SKB6.640176.21.B6'],
[3, 6, '1.SKB6.640176.21.C6'],
[4, 6, '1.SKB6.640176.21.D6'],
[5, 6, '1.SKB6.640176.21.E6'],
[6, 6, '1.SKB6.640176.21.F6'],
[1, 7, '1.SKB7.640196.21.A7'],
[2, 7, '1.SKB7.640196.21.B7'],
[3, 7, '1.SKB7.640196.21.C7'],
[4, 7, '1.SKB7.640196.21.D7'],
[5, 7, '1.SKB7.640196.21.E7'],
[6, 7, '1.SKB7.640196.21.F7'],
[1, 8, '1.SKB8.640193.21.A8'],
[2, 8, '1.SKB8.640193.21.B8'],
[3, 8, '1.SKB8.640193.21.C8'],
[4, 8, '1.SKB8.640193.21.D8'],
[5, 8, '1.SKB8.640193.21.E8'],
[6, 8, '1.SKB8.640193.21.F8'],
[1, 9, '1.SKB9.640200.21.A9'],
[2, 9, '1.SKB9.640200.21.B9'],
[3, 9, '1.SKB9.640200.21.C9'],
[4, 9, '1.SKB9.640200.21.D9'],
[5, 9, '1.SKB9.640200.21.E9'],
[6, 9, '1.SKB9.640200.21.F9'],
[1, 10, '1.SKD1.640179.21.A10'],
[2, 10, '1.SKD1.640179.21.B10'],
[3, 10, '1.SKD1.640179.21.C10'],
[4, 10, '1.SKD1.640179.21.D10'],
[5, 10, '1.SKD1.640179.21.E10'],
[6, 10, '1.SKD1.640179.21.F10'],
[1, 11, '1.SKD2.640178.21.A11'],
[2, 11, '1.SKD2.640178.21.B11'],
[3, 11, '1.SKD2.640178.21.C11'],
[4, 11, '1.SKD2.640178.21.D11'],
[5, 11, '1.SKD2.640178.21.E11'],
[6, 11, '1.SKD2.640178.21.F11'],
[1, 12, '1.SKD3.640198.21.A12'],
[2, 12, '1.SKD3.640198.21.B12'],
[3, 12, '1.SKD3.640198.21.C12'],
[4, 12, '1.SKD3.640198.21.D12'],
[5, 12, '1.SKD3.640198.21.E12'],
[6, 12, '1.SKD3.640198.21.F12']],
'previous_plates': [],
'unknowns': []}
obs_duplicates = obs.pop('duplicates')
exp_duplicates = exp.pop('duplicates')
self.assertEqual(obs, exp)
self.assertCountEqual(obs_duplicates, exp_duplicates)
# Plate doesn't exist
response = self.get('/plate/100/')
self.assertEqual(response.code, 404)
def test_patch_plate_handler(self):
tester = Plate(21)
data = {'op': 'replace', 'path': '/name/', 'value': 'NewName'}
response = self.patch('/plate/21/', data)
self.assertEqual(response.code, 200)
self.assertEqual(tester.external_id, 'NewName')
tester.external_id = 'Test plate 1'
def test_patch_plate_discarded_handler(self):
tester = Plate(21)
data = {'op': 'replace', 'path': '/discarded/', 'value': True}
response = self.patch('/plate/21/', data)
self.assertEqual(response.code, 200)
self.assertEqual(tester.discarded, True)
tester.discarded = False
def test_get_plate_layout_handler(self):
response = self.get('/plate/21/layout')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
# Spot check some positions, since a more in-depth test has already
# been performed in test_plate_layout_handler_get_request
self.assertEqual(obs[0][0],
{'sample': '1.SKB1.640202.21.A1', 'notes': None})
self.assertEqual(obs[5][9],
{'sample': '1.SKD1.640179.21.F10', 'notes': None})
self.assertEqual(
obs[6][1], {'sample':
'vibrio.positive.control.21.G2', 'notes': None})
self.assertEqual(obs[7][4], {'sample': 'blank.21.H5', 'notes': None})
def test_get_plate_search_handler(self):
response = self.get('/plate_search')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
def test_post_plate_search_handler(self):
# Note: these tests don't exercise all the cases covered in
# db/tests/test_plate.py test_search; instead, they focus on
# testing at least one search based on each of the input
# fields, to verify that these are being passed through
# correctly to the db's Plate.search method.
# Test search by sample names:
post_data = {
'sample_names': dumps(['1.SKB1.640202', '1.SKB2.640194']),
'plate_comment_keywords': "",
'well_comment_keywords': "",
'operation': "INTERSECT"
}
response = self.post('/plate_search', post_data)
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 1)
self.assertEqual(obs_data[0], [21, 'Test plate 1'])
# Test search by plate comment keywords:
# It looks like none of the plates in the test database have
# any notes, so it is necessary to add some to be able to
# test the keywords search functionality; the below is lifted
# verbatim from db/tests/test_plate.py test_search
plate22 = Plate(22)
plate23 = Plate(23)
# Add comments to a plate so we can actually test the
# search functionality
plate22.notes = 'Some interesting notes'
plate23.notes = 'More boring notes'
# end verbatim lift
post_data = {
'sample_names': dumps([]),
'plate_comment_keywords': 'interesting boring',
'well_comment_keywords': "",
'operation': "INTERSECT"
}
response = self.post('/plate_search', post_data)
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 0)
# Test search by intersecting or unioning multiple search terms:
post_data = {
'sample_names': dumps(['1.SKB1.640202']),
'plate_comment_keywords': 'interesting boring',
'well_comment_keywords': "",
'operation': "INTERSECT"
}
response = self.post('/plate_search', post_data)
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 0)
post_data = {
'sample_names': dumps(['1.SKB1.640202']),
'plate_comment_keywords': 'interesting boring',
'well_comment_keywords': "",
'operation': "UNION"
}
response = self.post('/plate_search', post_data)
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 1)
self.assertEqual(obs_data[0], [21, 'Test plate 1'])
# Test search by well comment keywords:
# Add comments to some wells so can test well comment search
plate23.get_well(1, 1).composition.notes = 'What should I write?'
post_data = {
'sample_names': dumps([]),
'plate_comment_keywords': '',
'well_comment_keywords': "write",
'operation': "INTERSECT"
}
response = self.post('/plate_search', post_data)
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['data'])
obs_data = obs['data']
self.assertEqual(len(obs_data), 1)
self.assertEqual(obs_data[0], [23, 'Test 16S plate 1'])
def test_get_plate_process_handler(self):
response = self.get('/plate/21/process')
self.assertEqual(response.code, 200)
self.assertTrue(
response.effective_url.endswith('/plate?process_id=10'))
response = self.get('/plate/22/process')
self.assertEqual(response.code, 200)
self.assertTrue(
response.effective_url.endswith(
'/process/gdna_extraction?process_id=1'))
response = self.get('/plate/23/process')
self.assertEqual(response.code, 200)
self.assertTrue(
response.effective_url.endswith(
'/process/library_prep_16S?process_id=1'))
response = self.get('/plate/24/process')
self.assertEqual(response.code, 200)
self.assertTrue(
response.effective_url.endswith(
'/process/gdna_compression?process_id=1'))
response = self.get('/plate/25/process')
self.assertEqual(response.code, 200)
self.assertTrue(
response.effective_url.endswith(
'/process/normalize?process_id=1'))
response = self.get('/plate/26/process')
self.assertEqual(response.code, 200)
self.assertTrue(
response.effective_url.endswith(
'/process/library_prep_shotgun?process_id=1'))
if __name__ == '__main__':
main()
|
|
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{HostKeys}
"""
import base64
import binascii
from Crypto.Hash import SHA, HMAC
import UserDict
from paramiko.common import *
from paramiko.dsskey import DSSKey
from paramiko.rsakey import RSAKey
from paramiko.util import get_logger
from paramiko.ecdsakey import ECDSAKey
class InvalidHostKey(Exception):
def __init__(self, line, exc):
self.line = line
self.exc = exc
self.args = (line, exc)
class HostKeyEntry:
"""
Representation of a line in an OpenSSH-style "known hosts" file.
"""
def __init__(self, hostnames=None, key=None):
self.valid = (hostnames is not None) and (key is not None)
self.hostnames = hostnames
self.key = key
def from_line(cls, line, lineno=None):
"""
Parses the given line of text to find the names for the host,
the type of key, and the key data. The line is expected to be in the
format used by the openssh known_hosts file.
Lines are expected to not have leading or trailing whitespace.
We don't bother to check for comments or empty lines. All of
that should be taken care of before sending the line to us.
@param line: a line from an OpenSSH known_hosts file
@type line: str
"""
log = get_logger('paramiko.hostkeys')
fields = line.split(' ')
if len(fields) < 3:
# Bad number of fields
log.info("Not enough fields found in known_hosts in line %s (%r)" %
(lineno, line))
return None
fields = fields[:3]
names, keytype, key = fields
names = names.split(',')
# Decide what kind of key we're looking at and create an object
# to hold it accordingly.
try:
if keytype == 'ssh-rsa':
key = RSAKey(data=base64.decodestring(key))
elif keytype == 'ssh-dss':
key = DSSKey(data=base64.decodestring(key))
elif keytype == 'ecdsa-sha2-nistp256':
key = ECDSAKey(data=base64.decodestring(key))
else:
log.info("Unable to handle key of type %s" % (keytype,))
return None
except binascii.Error, e:
raise InvalidHostKey(line, e)
return cls(names, key)
from_line = classmethod(from_line)
def to_line(self):
"""
Returns a string in OpenSSH known_hosts file format, or None if
the object is not in a valid state. A trailing newline is
included.
"""
if self.valid:
return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(),
self.key.get_base64())
return None
def __repr__(self):
return '<HostKeyEntry %r: %r>' % (self.hostnames, self.key)
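# Illustrative parse of a known_hosts line (key material truncated, not a real key):
#   entry = HostKeyEntry.from_line('example.com ssh-rsa AAAAB3NzaC1yc2E...')
#   entry.hostnames       -> ['example.com']
#   entry.key.get_name()  -> 'ssh-rsa'
#   entry.to_line()       -> the same line re-serialized, with a trailing newline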
class HostKeys (UserDict.DictMixin):
"""
Representation of an openssh-style "known hosts" file. Host keys can be
read from one or more files, and then individual hosts can be looked up to
verify server keys during SSH negotiation.
A HostKeys object can be treated like a dict; any dict lookup is equivalent
to calling L{lookup}.
@since: 1.5.3
"""
def __init__(self, filename=None):
"""
Create a new HostKeys object, optionally loading keys from an openssh
style host-key file.
@param filename: filename to load host keys from, or C{None}
@type filename: str
"""
# emulate a dict of { hostname: { keytype: PKey } }
self._entries = []
if filename is not None:
self.load(filename)
def add(self, hostname, keytype, key):
"""
Add a host key entry to the table. Any existing entry for a
C{(hostname, keytype)} pair will be replaced.
@param hostname: the hostname (or IP) to add
@type hostname: str
@param keytype: key type (C{"ssh-rsa"} or C{"ssh-dss"})
@type keytype: str
@param key: the key to add
@type key: L{PKey}
"""
for e in self._entries:
if (hostname in e.hostnames) and (e.key.get_name() == keytype):
e.key = key
return
self._entries.append(HostKeyEntry([hostname], key))
def load(self, filename):
"""
Read a file of known SSH host keys, in the format used by openssh.
This type of file unfortunately doesn't exist on Windows, but on
posix, it will usually be stored in
C{os.path.expanduser("~/.ssh/known_hosts")}.
If this method is called multiple times, the host keys are merged,
not cleared. So multiple calls to C{load} will just call L{add},
replacing any existing entries and adding new ones.
@param filename: name of the file to read host keys from
@type filename: str
@raise IOError: if there was an error reading the file
"""
f = open(filename, 'r')
for lineno, line in enumerate(f):
line = line.strip()
if (len(line) == 0) or (line[0] == '#'):
continue
e = HostKeyEntry.from_line(line, lineno)
if e is not None:
_hostnames = e.hostnames
for h in _hostnames:
if self.check(h, e.key):
e.hostnames.remove(h)
if len(e.hostnames):
self._entries.append(e)
f.close()
def save(self, filename):
"""
Save host keys into a file, in the format used by openssh. The order of
keys in the file will be preserved when possible (if these keys were
loaded from a file originally). The single exception is that combined
lines will be split into individual key lines, which is arguably a bug.
@param filename: name of the file to write
@type filename: str
@raise IOError: if there was an error writing the file
@since: 1.6.1
"""
f = open(filename, 'w')
for e in self._entries:
line = e.to_line()
if line:
f.write(line)
f.close()
def lookup(self, hostname):
"""
Find a hostkey entry for a given hostname or IP. If no entry is found,
C{None} is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either C{"ssh-rsa"} or C{"ssh-dss"}.
@param hostname: the hostname (or IP) to lookup
@type hostname: str
@return: keys associated with this host (or C{None})
@rtype: dict(str, L{PKey})
"""
class SubDict (UserDict.DictMixin):
def __init__(self, hostname, entries, hostkeys):
self._hostname = hostname
self._entries = entries
self._hostkeys = hostkeys
def __getitem__(self, key):
for e in self._entries:
if e.key.get_name() == key:
return e.key
raise KeyError(key)
def __setitem__(self, key, val):
for e in self._entries:
if e.key is None:
continue
if e.key.get_name() == key:
# replace
e.key = val
break
else:
# add a new one
e = HostKeyEntry([hostname], val)
self._entries.append(e)
self._hostkeys._entries.append(e)
def keys(self):
return [e.key.get_name() for e in self._entries if e.key is not None]
entries = []
for e in self._entries:
for h in e.hostnames:
if (h.startswith('|1|') and (self.hash_host(hostname, h) == h)) or (h == hostname):
entries.append(e)
if len(entries) == 0:
return None
return SubDict(hostname, entries, self)
def check(self, hostname, key):
"""
Return True if the given key is associated with the given hostname
in this dictionary.
@param hostname: hostname (or IP) of the SSH server
@type hostname: str
@param key: the key to check
@type key: L{PKey}
@return: C{True} if the key is associated with the hostname; C{False}
if not
@rtype: bool
"""
k = self.lookup(hostname)
if k is None:
return False
host_key = k.get(key.get_name(), None)
if host_key is None:
return False
return str(host_key) == str(key)
def clear(self):
"""
Remove all host keys from the dictionary.
"""
self._entries = []
def __getitem__(self, key):
ret = self.lookup(key)
if ret is None:
raise KeyError(key)
return ret
def __setitem__(self, hostname, entry):
# don't use this please.
if len(entry) == 0:
self._entries.append(HostKeyEntry([hostname], None))
return
for key_type in entry.keys():
found = False
for e in self._entries:
if (hostname in e.hostnames) and (e.key.get_name() == key_type):
# replace
e.key = entry[key_type]
found = True
if not found:
self._entries.append(HostKeyEntry([hostname], entry[key_type]))
def keys(self):
# python 2.4 sets would be nice here.
ret = []
for e in self._entries:
for h in e.hostnames:
if h not in ret:
ret.append(h)
return ret
def values(self):
ret = []
for k in self.keys():
ret.append(self.lookup(k))
return ret
def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by openssh when storing
hashed hostnames in the known_hosts file.
@param hostname: the hostname to hash
@type hostname: str
@param salt: optional salt to use when hashing (must be 20 bytes long)
@type salt: str
@return: the hashed hostname
@rtype: str
"""
if salt is None:
salt = rng.read(SHA.digest_size)
else:
if salt.startswith('|1|'):
salt = salt.split('|')[2]
salt = base64.decodestring(salt)
assert len(salt) == SHA.digest_size
hmac = HMAC.HMAC(salt, hostname, SHA).digest()
hostkey = '|1|%s|%s' % (base64.encodestring(salt), base64.encodestring(hmac))
return hostkey.replace('\n', '')
hash_host = staticmethod(hash_host)
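# Typical usage sketch (hedged): load an OpenSSH known_hosts file, then verify a
# server key during negotiation. `server_key` stands in for the PKey received
# from the transport; `os` would need to be imported for expanduser.
#
#   hk = HostKeys(os.path.expanduser('~/.ssh/known_hosts'))
#   keys_for_host = hk.lookup('example.com')    # dict of keytype -> PKey, or None
#   ok = hk.check('example.com', server_key)    # True if the key matches
#   hk.add('example.com', server_key.get_name(), server_key)
#   hk.save(os.path.expanduser('~/.ssh/known_hosts'))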
|
|
"""
precision_four_panel.py
-----------------------
Plot a Figueira et al. (2016) Figure 1 like plot.
"""
import argparse
import sys
from os.path import join
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from eniric import config
from eniric.utilities import rv_cumulative, rv_cumulative_full
def load_dataframe(filename):
"""Load in precision file, clean up spaces in csv.
Parameters
----------
filename: str
Name of phoenix_precision.py output.
Returns
-------
df: pandas.DataFrame
DataFrame of data.
"""
df = pd.read_csv(filename)
# Temp, logg, [Fe/H], Alpha, Band, Resolution, vsini, Sampling, Quality, Cond. 1, Cond. 2, Cond. 3, correct flag
df.columns = df.columns.str.strip()
df.Band = df.Band.str.strip()
df.Resolution = df.Resolution.str.strip()
df.Quality = df.Quality.astype(float)
df.Temp = df.Temp.astype(float)
df.logg = df.logg.astype(float)
df["[Fe/H]"] = df["[Fe/H]"].astype(float)
df.Alpha = df.Alpha.astype(float)
df.vsini = df.vsini.astype(float)
df.Sampling = df.Sampling.astype(float)
df["Cond. 1"] = df["Cond. 1"].astype(float)
df["Cond. 2"] = df["Cond. 2"].astype(float)
df["Cond. 3"] = df["Cond. 3"].astype(float)
df["correct flag"] = df["correct flag"].astype(bool)
return df
def plot_precision(
precision_file, teffs=None, logg=4.5, fe_h=0.0, vsini=1.0, sampling=3
):
"""Plot precision 4 panel with RV precision.
Saves figure to ``plots/``.
Parameters
----------
precision_file: str
Name of phoenix_precision.py output.
teffs: List of int or None
Stellar temperatures. Default is [3900, 3500, 2800, 2600].
logg: float
Stellar logg. Default is 4.5.
fe_h: float
Stellar metallicity. Default is 0.0.
vsini: float
Rotational velocity. Default is 1.0.
sampling: int
Spectral sampling. Default is 3.
"""
if teffs is None:
# Default teffs
teffs = [3900, 3500, 2800, 2600]
assert len(teffs) == 4
df = load_dataframe(precision_file)
filter_dict = {"logg": logg, "[Fe/H]": fe_h, "vsini": vsini, "Sampling": sampling}
df = filter_df(
df, filter_dict, drop_list=["Alpha", "[Fe/H]", "correct flag", "Quality"]
)
fig, axes = plt.subplots(2, 2)
ax = axes.flatten()
df_selected = df[df.Resolution.isin(["60k", "80k", "100k"])]
df_selected = df_selected[df_selected.Temp.isin(teffs)]
maximums = []
minimums = []
for ii, temp in enumerate(teffs):
# This entry
df_ii = df_selected[df_selected["Temp"] == temp]
df_ii_60k = df_ii[df_ii["Resolution"].str.strip() == "60k"]
df_ii_80k = df_ii[df_ii["Resolution"].str.strip() == "80k"]
df_ii_100k = df_ii[df_ii["Resolution"].str.strip() == "100k"]
df_ii_60k = df_ii_60k.set_index("Band")
df_ii_60k = df_ii_60k.reindex(["Z", "Y", "J", "H", "K"])
df_ii_80k = df_ii_80k.set_index("Band")
df_ii_80k = df_ii_80k.reindex(["Z", "Y", "J", "H", "K"])
df_ii_100k = df_ii_100k.set_index("Band")
df_ii_100k = df_ii_100k.reindex(["Z", "Y", "J", "H", "K"])
maximums.append(
np.max(
[
df_ii_60k[["Cond. 1", "Cond. 2", "Cond. 3"]].max(),
df_ii_80k[["Cond. 1", "Cond. 2", "Cond. 3"]].max(),
df_ii_100k[["Cond. 1", "Cond. 2", "Cond. 3"]].max(),
]
)
)
minimums.append(
np.min(
[
df_ii_60k[["Cond. 1", "Cond. 2", "Cond. 3"]].min(),
df_ii_80k[["Cond. 1", "Cond. 2", "Cond. 3"]].min(),
df_ii_100k[["Cond. 1", "Cond. 2", "Cond. 3"]].min(),
]
)
)
ax[ii].fill_between(
df_ii_60k.index,
df_ii_60k["Cond. 2"].values,
df_ii_60k["Cond. 3"].values,
color="b",
alpha=0.2,
)
ax[ii].fill_between(
df_ii_80k.index,
df_ii_80k["Cond. 2"].values,
df_ii_80k["Cond. 3"].values,
color="g",
alpha=0.2,
)
ax[ii].fill_between(
df_ii_100k.index,
df_ii_100k["Cond. 2"].values,
df_ii_100k["Cond. 3"].values,
color="r",
alpha=0.2,
)
ax[ii].plot(
df_ii_60k.index, df_ii_60k["Cond. 1"].values, color="b", linestyle="--"
) # lim
ax[ii].plot(
df_ii_80k.index, df_ii_80k["Cond. 1"].values, color="g", linestyle="--"
) # lim
ax[ii].plot(
df_ii_100k.index, df_ii_100k["Cond. 1"].values, color="r", linestyle="--"
) # lim
ax[ii].scatter(
df_ii_60k.index,
df_ii_60k["Cond. 2"].values,
marker="^",
color="b",
alpha=0.4,
)
ax[ii].scatter(
df_ii_60k.index,
df_ii_60k["Cond. 3"].values,
marker="o",
color="b",
alpha=0.4,
)
ax[ii].scatter(
df_ii_80k.index,
df_ii_80k["Cond. 3"].values,
marker="^",
color="g",
alpha=0.4,
)
ax[ii].scatter(
df_ii_80k.index,
df_ii_80k["Cond. 2"].values,
marker="o",
color="g",
alpha=0.4,
)
ax[ii].scatter(
df_ii_100k.index,
df_ii_100k["Cond. 3"].values,
marker="^",
color="r",
alpha=0.4,
)
ax[ii].scatter(
df_ii_100k.index,
df_ii_100k["Cond. 2"].values,
marker="o",
color="r",
alpha=0.4,
)
# Set limits ticks and labels
ymax = np.max(maximums)
ymin = np.min(minimums)
delta_y = ymax - ymin
band_size = len(df_ii_60k.index)
for jj in range(4):
ax[jj].text(0, ymax, "{} K".format(teffs[jj]), size=14)
ax[jj].set_ylim(ymin - 0.11 * delta_y, ymax + 0.11 * delta_y)
ax[jj].set_xlim(-0.5, band_size - 0.5)
ax[jj].tick_params(axis="both", which="major", labelsize=12)
# ticks and labels
if (jj == 2) or (jj == 3):
ax[jj].set_xlabel("Bands", fontsize=12)
if (jj == 1) or (jj == 3):
ax[jj].set_yticklabels([])
if (jj == 0) or (jj == 1):
ax[jj].set_xticklabels([])
fig.text(
0.04,
0.5,
r"Precision [m/s]",
ha="center",
va="center",
rotation="vertical",
size=12,
)
fig.subplots_adjust(hspace=0, wspace=0, bottom=0.12, top=0.95, right=0.95)
fig.savefig("plots/precision_logg{0}_feh_{1}.pdf".format(logg, fe_h))
fig.savefig("plots/precision_logg{0}_feh_{1}.png".format(logg, fe_h), dpi=400)
def filter_df(df, filter_dict, drop_list=None):
"""Filter DataFrame by dictionary of key and values."""
for key, val in filter_dict.items():
df = df[df[key] == val]
if drop_list is not None:
df = df.drop(columns=drop_list)
return df
def cumulative_df(df, full_cum=False):
"""Calculated cumulative RV precision across bands.
The precision of "Z", "ZY", "ZYJ", "ZYJH", "ZYJHK" bands.
Parameters
----------
df: pandas.DataFrame
DataFrame.
full_cum: bool
Include "YJHK", "JHK", "HK", "K" grouping also. Default is False.
"""
bands = df.index
assert all(bands == ["Z", "Y", "J", "H", "K"]), bands
if full_cum:
cum_bands = ["Z", "ZY", "ZYJ", "ZYJH", "ZYJHK", "YJHK", "JHK", "HK", "K"]
cum_dict = {
"Band": cum_bands,
"Cond. 1": rv_cumulative_full(df["Cond. 1"]),
"Cond. 2": rv_cumulative_full(df["Cond. 2"]),
"Cond. 3": rv_cumulative_full(df["Cond. 3"]),
}
else:
cum_bands = ["Z", "ZY", "ZYJ", "ZYJH", "ZYJHK"]
cum_dict = {
"Band": cum_bands,
"Cond. 1": rv_cumulative(df["Cond. 1"], single=True),
"Cond. 2": rv_cumulative(df["Cond. 2"], single=True),
"Cond. 3": rv_cumulative(df["Cond. 3"], single=True),
}
cum_df = pd.DataFrame(cum_dict)
cum_df = cum_df.set_index("Band")
cum_df = cum_df.reindex(cum_bands)
return cum_df
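# Example (hedged): given a band-indexed frame with index ["Z", "Y", "J", "H", "K"],
# cumulative_df() returns a frame indexed by ["Z", "ZY", "ZYJ", "ZYJH", "ZYJHK"]
# (or the nine-element grouping when full_cum=True), with each condition column
# combined across bands by rv_cumulative / rv_cumulative_full.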
def cumulative_plot(
precision_file,
teffs=None,
logg=4.5,
fe_h=0.0,
vsini=1.0,
sampling=3,
full_cum=False,
):
"""RV precision with cumulative bands.
full_cum: bool
Cumlative over entire range [ "Z","ZY", "ZYJ", "ZYJH", "ZYJHK","YJHK", "JHK","HK","K"]
Saves figure to ``plots/``.
Parameters
----------
precision_file: str
Name of phoenix_precision.py output.
teffs: List of int or None
Stellar temperatures. Default is [3900, 3500, 2800, 2600].
logg: int
Stellar Logg. Default is 4.5.
fe_h: int
Stellar metallicity. Default is 0.0.
vsini: float
Rotational velocity. Default is 1.0.
sampling:
Spectral sampling. Default is 3.
full_cum: bool
Cumulative over entire range. Default is False.
"""
if teffs is None:
# Default values
teffs = [3900, 3500, 2800, 2600]
assert len(teffs) == 4
df = load_dataframe(precision_file)
filter_dict = {"logg": logg, "[Fe/H]": fe_h, "vsini": vsini, "Sampling": sampling}
df = filter_df(
df, filter_dict, drop_list=["Alpha", "[Fe/H]", "correct flag", "Quality"]
)
fig, axes = plt.subplots(2, 2)
ax = axes.flatten()
df_selected = df[df.Resolution.isin(["60k", "80k", "100k"])]
df_selected = df_selected[df_selected.Temp.isin(teffs)]
maximums = []
minimums = []
for ii, temp in enumerate(teffs):
# This entry
df_ii = df_selected[df_selected["Temp"] == temp]
df_ii_60k = df_ii[df_ii["Resolution"].str.strip() == "60k"]
df_ii_80k = df_ii[df_ii["Resolution"].str.strip() == "80k"]
df_ii_100k = df_ii[df_ii["Resolution"].str.strip() == "100k"]
df_ii_60k = df_ii_60k.set_index("Band")
df_ii_60k = df_ii_60k.reindex(["Z", "Y", "J", "H", "K"])
df_ii_80k = df_ii_80k.set_index("Band")
df_ii_80k = df_ii_80k.reindex(["Z", "Y", "J", "H", "K"])
df_ii_100k = df_ii_100k.set_index("Band")
df_ii_100k = df_ii_100k.reindex(["Z", "Y", "J", "H", "K"])
# Cumulative
df_ii_60k = cumulative_df(df_ii_60k, full_cum=full_cum)
df_ii_80k = cumulative_df(df_ii_80k, full_cum=full_cum)
df_ii_100k = cumulative_df(df_ii_100k, full_cum=full_cum)
maximums.append(
np.max(
[
df_ii_60k[["Cond. 1", "Cond. 2", "Cond. 3"]].max(),
df_ii_80k[["Cond. 1", "Cond. 2", "Cond. 3"]].max(),
df_ii_100k[["Cond. 1", "Cond. 2", "Cond. 3"]].max(),
]
)
)
minimums.append(
np.min(
[
df_ii_60k[["Cond. 1", "Cond. 2", "Cond. 3"]].min(),
df_ii_80k[["Cond. 1", "Cond. 2", "Cond. 3"]].min(),
df_ii_100k[["Cond. 1", "Cond. 2", "Cond. 3"]].min(),
]
)
)
ax[ii].fill_between(
df_ii_60k.index,
df_ii_60k["Cond. 2"].values,
df_ii_60k["Cond. 3"].values,
color="b",
alpha=0.2,
)
ax[ii].fill_between(
df_ii_80k.index,
df_ii_80k["Cond. 2"].values,
df_ii_80k["Cond. 3"].values,
color="g",
alpha=0.2,
)
ax[ii].fill_between(
df_ii_100k.index,
df_ii_100k["Cond. 2"].values,
df_ii_100k["Cond. 3"].values,
color="r",
alpha=0.2,
)
ax[ii].plot(
df_ii_60k.index, df_ii_60k["Cond. 1"].values, color="b", linestyle="--"
) # lim
ax[ii].plot(
df_ii_80k.index, df_ii_80k["Cond. 1"].values, color="g", linestyle="--"
) # lim
ax[ii].plot(
df_ii_100k.index, df_ii_100k["Cond. 1"].values, color="r", linestyle="--"
) # lim
ax[ii].scatter(
df_ii_60k.index,
df_ii_60k["Cond. 2"].values,
marker="^",
color="b",
alpha=0.4,
)
ax[ii].scatter(
df_ii_60k.index,
df_ii_60k["Cond. 3"].values,
marker="o",
color="b",
alpha=0.4,
)
ax[ii].scatter(
df_ii_80k.index,
df_ii_80k["Cond. 3"].values,
marker="^",
color="g",
alpha=0.4,
)
ax[ii].scatter(
df_ii_80k.index,
df_ii_80k["Cond. 2"].values,
marker="o",
color="g",
alpha=0.4,
)
ax[ii].scatter(
df_ii_100k.index,
df_ii_100k["Cond. 3"].values,
marker="^",
color="r",
alpha=0.4,
)
ax[ii].scatter(
df_ii_100k.index,
df_ii_100k["Cond. 2"].values,
marker="o",
color="r",
alpha=0.4,
)
# Set limits ticks and labels
ymax = np.min([np.max(maximums), 10])  # Cap the plotted precision at 10 m/s
ymin = np.min(minimums)
delta_y = ymax - ymin
band_size = len(df_ii_60k.index)
for jj in range(4):
ax[jj].text(0, ymax, "{} K".format(teffs[jj]), size=14)
ax[jj].set_ylim(ymin - 0.1 * delta_y, ymax + 0.15 * delta_y)
ax[jj].set_xlim(-0.5, band_size - 0.5)
ax[jj].tick_params(axis="both", which="major", labelsize=12)
# ticks and labels
if (jj == 2) or (jj == 3):
ax[jj].set_xlabel("Bands", fontsize=12)
if full_cum:
ax[jj].tick_params(axis="x", labelrotation=40)
else:
ax[jj].tick_params(axis="x", labelrotation=25)
if (jj == 1) or (jj == 3):
ax[jj].set_yticklabels([])
if (jj == 0) or (jj == 1):
ax[jj].set_xticklabels([])
fig.text(
0.04,
0.5,
r"Precision [m/s]",
ha="center",
va="center",
rotation="vertical",
size=12,
)
fig.subplots_adjust(hspace=0, wspace=0, bottom=0.17, top=0.95, right=0.95)
fname = "plots/cummulative_precision_logg{0}_feh_{1}_{2}".format(
logg, fe_h, full_cum
)
fig.savefig(fname + ".pdf")
fig.savefig(fname + ".png", dpi=400)
default_file = join(
config.pathdir, config.paths["precision_results"], "precision_results.csv"
)
def _parser():
"""Take care of all the argparse stuff."""
parser = argparse.ArgumentParser(
description="Create a four panel RV relative precision plot."
)
parser.add_argument(
"precision_file",
help="Precision result csv to use. Default is set with the config.yaml file. Currently {0}".format(
default_file
),
type=str,
nargs="?",
default=default_file,
)
parser.add_argument(
"-t",
"--temperatures",
nargs=4,
help="Temperatures to display in Kelvin. Default is [3900, 3500, 2800, 2600].",
type=int,
default=[3900, 3500, 2800, 2600],
)
return parser.parse_args()
if __name__ == "__main__":
args = _parser()
precision_file = args.precision_file
temperatures = args.temperatures
plot_precision(precision_file, teffs=temperatures)
# plot_precision(precision_file, teffs=temperatures, logg=4, fe_h=1)
cumulative_plot(precision_file, teffs=temperatures)
cumulative_plot(precision_file, teffs=temperatures, full_cum=True)
sys.exit(0)
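# Example invocation (hedged; the csv path is a placeholder):
#   python precision_four_panel.py path/to/precision_results.csv -t 3900 3500 2800 2600
# which writes the four-panel and cumulative precision figures into plots/.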
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Activity
from ._models_py3 import ActivityListResult
from ._models_py3 import ActivityOutputType
from ._models_py3 import ActivityParameter
from ._models_py3 import ActivityParameterSet
from ._models_py3 import ActivityParameterValidationSet
from ._models_py3 import AdvancedSchedule
from ._models_py3 import AdvancedScheduleMonthlyOccurrence
from ._models_py3 import AgentRegistration
from ._models_py3 import AgentRegistrationKeys
from ._models_py3 import AgentRegistrationRegenerateKeyParameter
from ._models_py3 import AutomationAccount
from ._models_py3 import AutomationAccountCreateOrUpdateParameters
from ._models_py3 import AutomationAccountListResult
from ._models_py3 import AutomationAccountUpdateParameters
from ._models_py3 import AzureQueryProperties
from ._models_py3 import Certificate
from ._models_py3 import CertificateCreateOrUpdateParameters
from ._models_py3 import CertificateListResult
from ._models_py3 import CertificateUpdateParameters
from ._models_py3 import Connection
from ._models_py3 import ConnectionCreateOrUpdateParameters
from ._models_py3 import ConnectionListResult
from ._models_py3 import ConnectionType
from ._models_py3 import ConnectionTypeAssociationProperty
from ._models_py3 import ConnectionTypeCreateOrUpdateParameters
from ._models_py3 import ConnectionTypeListResult
from ._models_py3 import ConnectionUpdateParameters
from ._models_py3 import ContentHash
from ._models_py3 import ContentLink
from ._models_py3 import ContentSource
from ._models_py3 import Credential
from ._models_py3 import CredentialCreateOrUpdateParameters
from ._models_py3 import CredentialListResult
from ._models_py3 import CredentialUpdateParameters
from ._models_py3 import DscCompilationJob
from ._models_py3 import DscCompilationJobCreateParameters
from ._models_py3 import DscCompilationJobListResult
from ._models_py3 import DscConfiguration
from ._models_py3 import DscConfigurationAssociationProperty
from ._models_py3 import DscConfigurationCreateOrUpdateParameters
from ._models_py3 import DscConfigurationListResult
from ._models_py3 import DscConfigurationParameter
from ._models_py3 import DscConfigurationUpdateParameters
from ._models_py3 import DscMetaConfiguration
from ._models_py3 import DscNode
from ._models_py3 import DscNodeConfiguration
from ._models_py3 import DscNodeConfigurationCreateOrUpdateParameters
from ._models_py3 import DscNodeConfigurationListResult
from ._models_py3 import DscNodeExtensionHandlerAssociationProperty
from ._models_py3 import DscNodeListResult
from ._models_py3 import DscNodeReport
from ._models_py3 import DscNodeReportListResult
from ._models_py3 import DscNodeUpdateParameters
from ._models_py3 import DscNodeUpdateParametersProperties
from ._models_py3 import DscReportError
from ._models_py3 import DscReportResource
from ._models_py3 import DscReportResourceNavigation
from ._models_py3 import ErrorResponse
from ._models_py3 import FieldDefinition
from ._models_py3 import HybridRunbookWorker
from ._models_py3 import HybridRunbookWorkerGroup
from ._models_py3 import HybridRunbookWorkerGroupUpdateParameters
from ._models_py3 import HybridRunbookWorkerGroupsListResult
from ._models_py3 import Job
from ._models_py3 import JobCollectionItem
from ._models_py3 import JobCreateParameters
from ._models_py3 import JobListResultV2
from ._models_py3 import JobNavigation
from ._models_py3 import JobSchedule
from ._models_py3 import JobScheduleCreateParameters
from ._models_py3 import JobScheduleListResult
from ._models_py3 import JobStream
from ._models_py3 import JobStreamListResult
from ._models_py3 import Key
from ._models_py3 import KeyListResult
from ._models_py3 import LinkedWorkspace
from ._models_py3 import LinuxProperties
from ._models_py3 import Module
from ._models_py3 import ModuleCreateOrUpdateParameters
from ._models_py3 import ModuleErrorInfo
from ._models_py3 import ModuleListResult
from ._models_py3 import ModuleUpdateParameters
from ._models_py3 import NodeCount
from ._models_py3 import NodeCountProperties
from ._models_py3 import NodeCounts
from ._models_py3 import NonAzureQueryProperties
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import ProxyResource
from ._models_py3 import PythonPackageCreateParameters
from ._models_py3 import PythonPackageUpdateParameters
from ._models_py3 import Resource
from ._models_py3 import RunAsCredentialAssociationProperty
from ._models_py3 import Runbook
from ._models_py3 import RunbookAssociationProperty
from ._models_py3 import RunbookCreateOrUpdateDraftParameters
from ._models_py3 import RunbookCreateOrUpdateDraftProperties
from ._models_py3 import RunbookCreateOrUpdateParameters
from ._models_py3 import RunbookDraft
from ._models_py3 import RunbookDraftUndoEditResult
from ._models_py3 import RunbookListResult
from ._models_py3 import RunbookParameter
from ._models_py3 import RunbookUpdateParameters
from ._models_py3 import SUCScheduleProperties
from ._models_py3 import Schedule
from ._models_py3 import ScheduleAssociationProperty
from ._models_py3 import ScheduleCreateOrUpdateParameters
from ._models_py3 import ScheduleListResult
from ._models_py3 import ScheduleUpdateParameters
from ._models_py3 import Sku
from ._models_py3 import SoftwareUpdateConfiguration
from ._models_py3 import SoftwareUpdateConfigurationCollectionItem
from ._models_py3 import SoftwareUpdateConfigurationListResult
from ._models_py3 import SoftwareUpdateConfigurationMachineRun
from ._models_py3 import SoftwareUpdateConfigurationMachineRunListResult
from ._models_py3 import SoftwareUpdateConfigurationRun
from ._models_py3 import SoftwareUpdateConfigurationRunListResult
from ._models_py3 import SoftwareUpdateConfigurationRunTaskProperties
from ._models_py3 import SoftwareUpdateConfigurationRunTasks
from ._models_py3 import SoftwareUpdateConfigurationTasks
from ._models_py3 import SourceControl
from ._models_py3 import SourceControlCreateOrUpdateParameters
from ._models_py3 import SourceControlListResult
from ._models_py3 import SourceControlSecurityTokenProperties
from ._models_py3 import SourceControlSyncJob
from ._models_py3 import SourceControlSyncJobById
from ._models_py3 import SourceControlSyncJobCreateParameters
from ._models_py3 import SourceControlSyncJobListResult
from ._models_py3 import SourceControlSyncJobStream
from ._models_py3 import SourceControlSyncJobStreamById
from ._models_py3 import SourceControlSyncJobStreamsListBySyncJob
from ._models_py3 import SourceControlUpdateParameters
from ._models_py3 import Statistics
from ._models_py3 import StatisticsListResult
from ._models_py3 import TagSettingsProperties
from ._models_py3 import TargetProperties
from ._models_py3 import TaskProperties
from ._models_py3 import TestJob
from ._models_py3 import TestJobCreateParameters
from ._models_py3 import TrackedResource
from ._models_py3 import TypeField
from ._models_py3 import TypeFieldListResult
from ._models_py3 import UpdateConfiguration
from ._models_py3 import UpdateConfigurationNavigation
from ._models_py3 import Usage
from ._models_py3 import UsageCounterName
from ._models_py3 import UsageListResult
from ._models_py3 import Variable
from ._models_py3 import VariableCreateOrUpdateParameters
from ._models_py3 import VariableListResult
from ._models_py3 import VariableUpdateParameters
from ._models_py3 import Watcher
from ._models_py3 import WatcherListResult
from ._models_py3 import WatcherUpdateParameters
from ._models_py3 import Webhook
from ._models_py3 import WebhookCreateOrUpdateParameters
from ._models_py3 import WebhookListResult
from ._models_py3 import WebhookUpdateParameters
from ._models_py3 import WindowsProperties
except (SyntaxError, ImportError):
from ._models import Activity # type: ignore
from ._models import ActivityListResult # type: ignore
from ._models import ActivityOutputType # type: ignore
from ._models import ActivityParameter # type: ignore
from ._models import ActivityParameterSet # type: ignore
from ._models import ActivityParameterValidationSet # type: ignore
from ._models import AdvancedSchedule # type: ignore
from ._models import AdvancedScheduleMonthlyOccurrence # type: ignore
from ._models import AgentRegistration # type: ignore
from ._models import AgentRegistrationKeys # type: ignore
from ._models import AgentRegistrationRegenerateKeyParameter # type: ignore
from ._models import AutomationAccount # type: ignore
from ._models import AutomationAccountCreateOrUpdateParameters # type: ignore
from ._models import AutomationAccountListResult # type: ignore
from ._models import AutomationAccountUpdateParameters # type: ignore
from ._models import AzureQueryProperties # type: ignore
from ._models import Certificate # type: ignore
from ._models import CertificateCreateOrUpdateParameters # type: ignore
from ._models import CertificateListResult # type: ignore
from ._models import CertificateUpdateParameters # type: ignore
from ._models import Connection # type: ignore
from ._models import ConnectionCreateOrUpdateParameters # type: ignore
from ._models import ConnectionListResult # type: ignore
from ._models import ConnectionType # type: ignore
from ._models import ConnectionTypeAssociationProperty # type: ignore
from ._models import ConnectionTypeCreateOrUpdateParameters # type: ignore
from ._models import ConnectionTypeListResult # type: ignore
from ._models import ConnectionUpdateParameters # type: ignore
from ._models import ContentHash # type: ignore
from ._models import ContentLink # type: ignore
from ._models import ContentSource # type: ignore
from ._models import Credential # type: ignore
from ._models import CredentialCreateOrUpdateParameters # type: ignore
from ._models import CredentialListResult # type: ignore
from ._models import CredentialUpdateParameters # type: ignore
from ._models import DscCompilationJob # type: ignore
from ._models import DscCompilationJobCreateParameters # type: ignore
from ._models import DscCompilationJobListResult # type: ignore
from ._models import DscConfiguration # type: ignore
from ._models import DscConfigurationAssociationProperty # type: ignore
from ._models import DscConfigurationCreateOrUpdateParameters # type: ignore
from ._models import DscConfigurationListResult # type: ignore
from ._models import DscConfigurationParameter # type: ignore
from ._models import DscConfigurationUpdateParameters # type: ignore
from ._models import DscMetaConfiguration # type: ignore
from ._models import DscNode # type: ignore
from ._models import DscNodeConfiguration # type: ignore
from ._models import DscNodeConfigurationCreateOrUpdateParameters # type: ignore
from ._models import DscNodeConfigurationListResult # type: ignore
from ._models import DscNodeExtensionHandlerAssociationProperty # type: ignore
from ._models import DscNodeListResult # type: ignore
from ._models import DscNodeReport # type: ignore
from ._models import DscNodeReportListResult # type: ignore
from ._models import DscNodeUpdateParameters # type: ignore
from ._models import DscNodeUpdateParametersProperties # type: ignore
from ._models import DscReportError # type: ignore
from ._models import DscReportResource # type: ignore
from ._models import DscReportResourceNavigation # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import FieldDefinition # type: ignore
from ._models import HybridRunbookWorker # type: ignore
from ._models import HybridRunbookWorkerGroup # type: ignore
from ._models import HybridRunbookWorkerGroupUpdateParameters # type: ignore
from ._models import HybridRunbookWorkerGroupsListResult # type: ignore
from ._models import Job # type: ignore
from ._models import JobCollectionItem # type: ignore
from ._models import JobCreateParameters # type: ignore
from ._models import JobListResultV2 # type: ignore
from ._models import JobNavigation # type: ignore
from ._models import JobSchedule # type: ignore
from ._models import JobScheduleCreateParameters # type: ignore
from ._models import JobScheduleListResult # type: ignore
from ._models import JobStream # type: ignore
from ._models import JobStreamListResult # type: ignore
from ._models import Key # type: ignore
from ._models import KeyListResult # type: ignore
from ._models import LinkedWorkspace # type: ignore
from ._models import LinuxProperties # type: ignore
from ._models import Module # type: ignore
from ._models import ModuleCreateOrUpdateParameters # type: ignore
from ._models import ModuleErrorInfo # type: ignore
from ._models import ModuleListResult # type: ignore
from ._models import ModuleUpdateParameters # type: ignore
from ._models import NodeCount # type: ignore
from ._models import NodeCountProperties # type: ignore
from ._models import NodeCounts # type: ignore
from ._models import NonAzureQueryProperties # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import ProxyResource # type: ignore
from ._models import PythonPackageCreateParameters # type: ignore
from ._models import PythonPackageUpdateParameters # type: ignore
from ._models import Resource # type: ignore
from ._models import RunAsCredentialAssociationProperty # type: ignore
from ._models import Runbook # type: ignore
from ._models import RunbookAssociationProperty # type: ignore
from ._models import RunbookCreateOrUpdateDraftParameters # type: ignore
from ._models import RunbookCreateOrUpdateDraftProperties # type: ignore
from ._models import RunbookCreateOrUpdateParameters # type: ignore
from ._models import RunbookDraft # type: ignore
from ._models import RunbookDraftUndoEditResult # type: ignore
from ._models import RunbookListResult # type: ignore
from ._models import RunbookParameter # type: ignore
from ._models import RunbookUpdateParameters # type: ignore
from ._models import SUCScheduleProperties # type: ignore
from ._models import Schedule # type: ignore
from ._models import ScheduleAssociationProperty # type: ignore
from ._models import ScheduleCreateOrUpdateParameters # type: ignore
from ._models import ScheduleListResult # type: ignore
from ._models import ScheduleUpdateParameters # type: ignore
from ._models import Sku # type: ignore
from ._models import SoftwareUpdateConfiguration # type: ignore
from ._models import SoftwareUpdateConfigurationCollectionItem # type: ignore
from ._models import SoftwareUpdateConfigurationListResult # type: ignore
from ._models import SoftwareUpdateConfigurationMachineRun # type: ignore
from ._models import SoftwareUpdateConfigurationMachineRunListResult # type: ignore
from ._models import SoftwareUpdateConfigurationRun # type: ignore
from ._models import SoftwareUpdateConfigurationRunListResult # type: ignore
from ._models import SoftwareUpdateConfigurationRunTaskProperties # type: ignore
from ._models import SoftwareUpdateConfigurationRunTasks # type: ignore
from ._models import SoftwareUpdateConfigurationTasks # type: ignore
from ._models import SourceControl # type: ignore
from ._models import SourceControlCreateOrUpdateParameters # type: ignore
from ._models import SourceControlListResult # type: ignore
from ._models import SourceControlSecurityTokenProperties # type: ignore
from ._models import SourceControlSyncJob # type: ignore
from ._models import SourceControlSyncJobById # type: ignore
from ._models import SourceControlSyncJobCreateParameters # type: ignore
from ._models import SourceControlSyncJobListResult # type: ignore
from ._models import SourceControlSyncJobStream # type: ignore
from ._models import SourceControlSyncJobStreamById # type: ignore
from ._models import SourceControlSyncJobStreamsListBySyncJob # type: ignore
from ._models import SourceControlUpdateParameters # type: ignore
from ._models import Statistics # type: ignore
from ._models import StatisticsListResult # type: ignore
from ._models import TagSettingsProperties # type: ignore
from ._models import TargetProperties # type: ignore
from ._models import TaskProperties # type: ignore
from ._models import TestJob # type: ignore
from ._models import TestJobCreateParameters # type: ignore
from ._models import TrackedResource # type: ignore
from ._models import TypeField # type: ignore
from ._models import TypeFieldListResult # type: ignore
from ._models import UpdateConfiguration # type: ignore
from ._models import UpdateConfigurationNavigation # type: ignore
from ._models import Usage # type: ignore
from ._models import UsageCounterName # type: ignore
from ._models import UsageListResult # type: ignore
from ._models import Variable # type: ignore
from ._models import VariableCreateOrUpdateParameters # type: ignore
from ._models import VariableListResult # type: ignore
from ._models import VariableUpdateParameters # type: ignore
from ._models import Watcher # type: ignore
from ._models import WatcherListResult # type: ignore
from ._models import WatcherUpdateParameters # type: ignore
from ._models import Webhook # type: ignore
from ._models import WebhookCreateOrUpdateParameters # type: ignore
from ._models import WebhookListResult # type: ignore
from ._models import WebhookUpdateParameters # type: ignore
from ._models import WindowsProperties # type: ignore
from ._automation_client_enums import (
AgentRegistrationKeyName,
AutomationAccountState,
AutomationKeyName,
AutomationKeyPermissions,
ContentSourceType,
CountType,
DscConfigurationState,
GroupTypeEnum,
HttpStatusCode,
JobProvisioningState,
JobStatus,
JobStreamType,
LinuxUpdateClasses,
ModuleProvisioningState,
OperatingSystemType,
ProvisioningState,
RunbookState,
RunbookTypeEnum,
ScheduleDay,
ScheduleFrequency,
SkuNameEnum,
SourceType,
StreamType,
SyncType,
TagOperators,
TokenType,
WindowsUpdateClasses,
)
__all__ = [
'Activity',
'ActivityListResult',
'ActivityOutputType',
'ActivityParameter',
'ActivityParameterSet',
'ActivityParameterValidationSet',
'AdvancedSchedule',
'AdvancedScheduleMonthlyOccurrence',
'AgentRegistration',
'AgentRegistrationKeys',
'AgentRegistrationRegenerateKeyParameter',
'AutomationAccount',
'AutomationAccountCreateOrUpdateParameters',
'AutomationAccountListResult',
'AutomationAccountUpdateParameters',
'AzureQueryProperties',
'Certificate',
'CertificateCreateOrUpdateParameters',
'CertificateListResult',
'CertificateUpdateParameters',
'Connection',
'ConnectionCreateOrUpdateParameters',
'ConnectionListResult',
'ConnectionType',
'ConnectionTypeAssociationProperty',
'ConnectionTypeCreateOrUpdateParameters',
'ConnectionTypeListResult',
'ConnectionUpdateParameters',
'ContentHash',
'ContentLink',
'ContentSource',
'Credential',
'CredentialCreateOrUpdateParameters',
'CredentialListResult',
'CredentialUpdateParameters',
'DscCompilationJob',
'DscCompilationJobCreateParameters',
'DscCompilationJobListResult',
'DscConfiguration',
'DscConfigurationAssociationProperty',
'DscConfigurationCreateOrUpdateParameters',
'DscConfigurationListResult',
'DscConfigurationParameter',
'DscConfigurationUpdateParameters',
'DscMetaConfiguration',
'DscNode',
'DscNodeConfiguration',
'DscNodeConfigurationCreateOrUpdateParameters',
'DscNodeConfigurationListResult',
'DscNodeExtensionHandlerAssociationProperty',
'DscNodeListResult',
'DscNodeReport',
'DscNodeReportListResult',
'DscNodeUpdateParameters',
'DscNodeUpdateParametersProperties',
'DscReportError',
'DscReportResource',
'DscReportResourceNavigation',
'ErrorResponse',
'FieldDefinition',
'HybridRunbookWorker',
'HybridRunbookWorkerGroup',
'HybridRunbookWorkerGroupUpdateParameters',
'HybridRunbookWorkerGroupsListResult',
'Job',
'JobCollectionItem',
'JobCreateParameters',
'JobListResultV2',
'JobNavigation',
'JobSchedule',
'JobScheduleCreateParameters',
'JobScheduleListResult',
'JobStream',
'JobStreamListResult',
'Key',
'KeyListResult',
'LinkedWorkspace',
'LinuxProperties',
'Module',
'ModuleCreateOrUpdateParameters',
'ModuleErrorInfo',
'ModuleListResult',
'ModuleUpdateParameters',
'NodeCount',
'NodeCountProperties',
'NodeCounts',
'NonAzureQueryProperties',
'Operation',
'OperationDisplay',
'OperationListResult',
'ProxyResource',
'PythonPackageCreateParameters',
'PythonPackageUpdateParameters',
'Resource',
'RunAsCredentialAssociationProperty',
'Runbook',
'RunbookAssociationProperty',
'RunbookCreateOrUpdateDraftParameters',
'RunbookCreateOrUpdateDraftProperties',
'RunbookCreateOrUpdateParameters',
'RunbookDraft',
'RunbookDraftUndoEditResult',
'RunbookListResult',
'RunbookParameter',
'RunbookUpdateParameters',
'SUCScheduleProperties',
'Schedule',
'ScheduleAssociationProperty',
'ScheduleCreateOrUpdateParameters',
'ScheduleListResult',
'ScheduleUpdateParameters',
'Sku',
'SoftwareUpdateConfiguration',
'SoftwareUpdateConfigurationCollectionItem',
'SoftwareUpdateConfigurationListResult',
'SoftwareUpdateConfigurationMachineRun',
'SoftwareUpdateConfigurationMachineRunListResult',
'SoftwareUpdateConfigurationRun',
'SoftwareUpdateConfigurationRunListResult',
'SoftwareUpdateConfigurationRunTaskProperties',
'SoftwareUpdateConfigurationRunTasks',
'SoftwareUpdateConfigurationTasks',
'SourceControl',
'SourceControlCreateOrUpdateParameters',
'SourceControlListResult',
'SourceControlSecurityTokenProperties',
'SourceControlSyncJob',
'SourceControlSyncJobById',
'SourceControlSyncJobCreateParameters',
'SourceControlSyncJobListResult',
'SourceControlSyncJobStream',
'SourceControlSyncJobStreamById',
'SourceControlSyncJobStreamsListBySyncJob',
'SourceControlUpdateParameters',
'Statistics',
'StatisticsListResult',
'TagSettingsProperties',
'TargetProperties',
'TaskProperties',
'TestJob',
'TestJobCreateParameters',
'TrackedResource',
'TypeField',
'TypeFieldListResult',
'UpdateConfiguration',
'UpdateConfigurationNavigation',
'Usage',
'UsageCounterName',
'UsageListResult',
'Variable',
'VariableCreateOrUpdateParameters',
'VariableListResult',
'VariableUpdateParameters',
'Watcher',
'WatcherListResult',
'WatcherUpdateParameters',
'Webhook',
'WebhookCreateOrUpdateParameters',
'WebhookListResult',
'WebhookUpdateParameters',
'WindowsProperties',
'AgentRegistrationKeyName',
'AutomationAccountState',
'AutomationKeyName',
'AutomationKeyPermissions',
'ContentSourceType',
'CountType',
'DscConfigurationState',
'GroupTypeEnum',
'HttpStatusCode',
'JobProvisioningState',
'JobStatus',
'JobStreamType',
'LinuxUpdateClasses',
'ModuleProvisioningState',
'OperatingSystemType',
'ProvisioningState',
'RunbookState',
'RunbookTypeEnum',
'ScheduleDay',
'ScheduleFrequency',
'SkuNameEnum',
'SourceType',
'StreamType',
'SyncType',
'TagOperators',
'TokenType',
'WindowsUpdateClasses',
]
|
|
"""
Tests for chimpy. Run them with noserunner
You need to activate groups in the Mailchimp web UI before running tests:
* Browse to http://admin.mailchimp.com
* List setting -> Groups for segmentation
* Check "add groups to my list"
"""
import os
import pprint
import operator
import random
import md5
import datetime
import chimpy
chimp = None
EMAIL_ADDRESS = 'casualbear@googlemail.com'
EMAIL_ADDRESS2 = 'dummy@dummy.com'
LIST_NAME = 'unittests'
LIST_ID = None
def setup_module():
assert 'MAILCHIMP_APIKEY' in os.environ, \
"please set the MAILCHIMP_APIKEY environment variable\n" \
"you can get a new api key by calling:\n" \
" wget 'http://api.mailchimp.com/1.1/?output=json&method=login" \
"&password=xxxxxx&username=yyyyyyyy' -O apikey"
global chimp
chimp = chimpy.Connection(os.environ['MAILCHIMP_APIKEY'])
def test_ping():
assert chimp.ping() == "Everything's Chimpy!"
def test_lists():
lists = chimp.lists()
pprint.pprint(lists)
list_names = map(lambda x: x['name'], lists)
assert LIST_NAME in list_names
def list_id():
global LIST_ID
if LIST_ID is None:
test_list = [x for x in chimp.lists() if x['name'] == LIST_NAME].pop()
LIST_ID = test_list['id']
return LIST_ID
# use double_optin=False to prevent manual intervention
def test_list_subscribe_and_unsubscribe():
result = chimp.list_subscribe(list_id(), EMAIL_ADDRESS,
{'FIRST': 'unit', 'LAST': 'tests'},
double_optin=False)
pprint.pprint(result)
assert result == True
members = chimp.list_members(list_id())['data']
print members
emails = map(lambda x: x['email'], members)
    print emails
assert EMAIL_ADDRESS in emails
result = chimp.list_unsubscribe(list_id(),
EMAIL_ADDRESS,
delete_member=True,
send_goodbye=False,
send_notify=False)
pprint.pprint(result)
assert result == True
def test_list_batch_subscribe_and_batch_unsubscribe():
batch = [{'EMAIL':EMAIL_ADDRESS,'EMAIL_TYPE':'html'},
{'EMAIL':EMAIL_ADDRESS2,'EMAIL_TYPE':'text'}]
result = chimp.list_batch_subscribe(list_id(),
batch,
double_optin=False,
update_existing=False,
replace_interests=False)
assert result['add_count'] == 2
members = chimp.list_members(list_id())['data']
emails = map(lambda x: x['email'], members)
assert EMAIL_ADDRESS in emails
assert EMAIL_ADDRESS2 in emails
result = chimp.list_batch_unsubscribe(list_id(),
[EMAIL_ADDRESS,EMAIL_ADDRESS2],
delete_member=True,
send_goodbye=False,
send_notify=False)
assert result['success_count'] == 2
def test_list_interest_groups_add_and_delete():
# check no lists exists
# pprint.pprint(chimp.list_interest_groups(list_id()))
grouping_id = chimp.list_interest_groupings_add(list_id(), 'test grouping', 'hidden', ['first group'])
assert len(chimp.list_interest_groups(list_id(), grouping_id)['groups']) == 1
# add list
assert chimp.list_interest_group_add(list_id(), 'test', grouping_id)
assert len(chimp.list_interest_groups(list_id(), grouping_id)['groups']) == 2
# delete list
assert chimp.list_interest_group_del(list_id(), 'test', grouping_id)
assert len(chimp.list_interest_groups(list_id(), grouping_id)['groups']) == 1
assert (chimp.list_interest_groupings_del(grouping_id))
def test_list_merge_vars_add_and_delete():
pprint.pprint(chimp.list_merge_vars(list_id()))
assert len(chimp.list_merge_vars(list_id())) == 3
# add list
assert chimp.list_merge_var_add(list_id(), 'test', 'some_text')
assert len(chimp.list_merge_vars(list_id())) == 4
# delete list
assert chimp.list_merge_var_del(list_id(), 'test')
assert len(chimp.list_merge_vars(list_id())) == 3
def test_list_update_member_and_member_info():
# set up
assert chimp.list_subscribe(list_id(), EMAIL_ADDRESS,
{'FIRST': 'unit', 'LAST': 'tests'},
double_optin=False)
assert chimp.list_merge_var_add(list_id(), 'TEST', 'test_merge_var')
grouping_id = chimp.list_interest_groupings_add(list_id(), 'tlistg', 'hidden', ['tlist'])
# update member and get the info back
assert chimp.list_update_member(list_id(), EMAIL_ADDRESS,
{'TEST': 'abc',
'INTERESTS': 'tlist'}, replace_interests=False)
info = chimp.list_member_info(list_id(), EMAIL_ADDRESS)
pprint.pprint(info)
# tear down
assert chimp.list_merge_var_del(list_id(), 'TEST')
assert chimp.list_interest_group_del(list_id(), 'tlist', grouping_id)
assert chimp.list_interest_groupings_del(grouping_id)
assert chimp.list_unsubscribe(list_id(), EMAIL_ADDRESS,
delete_member=True,
send_goodbye=False,
send_notify=False)
# check the info matches the set up
assert 'TEST' in info['merges']
assert info['merges']['TEST'] == 'abc'
def test_create_delete_campaign():
uid = md5.new(str(random.random())).hexdigest()
subject = 'chimpy campaign test %s' % uid
options = {'list_id': list_id(),
'subject': subject,
'from_email': EMAIL_ADDRESS,
'from_name': 'chimpy',
'generate_text': True
}
    # this is just to make sure the flatten utility is working
segment_opts = {'match': 'any',
'conditions':[{'field': 'date', 'op': 'gt', 'value': '2000-01-01'},
{'field': 'email', 'op': 'like', 'value': '@'}]}
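    # Note (illustrative only, not verified against chimpy internals): when the
    # campaign is created, a nested segment_opts structure like this is flattened
    # into PHP-style POST parameters, roughly segment_opts[match]=any,
    # segment_opts[conditions][0][field]=date, and so on.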
html = """ <html><body><h1>My test newsletter</h1><p>Just testing</p>
<a href="*|UNSUB|*">Unsubscribe</a>*|REWARDS|*</body>"""
content = {'html': html}
cid = chimp.campaign_create('regular', options, content, segment_opts=segment_opts)
assert isinstance(cid, basestring)
# check if the new campaign really is there
campaigns = chimp.campaigns(filter_subject=subject)
assert len(campaigns['data'])==1
assert campaigns['data'][0]['id'] == cid
    # was our content properly added?
final_content = chimp.campaign_content(cid)
assert '<h1>My test newsletter</h1>' in final_content['html']
assert 'My test newsletter' in final_content['text']
# clean up
chimp.campaign_delete(cid)
def test_replicate_update_campaign():
""" replicates and updates a campaign """
uid = md5.new(str(random.random())).hexdigest()
subject = 'chimpy campaign test %s' % uid
options = {'list_id': list_id(),
'subject': subject,
'from_email': EMAIL_ADDRESS,
'from_name': 'chimpy',
'generate_text': True
}
html = """ <html><body><h1>My test newsletter</h1><p>Just testing</p>
<a href="*|UNSUB|*">Unsubscribe</a>*|REWARDS|*</body>"""
content = {'html': html}
cid = chimp.campaign_create('regular', options, content)
newcid = chimp.campaign_replicate(cid=cid)
assert isinstance(newcid, basestring)
newsubject = 'Fresh subject ' + uid
newtitle = 'Custom title ' + uid
res = chimp.campaign_update(newcid, 'subject', newsubject)
assert res is True
res = chimp.campaign_update(newcid, 'title', newtitle)
assert res is True
# campaigns = chimp.campaigns(filter_subject=newsubject)
# pprint.pprint(campaigns['data'])
# assert len(campaigns['data'])==1
# campaigns = chimp.campaigns(filter_title=newtitle)
# assert len(campaigns['data'])==1
#clean up
chimp.campaign_delete(newcid)
chimp.campaign_delete(cid)
def test_schedule_campaign():
""" schedules and unschedules a campaign """
uid = md5.new(str(random.random())).hexdigest()
subject = 'chimpy campaign schedule test %s' % uid
options = {'list_id': list_id(),
'subject': subject,
'from_email': EMAIL_ADDRESS,
'from_name': 'chimpy',
'generate_text': True
}
html = """ <html><body><h1>My test newsletter</h1><p>Just testing</p>
<a href="*|UNSUB|*">Unsubscribe</a>*|REWARDS|*</body>"""
content = {'html': html}
cid = chimp.campaign_create('regular', options, content)
schedule_time = datetime.datetime(2012, 12, 20, 19, 0, 0)
chimp.campaign_schedule(cid, schedule_time)
campaign = chimp.campaigns(filter_subject=subject)['data'][0]
assert campaign['status'] == 'schedule'
assert campaign['send_time'] in ('Dec 20, 2012 07:00 pm', '2012-12-20 19:00:00')
chimp.campaign_unschedule(cid)
campaign = chimp.campaigns(filter_subject=subject)['data'][0]
assert campaign['status'] == 'save'
#clean up
chimp.campaign_delete(cid)
def test_rss_campaign():
""" add, pause, resume rss campaign """
uid = md5.new(str(random.random())).hexdigest()
subject = 'chimpy campaign rss test %s' % uid
options = {'list_id': list_id(),
'subject': subject,
'from_email': EMAIL_ADDRESS,
'from_name': 'chimpy',
'generate_text': True
}
html = """ <html><body><h1>My test RSS newsletter</h1><p>Just testing</p>
<a href="*|UNSUB|*">Unsubscribe</a>*|REWARDS|*</body>"""
content = {'html': html}
type_opts = {'url': 'http://mailchimp.com/blog/rss'}
cid = chimp.campaign_create('rss', options, content, type_opts=type_opts)
campaign = chimp.campaigns(filter_subject=subject)['data'][0]
assert campaign['type'] == 'rss'
    # TODO: Could not find a way to activate the RSS campaign from the API.
    # You need to activate it before being able to test pause and resume;
    # send_now and schedule didn't do the trick.
#chimp.campaign_pause(cid)
#chimp.campaign_resume(cid)
#clean up
chimp.campaign_delete(cid)
if __name__ == '__main__':
setup_module()
for f in globals().keys():
if f.startswith('test_') and callable(globals()[f]):
print f
globals()[f]()
|
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from sentry.models import (
ApiKey,
AuditLogEntryEvent,
DeletedOrganization,
DeletedTeam,
DeletedProject,
Organization,
OrganizationStatus,
)
from sentry.testutils import TestCase
from sentry.utils.audit import create_audit_entry
class FakeHttpRequest(object):
def __init__(self, user):
self.user = user
self.META = {"REMOTE_ADDR": "127.0.0.1"}
class CreateAuditEntryTest(TestCase):
def setUp(self):
self.user = self.create_user()
self.req = FakeHttpRequest(self.user)
self.org = self.create_organization(owner=self.user)
self.team = self.create_team(organization=self.org)
self.project = self.create_project(teams=[self.team], platform="java")
def assert_no_delete_log_created(self):
assert not DeletedOrganization.objects.filter(slug=self.org.slug).exists()
assert not DeletedTeam.objects.filter(slug=self.team.slug).exists()
assert not DeletedProject.objects.filter(slug=self.project.slug).exists()
def test_audit_entry_api(self):
org = self.create_organization()
apikey = ApiKey.objects.create(organization=org, allowed_origins="*")
req = FakeHttpRequest(AnonymousUser())
req.auth = apikey
entry = create_audit_entry(req)
assert entry.actor_key == apikey
assert entry.actor is None
assert entry.ip_address == req.META["REMOTE_ADDR"]
self.assert_no_delete_log_created()
def test_audit_entry_frontend(self):
req = FakeHttpRequest(self.create_user())
entry = create_audit_entry(req)
assert entry.actor == req.user
assert entry.actor_key is None
assert entry.ip_address == req.META["REMOTE_ADDR"]
self.assert_no_delete_log_created()
def test_audit_entry_org_delete_log(self):
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=AuditLogEntryEvent.ORG_REMOVE,
data=self.org.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.target_object == self.org.id
assert entry.event == AuditLogEntryEvent.ORG_REMOVE
deleted_org = DeletedOrganization.objects.get(slug=self.org.slug)
self.assert_valid_deleted_log(deleted_org, self.org)
def test_audit_entry_org_restore_log(self):
Organization.objects.filter(id=self.organization.id).update(
status=OrganizationStatus.PENDING_DELETION
)
org = Organization.objects.get(id=self.organization.id)
Organization.objects.filter(id=self.organization.id).update(
status=OrganizationStatus.DELETION_IN_PROGRESS
)
org2 = Organization.objects.get(id=self.organization.id)
Organization.objects.filter(id=self.organization.id).update(
status=OrganizationStatus.VISIBLE
)
org3 = Organization.objects.get(id=self.organization.id)
orgs = [org, org2, org3]
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=AuditLogEntryEvent.ORG_RESTORE,
data=self.org.get_audit_log_data(),
)
entry2 = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=AuditLogEntryEvent.ORG_EDIT,
data=self.org.get_audit_log_data(),
)
for i in orgs:
if (
i.status == OrganizationStatus.PENDING_DELETION
or i.status == OrganizationStatus.DELETION_IN_PROGRESS
):
assert i.status != OrganizationStatus.VISIBLE
assert ("restored") in entry.get_note()
assert entry.actor == self.user
assert entry.target_object == self.org.id
assert entry.event == AuditLogEntryEvent.ORG_RESTORE
else:
assert i.status == OrganizationStatus.VISIBLE
assert ("edited") in entry2.get_note()
assert entry2.actor == self.user
assert entry2.target_object == self.org.id
assert entry2.event == AuditLogEntryEvent.ORG_EDIT
def test_audit_entry_team_delete_log(self):
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.team.id,
event=AuditLogEntryEvent.TEAM_REMOVE,
data=self.team.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.target_object == self.team.id
assert entry.event == AuditLogEntryEvent.TEAM_REMOVE
deleted_team = DeletedTeam.objects.get(slug=self.team.slug)
self.assert_valid_deleted_log(deleted_team, self.team)
def test_audit_entry_project_delete_log(self):
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=AuditLogEntryEvent.PROJECT_REMOVE,
data=self.project.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == AuditLogEntryEvent.PROJECT_REMOVE
deleted_project = DeletedProject.objects.get(slug=self.project.slug)
self.assert_valid_deleted_log(deleted_project, self.project)
assert deleted_project.platform == self.project.platform
def test_audit_entry_integration_log(self):
project = self.create_project()
self.login_as(user=self.user)
entry = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=AuditLogEntryEvent.INTEGRATION_ADD,
data={"integration": "webhooks", "project": project.slug},
)
assert ("enabled") in entry.get_note()
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == AuditLogEntryEvent.INTEGRATION_ADD
entry2 = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=AuditLogEntryEvent.INTEGRATION_EDIT,
data={"integration": "webhooks", "project": project.slug},
)
assert ("edited") in entry2.get_note()
assert entry2.actor == self.user
assert entry2.target_object == self.project.id
assert entry2.event == AuditLogEntryEvent.INTEGRATION_EDIT
entry3 = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=AuditLogEntryEvent.INTEGRATION_REMOVE,
data={"integration": "webhooks", "project": project.slug},
)
assert ("disable") in entry3.get_note()
assert entry3.actor == self.user
assert entry3.target_object == self.project.id
assert entry3.event == AuditLogEntryEvent.INTEGRATION_REMOVE
|
|
"""
Link checker for ANDS DOIs.
"""
"""
Data structures used throughout this module:
client_list: dict (despite the name, grr)
key: int: client_id
    value: tuple: The row of the doi_client table
that has client_id as its key.
doi_list: list
element: tuple: a row from the doi_object table.
testing_array: dict (despite the name, grr)
key: int: An index into the doi_list array.
value: dict: Details of the link to be tested.
There are three key/value pairs in each dictionary,
taken from the corresponding tuple in doi_list.
key: "url_str"
value: The value of the "url" column (whitespace stripped).
key: "creator"
value: The value of the "client_id" column.
key: "doi_id"
value: The value of the "doi_id" column (whitespace stripped).
result_list: dict
key: int: client_id
value: str: text containing a list of the broken links
for this client (for insertion into an email going to the client).
error_count: dict
key: int: client_id
    value: int: The number of errors encountered when checking the links
belonging to the client.
"""
import asyncio
import asyncio.futures
import datetime
import socket
import sys
import time
import urllib
# The base module contains the BaseChecker class.
from . import base
class DOIChecker(base.BaseChecker):
"""Checker for DOIs.
"""
def do_link_checking(self):
"""Do the link checking.
"""
client_list = {}
self._get_client_list(client_list, self._params['client_id'])
result_list = {}
error_count = {}
self._run_tests(self._params['ssl_context'],
self._params['client_id'],
self._params['admin_email'], client_list,
int(self._params['link_timeout']),
int(self._params['batch_size']),
result_list, error_count)
self._process_result_lists(client_list, result_list, error_count,
self._params['client_id'],
self._params['admin_email'])
# All the columns in the doi_client table, in order.
DOI_CLIENT_COLUMNS = """\
`client_id`,
`client_name`,
`client_contact_name`,
`ip_address`,
`app_id`,
`created_when`,
`client_contact_email`,
`datacite_prefix`,
`shared_secret`
"""
def _get_client_list(self, client_list, client_id):
"""Get client information for DOIs.
Get client information for generating a personalised record
for each test run.
Arguments:
client_list -- The dictionary to be populated with the results
of the database query.
client_id -- A client_id to use for searching the database,
or None, if all clients are to be returned.
"""
cur = self._conn.cursor()
query = "SELECT " + self.DOI_CLIENT_COLUMNS + " FROM doi_client"
if client_id is not None:
cur.execute(query + " where `client_id`=" + str(client_id) + ";")
else:
cur.execute(query + ";")
for r in cur:
client_list[r[0]] = r
if self._debug:
print("DEBUG: Assigning client_list[{}] = {}".format(
r[0], r), file=sys.stderr)
cur.close()
def _run_tests(self, ssl_context, client_id, admin_email, client_list,
link_timeout, batch_size,
result_list, error_count):
"""
Arguments:
ssl_context -- The SSL context to use when making HTTP requests.
        client_id -- A client_id to use for searching the database,
            or None, if all clients are to be checked.
admin_email -- If not None, the email address to use as
recipient of all outgoing messages.
client_list -- The details of the client(s) of the DOIs.
link_timeout -- Timeout to use, in seconds.
batch_size -- Maximum number of concurrent link checks.
result_list -- The results of the tests.
error_count -- The errors resulting from the tests.
"""
doi_list = []
testing_array = {}
self._get_DOI_links(doi_list, client_id)
REPORT_HEADER = "Number of URLs to be tested: " + str(len(doi_list))
self.print_text_or_html(REPORT_HEADER,
REPORT_HEADER + "\n<br />")
socket.setdefaulttimeout(link_timeout)
loop = asyncio.get_event_loop()
# Sleep 1 before getting started. (Why?)
time.sleep(1)
TIMEOUT_ERROR_FORMAT = 'Error DOI_ID: {} URL: {} CONNECTION TIMEOUT'
# The variable "batch_number" iterates over batches
# of size batch_size.
batch_number = 0
# Store the length of doi_list for convenience
len_doi_list = len(doi_list)
while len_doi_list > (batch_number * batch_size):
task_array = []
# This range() iterates over a range of size (at most) batch_size
for i in range(batch_number * batch_size,
min((batch_number + 1) * batch_size,
len_doi_list)):
if self._debug:
print("DEBUG: i =", i, "; doi_list[i] =", doi_list[i],
file=sys.stderr)
testing_array[i] = {"url_str": doi_list[i][13].strip(),
"creator": doi_list[i][11],
"doi_id": doi_list[i][0].strip()}
                task_array.append(asyncio.ensure_future(
self._check_URL_resource(ssl_context,
doi_list[i],
i,
result_list,
error_count,
testing_array)))
try:
loop.run_until_complete(asyncio.wait(task_array,
timeout=link_timeout))
# If a test is successful, the corresponding entry in
# testing_array is deleted.
# So when run_until_complete returns, the entries
# remaining in testing_array are all timeouts.
for k, v in testing_array.items():
self._handle_one_error(result_list, error_count,
testing_array,
v['creator'],
TIMEOUT_ERROR_FORMAT.format(
v['doi_id'],
v['url_str']),
-1)
testing_array.clear()
except ValueError:
print("i: {}, range start {}, end {}".
format(i,
batch_number * batch_size,
((batch_number + 1) * batch_size)))
finally:
# Clean up all pending tasks. See:
# https://groups.google.com/d/msg/python-tulip/
# qQbdxREjn1Q/guWqL8tjH8gJ
for t in asyncio.Task.all_tasks(loop):
# print("Cancelling task: ", t)
t.cancel()
# Give cancelled tasks a chance to recover.
loop.run_until_complete(asyncio.sleep(0.1))
batch_number += 1
loop.close()
DOI_OBJECTS_COLUMNS = """\
`doi_id`,
`publisher`,
`publication_year`,
`language`,
`version`,
`updated_when`,
`status`,
`identifier_type`,
`rights`,
`last_metadata_update`,
`last_metadata_version`,
`client_id`,
`created_who`,
`url`,
`created_when`,
`datacite_xml`
"""
def _get_DOI_links(self, doi_list, client_id=None):
"""Get all production DOIs to be tested.
Production DOIs are those which have a status other than
"REQUESTED", and which have a doi_id beginning with "10.4".
The doi_list array is updated in situ.
Arguments:
doi_list -- The array to be populated with DOI data from the database.
client_id -- A client_id to use for searching the database,
or None, if the DOIs of all clients are to be returned.
"""
cur = self._conn.cursor()
query = ("SELECT " + self.DOI_OBJECTS_COLUMNS +
" FROM doi_objects WHERE ")
if client_id is not None:
query += "`client_id`=" + str(client_id) + " AND "
query += ("`identifier_type`='DOI'"
" AND `status`!='REQUESTED'"
" AND `doi_id` LIKE '10.4%';")
if self._debug:
print("DEBUG: _get_DOI_links query:", query, file=sys.stderr)
cur.execute(query)
for r in cur:
# If url is missing (NULL in the database), set it to
# an empty string. This allows calling strip() on it
# later (in _run_tests).
if not r[13]:
l = list(r)
l[13] = ""
r = tuple(l)
doi_list.append(r)
cur.close()
# Format string for HEAD query.
# NB: The Keep-Alive entry is for possible future work:
# doing a subsequent GET request to analyse the page content.
# Replacement fields:
# url_path -- The query URL to be sent.
# url -- The entire URL object, as returned by urlsplit().
HEAD_QUERY_FORMAT = (
'HEAD {url_path} HTTP/1.0\r\n'
'Host: {url.hostname}\r\n'
'User-agent: Mozilla/5.0 (Windows; U; Windows NT 5.1; '
'en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6\r\n'
'Accept: text/xml,application/xml,application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5\r\n'
'Accept-Language: en-us,en;q=0.5\r\n'
'Accept-Encoding: gzip,deflate\r\n'
'Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n'
'Keep-Alive: 300\r\n'
'\r\n')
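    # For illustration (hypothetical URL): with
    #   url = urllib.parse.urlsplit('http://example.org/some/page?x=1')
    # and url_path built as in _check_URL_resource ('/some/page?x=1'),
    # the format above renders roughly as:
    #   HEAD /some/page?x=1 HTTP/1.0
    #   Host: example.org
    #   User-agent: Mozilla/5.0 (...)
    #   ...
    # ending with a blank line that terminates the request headers.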
# Maximum number of attempts to check a link.
# This includes testing the original URL, and following redirects.
# Why 7? That is the number of attempts (including the source
# URL) made by the original DOI link checker.
# This constant might be suitable for turning
# into a configuration parameter.
ATTEMPTS_MAX = 7
@asyncio.coroutine
def _check_URL_resource(self, ssl_context,
r, counter,
result_list, error_count, testing_array):
"""Check one URL resource.
Request the header for each resource and try to determine
if it is resolvable.
Record a log entry if an exception occurs, or the server
returns a 400/500 error.
Arguments:
ssl_context -- The SSL context to use when making HTTP requests.
r -- The tuple containing a row from the doi_object table with
the details of the link to be tested.
counter -- The key of testing_array corresponding to this test.
If the key is valid, and the link is valid, the key/value pair
will be removed from testing_array.
result_list -- The dict containing the results of testing.
error_count -- The dict containing the error counts.
testing_array -- The dict containing the details of the current batch
of tests.
"""
# Hmm, why did we put the data in testing_array?
# See _run_tests for the same code.
url_str = url_str_original = r[13].strip()
creator = r[11]
doi_id = r[0].strip()
SCHEME_NOT_HTTP_FORMAT = ('Error: Scheme is not http(s): '
'DOI_ID: {} URL: {}')
URL_PARSE_ERROR_FORMAT = ('Error: Parsing URL failed: '
'DOI_ID: {} URL: {}')
NO_STATUS_ERROR_FORMAT = ('Error: Server did not return an '
'HTTP status code')
STATUS_ERROR_FORMAT = '4/500s: DOI_ID: {} URL: {} Status {}'
REDIRECT_SAME_FORMAT = ('Error: Redirect URL same as original: '
'DOI_ID: {} URL: {}')
EXCEPTION_FORMAT = 'Error: DOI_ID: {} URL: {} exception {}'
TOO_MANY_REDIRECTS_FORMAT = ('Error: too many redirects: '
'DOI_ID: {} ORIGINAL URL: {} '
'FINAL URL: {}')
try:
# First time round (i.e., before attempting to follow any
# redirects), do a small sleep. This helps avoid
# DoS attacking the server.
# NB This "should" say "yield from asyncio.sleep(0.3)",
# but we do really want the whole system to pause at
# this point, to give a delay between each
# connection initiation.
time.sleep(0.3)
for redirect_count in range(0, self.ATTEMPTS_MAX):
url = urllib.parse.urlsplit(url_str)
if not url.scheme.startswith('http'):
# The scheme must begin with "http",
# i.e., be either "http" or "https".
self._handle_one_error(result_list, error_count,
testing_array,
creator,
SCHEME_NOT_HTTP_FORMAT.format(
doi_id,
url_str),
counter)
return
if not url.hostname:
# Something wrong with the parsing of the URL,
# possibly "http:/only-one-slash.com".
self._handle_one_error(result_list, error_count,
testing_array,
creator,
URL_PARSE_ERROR_FORMAT.format(
doi_id,
url_str),
counter)
return
# Scheme OK, so now construct the query path to be sent to the
# server in a HEAD request.
url_path = url.path
# Handle the case of "http://hostname.but.no.trailing.slash"
if url_path == '':
url_path = '/'
if url.query != '':
url_path += "?" + url.query
if self._debug:
print('DEBUG: Counter:', counter,
'redirect_count:', redirect_count,
'url_str:', url_str, file=sys.stderr)
# Determine the port to use for the connection.
# Since 'https' contains 'http' as a prefix,
# check for the former.
if url.scheme.startswith('https'):
# For HTTPS, default to port 443.
port = url.port if url.port else 443
if self._debug:
print("DEBUG: Opening HTTPS connection to "
"host {}, port {}".format(url.hostname,
port),
file=sys.stderr)
reader, writer = yield from \
asyncio.open_connection(url.hostname,
port, ssl=ssl_context)
else:
# "Plain" HTTP request; port defaults to 80.
port = url.port if url.port else 80
if self._debug:
print("DEBUG: Opening HTTP connection to "
"host {}, port {}".format(url.hostname,
port),
file=sys.stderr)
reader, writer = yield from \
asyncio.open_connection(url.hostname, port)
query = self.HEAD_QUERY_FORMAT.format(
url_path=url_path, url=url)
if self._debug:
print("DEBUG:", counter, "Sending query string: ",
query,
file=sys.stderr)
writer.write(query.encode("utf-8"))
# Await and read the response.
while True:
line = yield from reader.readline()
if not line:
# End of file read.
break
# readline() returns a bytes, so it must be decoded.
line = line.decode("utf-8").rstrip()
if line.startswith('<'):
# Oh dear, the server is now sending the page.
# This has been seen with an IIS/6.0 server.
break
if line:
# The next two lines are not used for now,
# but might be useful in the future.
# Apparently, there are some pages that are
# "soft 404s", i.e., they return a status code of
# (say) 200, but the content of the page is text
# which says "No such page" or the like.
# So in future, we may
# scrape pages to see if the page returned actually
# reports that the page is missing/deleted.
# if line.startswith('Content-Type'):
# mType = line
if self._debug:
print('DEBUG:', counter, line, file=sys.stderr)
if line.startswith('HTTP/1.'):
mStatus = line
if line.startswith(('Location:', 'location:')):
location = line.split()[1]
else:
# Empty line was read; end of headers.
break
if 'mStatus' not in locals():
# Made it through the loop without setting mStatus,
# which means (for some reason) we didn't get
# an HTTP status code.
self._handle_one_error(result_list, error_count,
testing_array,
creator,
NO_STATUS_ERROR_FORMAT,
counter)
return
if mStatus:
# The status line is "HTTP/1.x 300 ....", so the status
# code is the second field after split,
# i.e., at position 1.
status_code = int(mStatus.split()[1])
# Now treat the different status codes as appropriate.
if status_code > 399:
# Status > 399 is an error, e.g., a "404".
self._handle_one_error(result_list, error_count,
testing_array,
creator,
STATUS_ERROR_FORMAT.format(
doi_id,
url_str,
mStatus),
counter)
return
elif status_code == 301 or status_code == 302:
# Handle a redirection.
location = self.construct_absolute_path(url.scheme,
url.hostname,
url.port,
location)
if url_str != location:
# Follow a redirect.
url_str = location
# This is the only branch that falls through and
# leads to the next iteration of the for loop.
else:
# The redirected URL was the same as the original.
# Don't proceed any further.
self._handle_one_error(
result_list, error_count,
testing_array,
creator,
REDIRECT_SAME_FORMAT.format(
doi_id,
url_str),
counter)
return
else:
# Success. This is indicated by deleting
# the corresponding element of testing_array.
try:
del testing_array[counter]
except KeyError:
pass
return
# "Successful" conclusion of the for loop. But this means
# we have now followed too many redirects.
self._handle_one_error(result_list, error_count, testing_array,
creator,
TOO_MANY_REDIRECTS_FORMAT.format(
doi_id,
url_str_original,
url_str),
counter)
return
# An UnboundLocalError occurs if mStatus is tested without
# having been set. Handle this using the catch-all handler
# below.
# except UnboundLocalError as e:
# _handle_one_error(result_list, error_count, testing_array,
# creator,
# EXCEPTION_FORMAT.format(doi_id,
# url_str, repr(e)),
# counter)
except asyncio.futures.CancelledError:
# This is caused by _run_tests() cancelling the task
# because of a timeout.
pass
except Exception as e:
self._handle_one_error(result_list, error_count, testing_array,
creator,
EXCEPTION_FORMAT.format(
doi_id, url_str, repr(e)),
counter)
# Format for text part of emailed reports.
MESSAGE_FORMAT = """\
Report Run: {}
Broken Links Discovered: {}
Client Name: {}
Client App ID: {}"""
# HTML wrapper for MESSAGE_FORMAT
MESSAGE_HTML_WRAPPER_FORMAT = """\
<html>
<head>Cite My Data Broken Links Report</head>
<body>
<p>{}</p>
</body>
</html>
"""
# Format for message about missing client.
MISSING_CLIENT_FORMAT = """Report from DOI checker.
There is a DOI in the doi_objects table with an owner_id
which does not appear as a client_id in the doi_client table.
owner_id: {}
"""
def _process_result_lists(self, client_list, result_list, error_count,
client_id=None, admin_email=None):
"""Summarize the errors, log and email the results.
Summarize the logs, create appropriate headings, log an entry, and
email the content to whom it supposed to.
An entry is logged to the database for all clients with an
error in the link.
The recipient of each email is determined as follows:
1. If a value is specified for admin_email (using the "-e"
command-line argument), then this address is
used as the recipient of all outgoing mails. The admin_email
parameter serves to override all other possible recipients.
2. Otherwise (no admin_email was provided), was a client_id
provided?
2a. If a client_id was provided, then use the client's email address
as the recipient.
2b. If no client_id was provided, then this is a report over all
        clients. The value of params['sender_email'] is used as the
recipient.
Arguments:
client_list -- The details of the client(s) of the DOIs.
result_list -- The results of the tests.
error_count -- The errors resulting from the tests.
client_id -- A client_id, if one was specified, or None, if
all clients are to be reported.
admin_email -- If specified, this is used as the recipient of all
outgoing messages. If not specified, use the client's address,
or fall back to the sender's address.
"""
if len(result_list) == 0 and (client_id is not None):
# Success; one client's links were tested and all OK.
client_app_id = client_list[client_id][4]
client_name = client_list[client_id][1]
if admin_email:
# admin_email overrides all other possibilities.
recipient = admin_email
else:
recipient = client_list[client_id][6]
message_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
message_text = self.MESSAGE_FORMAT.format(message_time,
0,
str(client_name),
str(client_app_id))
message_text_as_html = message_text.replace('\n', '\n <br />')
message_html = self.MESSAGE_HTML_WRAPPER_FORMAT.format(
message_text_as_html)
self._insert_message_log(client_id, message_text, 'SUCCESS')
message_subject = \
("Broken Links Discovered for Cite My Data Client: " +
client_name)
self.send_one_email(recipient,
"DOI.LINK.CHECKER",
message_subject,
message_text, message_html)
self.print_text_or_html(message_text, message_text_as_html)
# Loop over every client with at least one error.
for owner_id, message in result_list.items():
try:
client_app_id = client_list[owner_id][4]
client_name = client_list[owner_id][1]
client_broken_link_count = error_count[owner_id]
message_time = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M")
message_text = self.MESSAGE_FORMAT.format(
message_time,
str(client_broken_link_count),
str(client_name),
str(client_app_id))
message_text += '\nDOIs with broken links:\n' + message
message_text_as_html = message_text.replace('\n',
'\n <br />')
message_html = self.MESSAGE_HTML_WRAPPER_FORMAT.format(
message_text_as_html)
self._insert_message_log(owner_id, message_text, 'FAILURE')
if client_id is not None:
# A client_id was specified, so print out the result
# on the console.
self.print_text_or_html(message_text,
message_text_as_html)
# Determine the email recipient.
if admin_email:
# admin_email overrides all other possibilities.
recipient = admin_email
elif client_id is not None:
# No admin_email specified, but there is a client_id.
recipient = client_list[owner_id][6]
else:
# Fall back to using the sender as the recipient.
recipient = self._params['sender_email']
message_subject = \
("Broken Links Discovered for Cite My Data Client: " +
client_name)
self.send_one_email(
recipient,
"DOI.LINK.CHECKER",
message_subject,
message_text, message_html)
except KeyError:
# There is no such owner_id, so client_list[owner_id]
# failed. Send a message to the admin.
if self._debug:
print("DEBUG: Going to send a missing client "
"email for owner_id: ", owner_id,
file=sys.stderr)
message_text = self.MISSING_CLIENT_FORMAT.format(
owner_id)
message_text_as_html = message_text.replace('\n',
'\n <br />')
message_html = self.MESSAGE_HTML_WRAPPER_FORMAT.format(
message_text_as_html)
self.send_one_email(
self._params['sender_email'],
"DOI LINK CHECKER",
"DOI doi_objects has a link with a missing owner",
message_text, "")
# Logging functions
def _insert_message_log(self, owner_id, message, status):
"""Insert a log entry into the database's activity_log table.
The activity is specified as "LINKCHECK".
Arguments:
owner_id -- The owner of the DOI. This value is used as the
"client_id" column of the entry.
message -- The value to use for the "message" column of the entry.
        status -- The value to use for the "result" column of the entry.
"""
cursor = self._conn.cursor()
sql = ("INSERT INTO activity_log "
"(`client_id`, `message`, `activity`, `result`) "
"values (%s, %s, %s, %s);")
cursor.execute(sql, (owner_id, message, 'LINKCHECK', status))
cursor.close()
self._conn.commit()
def _handle_one_error(self, result_list, error_count, testing_array,
owner_id, message, test_index):
"""Store details of one error.
This maintains a summary for each client.
Arguments:
result_list -- The dict for storing error messages, per client_id.
error_count -- The dict for storing the count of the number
of errors, per client_id.
testing_array -- The dict containing the details of the current batch
of tests.
owner_id -- The creator (client_id) of the link.
message -- The error message to be saved.
test_index -- The key of testing_array corresponding to this test,
or -1. If the key is valid, the key/value pair will be removed
from testing_array.
"""
try:
result_list[owner_id] = (result_list[owner_id] +
'\n' +
message)
error_count[owner_id] = error_count[owner_id] + 1
except KeyError:
result_list[owner_id] = message
error_count[owner_id] = 1
try:
# _run_tests calls this function with test_index = -1.
del testing_array[test_index]
except KeyError:
pass
if __name__ == "__main__":
print('This module can not be executed standalone.')
sys.exit(1)
|
|
import re
from django.utils.translation import ugettext_lazy as _
from .base import (
ADDON_DICT, ADDON_EXTENSION, ADDON_LPAPP, ADDON_PERSONA, ADDON_PLUGIN,
ADDON_SEARCH, ADDON_STATICTHEME, ADDON_THEME)
from olympia.versions.compare import version_int as vint
class App(object):
@classmethod
def matches_user_agent(cls, user_agent):
return cls.user_agent_string in user_agent
# Applications
class FIREFOX(App):
id = 1
shortername = 'fx'
short = 'firefox'
pretty = _(u'Firefox')
browser = True
types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH,
ADDON_LPAPP, ADDON_PLUGIN, ADDON_PERSONA, ADDON_STATICTHEME]
guid = '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
min_display_version = 3.0
# These versions were relabeled and should not be displayed.
exclude_versions = (3.1, 3.7, 4.2)
user_agent_string = 'Firefox'
platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms)
@classmethod
def matches_user_agent(cls, user_agent):
matches = cls.user_agent_string in user_agent
if ('Android' in user_agent or 'Mobile' in user_agent or
'Tablet' in user_agent):
matches = False
return matches
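    # Sketch of expected behaviour (example UA strings are illustrative):
    #   FIREFOX.matches_user_agent(
    #       'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0')
    #   returns True (desktop Firefox), while a UA containing 'Android',
    #   'Mobile' or 'Tablet' returns False and is handled by ANDROID below.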
class THUNDERBIRD(App):
id = 18
short = 'thunderbird'
shortername = 'tb'
pretty = _(u'Thunderbird')
browser = False
types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_LPAPP,
ADDON_PERSONA]
guid = '{3550f703-e582-4d05-9a08-453d09bdfdc6}'
min_display_version = 1.0
user_agent_string = 'Thunderbird'
platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms)
class SEAMONKEY(App):
id = 59
short = 'seamonkey'
shortername = 'sm'
pretty = _(u'SeaMonkey')
browser = True
types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH,
ADDON_LPAPP, ADDON_PLUGIN, ADDON_PERSONA]
guid = '{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}'
min_display_version = 1.0
exclude_versions = (1.5,)
latest_version = None
user_agent_string = 'SeaMonkey'
platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms)
class SUNBIRD(App):
"""This application is retired and should not be used on the site. It
remains as there are still some sunbird add-ons in the db."""
id = 52
short = 'sunbird'
shortername = 'sb'
pretty = _(u'Sunbird')
browser = False
types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_LPAPP]
guid = '{718e30fb-e89b-41dd-9da7-e25a45638b28}'
min_display_version = 0.2
latest_version = None
user_agent_string = 'Sunbird'
platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms)
class MOBILE(App):
"""Old Firefox for Mobile.
Not supported anymore, should not be added to APPS."""
id = 60
short = 'mobile'
shortername = 'fn'
pretty = _(u'Mobile')
browser = True
types = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH,
ADDON_LPAPP, ADDON_PERSONA]
guid = '{a23983c0-fd0e-11dc-95ff-0800200c9a66}'
min_display_version = 0.1
user_agent_string = 'Fennec'
    platforms = 'mobile' # MOBILE_PLATFORMS (set in constants.platforms)
class ANDROID(App):
# This is for the Android native Firefox.
id = 61
short = 'android'
shortername = 'an'
pretty = _(u'Firefox for Android')
browser = True
types = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH,
ADDON_LPAPP, ADDON_PERSONA]
guid = '{aa3c5121-dab2-40e2-81ca-7ea25febc110}'
min_display_version = 11.0
user_agent_string = 'Fennec'
# Mobile and Android have the same user agent. The only way to distinguish
# is by the version number.
    user_agent_re = [re.compile(r'Fennec/([\d.]+)'),
                     re.compile(r'Android; Mobile; rv:([\d.]+)'),
                     re.compile(r'Android; Tablet; rv:([\d.]+)'),
                     re.compile(r'Mobile; rv:([\d.]+)'),
                     re.compile(r'Tablet; rv:([\d.]+)')]
platforms = 'mobile'
latest_version = None
@classmethod
def matches_user_agent(cls, user_agent):
for user_agent_re in cls.user_agent_re:
match = user_agent_re.search(user_agent)
if match:
v = match.groups()[0]
return vint(cls.min_display_version) <= vint(v)
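    # Sketch of expected behaviour (UA strings are illustrative):
    #   ANDROID.matches_user_agent(
    #       'Mozilla/5.0 (Android; Mobile; rv:40.0) Gecko/40.0 Firefox/40.0')
    #   returns True, because the 'Android; Mobile; rv:([\d.]+)' pattern matches
    #   and vint(cls.min_display_version) <= vint('40.0').
    #   An old Fennec UA such as 'Fennec/4.0' matches the first pattern but
    #   fails the version check, so the method returns False; a UA matching no
    #   pattern falls through and returns None.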
class MOZILLA(App):
"""Mozilla exists for completeness and historical purposes.
Stats and other modules may reference this for history.
This should NOT be added to APPS.
"""
id = 2
short = 'mz'
shortername = 'mz'
pretty = _(u'Mozilla')
browser = True
types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH,
ADDON_LPAPP, ADDON_PLUGIN]
guid = '{86c18b42-e466-45a9-ae7a-9b95ba6f5640}'
platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms)
class UNKNOWN_APP(App):
"""Placeholder for unknown applications."""
pretty = _(u'Unknown')
# UAs will attempt to match in this order.
APP_DETECT = (ANDROID, THUNDERBIRD, SEAMONKEY, FIREFOX)
APP_USAGE = (FIREFOX, THUNDERBIRD, ANDROID, SEAMONKEY)
# APP_USAGE_FIREFOXES_ONLY is a temporary constant while we have a waffle to
# disable thunderbird and seamonkey support.
# Since it's evaluated at import time, we can't change APP_USAGE through a
# waffle, so to support the waffle disabling Thunderbird and Seamonkey support
# we add a temporary constant that will be used by relevant code in place of
# APP_USAGE while the waffle is still used. When the waffle is turned on
# permanently and removed this constant can go away and APP_USAGE can be
# changed to only (ANDROID, FIREFOX).
APP_USAGE_FIREFOXES_ONLY = (ANDROID, FIREFOX)
APP_USAGE_STATICTHEME = (FIREFOX,)
APPS = {app.short: app for app in APP_USAGE}
APPS_ALL = {app.id: app for app in APP_USAGE + (MOZILLA, SUNBIRD, MOBILE)}
APP_IDS = {app.id: app for app in APP_USAGE}
APP_GUIDS = {app.guid: app for app in APP_USAGE}
APPS_CHOICES = tuple((app.id, app.pretty) for app in APP_USAGE)
APP_TYPE_SUPPORT = {}
for _app in APP_USAGE:
for _type in _app.types:
APP_TYPE_SUPPORT.setdefault(_type, []).append(_app)
# Fake max version for when we want max compatibility
FAKE_MAX_VERSION = '9999'
# The lowest maxVersion an app has to support to allow default-to-compatible.
D2C_MIN_VERSIONS = {
FIREFOX.id: '4.0',
SEAMONKEY.id: '2.1',
THUNDERBIRD.id: '5.0',
ANDROID.id: '11.0',
}
for _app in APPS_ALL.values():
_versions = list(getattr(_app, 'exclude_versions', []))
# 99 comes from the hacks we do to make search tools compatible with
# versions (bug 692360).
_versions.append(99)
_app.exclude_versions = tuple(_versions)
del _app, _type, _versions
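# For example, after this loop FIREFOX.exclude_versions becomes
# (3.1, 3.7, 4.2, 99), SEAMONKEY.exclude_versions becomes (1.5, 99), and apps
# that defined no exclude_versions end up with just (99,).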
|